Dataset schema (one row per source file; ranges as reported by the viewer):

| column | dtype | values |
|---|---|---|
| repo_name | string | lengths 6–100 |
| path | string | lengths 4–294 |
| copies | string | lengths 1–5 |
| size | string | lengths 4–6 |
| content | string | lengths 606–896k |
| license | string | 15 classes |
| var_hash | int64 | -9,223,186,179,200,150,000 to 9,223,291,175B |
| doc_hash | int64 | -9,223,304,365,658,930,000 to 9,223,309,051B |
| line_mean | float64 | 3.5 to 99.8 |
| line_max | int64 | 13 to 999 |
| alpha_frac | float64 | 0.25 to 0.97 |
| autogenerated | bool | 1 class |
--- repo_name: anoopcs9/samba | path: python/samba/web_server/__init__.py | copies: 45 | size: 2675 | license: gpl-3.0 ---

# -*- coding: utf-8 -*-
#
# Unix SMB/CIFS implementation.
# Copyright © Jelmer Vernooij <[email protected]> 2008
#
# Implementation of SWAT that uses WSGI
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
def render_placeholder(environ, start_response):
"""Send the user a simple placeholder about missing SWAT."""
status = '200 OK'
response_headers = [('Content-type', 'text/html')]
start_response(status, response_headers)
yield "<!doctype html>\n"
yield "<html>\n"
yield " <title>The Samba web service</title>\n"
yield "</html>\n"
yield "<body>\n"
yield "<p>Welcome to this Samba web server.</p>\n"
yield "<p>This page is a simple placeholder. You probably want to install "
yield "SWAT. More information can be found "
yield "<a href='http://wiki.samba.org/index.php/SWAT2'>on the wiki</a>.</p>"
yield "</p>\n"
yield "</body>\n"
yield "</html>\n"
def __call__(environ, start_response):
"""Handle a HTTP request."""
from wsgiref.util import application_uri, shift_path_info
from urlparse import urljoin
try:
import swat
except ImportError, e:
print "NO SWAT: %r" % e
have_swat = False
else:
have_swat = True
orig_path = environ['PATH_INFO']
name = shift_path_info(environ)
if name == "":
if have_swat:
start_response('301 Redirect',
[('Location', urljoin(application_uri(environ), 'swat')),])
return []
else:
return render_placeholder(environ, start_response)
elif have_swat and name == "swat":
return swat.__call__(environ, start_response)
else:
status = '404 Not Found'
response_headers = [('Content-type', 'text/html')]
start_response(status, response_headers)
return ["The path %s (%s) was not found" % (orig_path, name)]
if __name__ == '__main__':
from wsgiref import simple_server
httpd = simple_server.make_server('localhost', 8090, __call__)
print "Serving HTTP on port 8090..."
httpd.serve_forever()
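
Editor's note: the module above is Python 2 code ("print" statements, "urlparse", "except ImportError, e"). A minimal sketch of the same placeholder app ported to Python 3 could look like the following; it is an illustration of the WSGI pattern, not Samba code, and the port is an assumption:

```python
# Hypothetical Python 3 port of the placeholder WSGI app (illustrative only).
from wsgiref.simple_server import make_server


def render_placeholder(environ, start_response):
    """Yield a minimal placeholder page; WSGI bodies must be bytes in Python 3."""
    start_response('200 OK', [('Content-type', 'text/html; charset=utf-8')])
    body = (
        "<!doctype html>\n<html>\n"
        "  <head><title>The Samba web service</title></head>\n"
        "  <body><p>Welcome to this Samba web server.</p></body>\n"
        "</html>\n"
    )
    yield body.encode('utf-8')


if __name__ == '__main__':
    httpd = make_server('localhost', 8090, render_placeholder)
    print("Serving HTTP on port 8090...")
    httpd.serve_forever()
```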
--- repo_name: hfp/tensorflow-xsmm | path: tensorflow/contrib/tpu/python/tpu/device_assignment.py | copies: 7 | size: 12535 | license: apache-2.0 ---

# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of TPU helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.tpu.python.tpu.topology import Topology
def _compute_task_and_cores_to_replicas(core_assignment, topology):
"""Computes a nested dict which maps task and logical core to replicas."""
task_and_cores_to_replicas = {}
for replica in xrange(core_assignment.shape[0]):
for logical_core in xrange(core_assignment.shape[1]):
coordinates = core_assignment[replica, logical_core, :]
task_id = topology.task_ordinal_at_coordinates(coordinates)
if task_id not in task_and_cores_to_replicas:
task_and_cores_to_replicas[task_id] = {}
if logical_core not in task_and_cores_to_replicas[task_id]:
task_and_cores_to_replicas[task_id][logical_core] = set()
task_and_cores_to_replicas[task_id][logical_core].add(replica)
task_to_sorted_replica_id = {}
for task, core_to_replicas in task_and_cores_to_replicas.items():
core_to_sorted_replicas = {}
for core, replicas in core_to_replicas.items():
core_to_sorted_replicas[core] = sorted(replicas)
task_to_sorted_replica_id[task] = core_to_sorted_replicas
return task_to_sorted_replica_id
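# Editor's sketch (not part of the original file): for a core_assignment of
# shape [2, 1, mesh_rank] where both replicas' logical core 0 map to task 0,
# this helper returns {0: {0: [0, 1]}} -- task 0, logical core 0 hosts the
# sorted replica ids [0, 1].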
class DeviceAssignment(object):
"""Mapping from logical cores in a computation to the physical TPU topology.
Prefer to use the `device_assignment()` helper to construct a
`DeviceAssignment`; it is easier if less flexible than constructing a
`DeviceAssignment` directly.
"""
def __init__(self, topology, core_assignment):
"""Constructs a `DeviceAssignment` object.
Args:
topology: A `Topology` object that describes the physical TPU topology.
core_assignment: A logical to physical core mapping, represented as a
rank 3 numpy array. See the description of the `core_assignment`
property for more details.
Raises:
ValueError: If `topology` is not `Topology` object.
ValueError: If `core_assignment` is not a rank 3 numpy array.
"""
if not isinstance(topology, Topology):
raise ValueError("topology must be a Topology object, got {}".format(
type(topology)))
core_assignment = np.asarray(core_assignment, dtype=np.int32)
self._topology = topology
if core_assignment.ndim != 3:
raise ValueError("core_assignment must be a rank 3 numpy array, "
"got shape {}".format(core_assignment.shape))
self._num_replicas = core_assignment.shape[0]
self._num_cores_per_replica = core_assignment.shape[1]
if core_assignment.shape[-1] != topology.mesh_rank:
raise ValueError(
"minor dimension of core_assignment must have size equal to topology "
"rank ({}), got shape {}".format(topology.mesh_rank,
core_assignment.shape))
self._core_assignment = core_assignment
self._task_and_cores_to_replicas = _compute_task_and_cores_to_replicas(
self._core_assignment, topology)
@property
def topology(self):
"""A `Topology` that describes the TPU topology."""
return self._topology
@property
def num_cores_per_replica(self):
"""The number of cores per replica."""
return self._num_cores_per_replica
@property
def num_replicas(self):
"""The number of replicas of the computation."""
return self._num_replicas
@property
def core_assignment(self):
"""The logical to physical core mapping.
Returns:
An integer numpy array of rank 3, with shape
`[num_replicas, num_cores_per_replica, topology_rank]`. Maps
(replica, logical core) pairs to physical topology coordinates.
"""
return self._core_assignment
def _coordinates(self, replica, logical_core):
"""Returns the physical topology coordinates of a logical core."""
return tuple(self.core_assignment[replica, logical_core, :])
def lookup_replicas(self, task_id, logical_core):
"""Lookup replica ids by task number and logical core.
Args:
task_id: TensorFlow task number.
logical_core: An integer, identifying a logical core.
Returns:
A sorted list of the replicas that are attached to that task and
logical_core.
Raises:
ValueError: If no replica exists in the task which contains the logical
core.
"""
try:
return self._task_and_cores_to_replicas[task_id][logical_core]
except KeyError:
raise ValueError(
"Can not find any replica in task: {} contains logical_core: {} ".
format(task_id, logical_core))
def tpu_ordinal(self, replica=0, logical_core=0):
"""Returns the ordinal of the TPU device assigned to a logical core."""
coordinates = self._coordinates(replica, logical_core)
return self._topology.tpu_device_ordinal_at_coordinates(coordinates)
def host_device(self, replica=0, logical_core=0, job=None):
"""Returns the CPU device attached to a logical core."""
coordinates = self._coordinates(replica, logical_core)
return self._topology.cpu_device_name_at_coordinates(coordinates, job=job)
def tpu_device(self, replica=0, logical_core=0, job=None):
"""Returns the name of the TPU device assigned to a logical core."""
coordinates = self._coordinates(replica, logical_core)
return self._topology.tpu_device_name_at_coordinates(coordinates, job=job)
def device_assignment(topology,
computation_shape=None,
computation_stride=None,
num_replicas=1):
"""Computes a device_assignment of a computation across a TPU topology.
Attempts to choose a compact grid of cores for locality.
Returns a `DeviceAssignment` that describes the cores in the topology assigned
to each core of each replica.
`computation_shape` and `computation_stride` values should be powers of 2 for
optimal packing.
Args:
topology: A `Topology` object that describes the TPU cluster topology.
To obtain a TPU topology, evaluate the `Tensor` returned by
`initialize_system` using `Session.run`. Either a serialized
`TopologyProto` or a `Topology` object may be passed. Note: you must
evaluate the `Tensor` first; you cannot pass an unevaluated `Tensor` here.
computation_shape: A rank 1 int32 numpy array with size equal to the
topology rank, describing the shape of the computation's block of cores.
If None, the `computation_shape` is `[1] * topology_rank`.
computation_stride: A rank 1 int32 numpy array of size `topology_rank`,
describing the inter-core spacing of the `computation_shape` cores in the
TPU topology. If None, the `computation_stride` is `[1] * topology_rank`.
num_replicas: The number of computation replicas to run. The replicas will
be packed into the free spaces of the topology.
Returns:
A DeviceAssignment object, which describes the mapping between the logical
cores in each computation replica and the physical cores in the TPU
topology.
Raises:
ValueError: If `topology` is not a valid `Topology` object.
ValueError: If `computation_shape` or `computation_stride` are not 1D int32
numpy arrays with shape [3] where all values are positive.
ValueError: If computation's replicas cannot fit into the TPU topology.
"""
# Deserialize the Topology proto, if it is a string.
if isinstance(topology, bytes):
topology = Topology(serialized=topology)
if not isinstance(topology, Topology):
raise ValueError("`topology` is not a Topology object; got {}".format(
type(topology)))
topology_rank = len(topology.mesh_shape)
mesh_shape = topology.mesh_shape
if computation_shape is None:
computation_shape = np.array([1] * topology_rank, dtype=np.int32)
else:
computation_shape = np.asarray(computation_shape, dtype=np.int32)
if computation_stride is None:
computation_stride = np.array([1] * topology_rank, dtype=np.int32)
else:
computation_stride = np.asarray(computation_stride, dtype=np.int32)
if computation_shape.shape != (topology_rank,):
raise ValueError("computation_shape must have shape [{}]; got {}".format(
topology_rank, computation_shape.shape))
if computation_stride.shape != (topology_rank,):
raise ValueError("computation_stride must have shape [{}]; got {}".format(
topology_rank, computation_stride.shape))
if any(computation_shape < 1):
raise ValueError(
"computation_shape must be positive; got computation_shape={}".format(
computation_shape))
if any(computation_stride < 1):
raise ValueError(
"computation_stride must be positive; got computation_stride={}".format(
computation_stride))
# Computes the physical size of one computation instance.
computation_footprint = computation_shape * computation_stride
if any(computation_footprint > mesh_shape):
raise ValueError(
"computation footprint {} does not fit in TPU topology shape {}".format(
computation_footprint, mesh_shape))
# Computes how many copies of the computation footprint fit in the mesh.
block_counts = mesh_shape // computation_footprint
replica_counts = block_counts * computation_stride
max_replicas = np.prod(replica_counts)
if num_replicas > max_replicas:
raise ValueError(
"requested {} replicas but only {} replicas with shape {} and "
"computation_stride {} fit in a TPU mesh of shape {}".format(
num_replicas, max_replicas, computation_shape, computation_stride,
mesh_shape))
def ceil_of_ratio(n, m):
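"""Return the ceiling of n / m; e.g. ceil_of_ratio(7, 2) == 4 (editor's note)."""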
return (n + m - 1) // m
replica_shape = [0] * topology_rank
if num_replicas > 0:
remaining_replicas = num_replicas
remaining_dims = topology_rank
# Choose dimensions as close to an equal cube as possible, in order of
# increasing dimension size. By visiting dimensions in increasing size, we
# assign the most constrained dimension first, so we won't make infeasible
# choices.
#
# As a secondary sort order, visit the dimensions in reverse order. This
# means we try to use both cores on the same chip in preference to two cores
# on different chips.
for x, ni in sorted(((x, -i) for (i, x) in enumerate(replica_counts))):
i = -ni
target_size = int(math.ceil(remaining_replicas**(1.0 / remaining_dims)))
replica_shape[i] = min(target_size, x)
remaining_replicas = ceil_of_ratio(remaining_replicas, replica_shape[i])
remaining_dims -= 1
assert remaining_replicas == 1 and remaining_dims == 0
# Assigns an offset to each replica such that no two replicas overlap.
replica_offsets = np.full([num_replicas, topology_rank], -1, dtype=np.int32)
for replica in xrange(num_replicas):
# Chooses a replica number in each axis.
t = replica
pos = []
for dim in replica_shape[::-1]:
pos.append(t % dim)
t //= dim
replica_pos = np.array(pos[::-1], dtype=np.int32)
# Determines where that replica starts in each axis.
outer = replica_pos // computation_stride
inner = replica_pos % computation_stride
replica_offsets[replica, :] = outer * computation_footprint + inner
# Computes a complete logical core -> physical core mapping for each replica.
indices = [
np.arange(0, computation_shape[i] * computation_stride[i],
computation_stride[i]) for i in xrange(topology_rank)
]
indices = np.concatenate(
[i[..., np.newaxis] for i in np.meshgrid(*indices, indexing="ij")],
axis=-1)
indices = indices.reshape((-1, topology_rank))
assignment = indices + replica_offsets[:, np.newaxis, :]
return DeviceAssignment(topology, core_assignment=assignment)
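
Editor's note: a hedged usage sketch of `device_assignment`, following the docstring above. The session target, slice size, and computation shape are assumptions, not values taken from this file:

```python
# Illustrative only: build a 4-replica assignment on an assumed 2x2 TPU slice.
import tensorflow as tf
from tensorflow.contrib import tpu
from tensorflow.contrib.tpu.python.tpu.device_assignment import device_assignment

with tf.Session("grpc://tpu-worker:8470") as sess:  # address is hypothetical
    serialized_topology = sess.run(tpu.initialize_system())
    da = device_assignment(serialized_topology,
                           computation_shape=[1, 1, 2],  # both cores of each chip
                           num_replicas=4)
    print(da.num_replicas, da.num_cores_per_replica)
    print(da.tpu_device(replica=0, logical_core=0))
```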
--- repo_name: blaggacao/OpenUpgrade | path: addons/l10n_pa/__openerp__.py | copies: 260 | size: 1737 | license: agpl-3.0 ---

# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Panama Localization Chart Account",
"version": "1.0",
"description": """
Panamanian accounting chart and tax localization.
Panamanian chart of accounts and taxes in accordance with current regulations.
With the collaboration of
- AHMNET CORP http://www.ahmnet.com
""",
"author": "Cubic ERP",
"website": "http://cubicERP.com",
"category": "Localization/Account Charts",
"depends": [
"account_chart",
],
"data":[
"account_tax_code.xml",
"l10n_pa_chart.xml",
"account_tax.xml",
"l10n_pa_wizard.xml",
],
"demo_xml": [
],
"active": False,
"installable": True,
"certificate" : "",
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
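
Editor's note: a manifest like the one above is a single Python dict literal, which tooling can read without importing it. This is a rough sketch, not the OpenERP server's actual loader, and the path is assumed:

```python
# Rough sketch: read an addon manifest as data. ast.literal_eval parses the
# dict literal; the surrounding comment lines are ignored by the tokenizer.
import ast

with open("addons/l10n_pa/__openerp__.py") as f:
    manifest = ast.literal_eval(f.read())

print(manifest["name"])     # "Panama Localization Chart Account"
print(manifest["depends"])  # ["account_chart"]
```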
--- repo_name: teltek/edx-platform | path: common/test/acceptance/pages/studio/overview.py | copies: 4 | size: 40752 | license: agpl-3.0 ---

"""
Course Outline page in Studio.
"""
import datetime
from bok_choy.javascript import js_defined, wait_for_js
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from common.test.acceptance.pages.common.utils import click_css, confirm_prompt
from common.test.acceptance.pages.studio.container import ContainerPage
from common.test.acceptance.pages.studio.course_page import CoursePage
from common.test.acceptance.pages.studio.utils import set_input_value, set_input_value_and_save
from common.test.acceptance.tests.helpers import disable_animations, enable_animations, select_option_by_text
@js_defined('jQuery')
class CourseOutlineItem(object):
"""
A mixin class for any :class:`PageObject` shown in a course outline.
"""
# Note there are a few pylint disable=no-member occurrences in this class, because
# it was written assuming it is going to be a mixin to a PageObject and will have functions
# such as self.wait_for_ajax, which doesn't exist on a generic `object`.
BODY_SELECTOR = None
EDIT_BUTTON_SELECTOR = '.xblock-field-value-edit'
NAME_SELECTOR = '.item-title'
NAME_INPUT_SELECTOR = '.xblock-field-input'
NAME_FIELD_WRAPPER_SELECTOR = '.xblock-title .wrapper-xblock-field'
STATUS_MESSAGE_SELECTOR = '> div[class$="status"] .status-message'
CONFIGURATION_BUTTON_SELECTOR = '.action-item .configure-button'
def __repr__(self):
# CourseOutlineItem is also used as a mixin for CourseOutlinePage, which doesn't have a locator
# Check for the existence of a locator so that errors when navigating to the course outline page don't show up
# as errors in the repr method instead.
try:
return "{}(<browser>, {!r})".format(self.__class__.__name__, self.locator)
except AttributeError:
return "{}(<browser>)".format(self.__class__.__name__)
def _bounded_selector(self, selector):
"""
Returns `selector`, but limited to this particular `CourseOutlineItem` context
"""
# If the item doesn't have a body selector or locator, then it can't be bounded
# This happens in the context of the CourseOutlinePage
# pylint: disable=no-member
if self.BODY_SELECTOR and hasattr(self, 'locator'):
return '{}[data-locator="{}"] {}'.format(
self.BODY_SELECTOR,
self.locator,
selector
)
else:
return selector
@property
def name(self):
"""
Returns the display name of this object.
"""
name_element = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).first # pylint: disable=no-member
if name_element:
return name_element.text[0]
else:
return None
@property
def has_status_message(self):
"""
Returns True if the item has a status message, False otherwise.
"""
return self.q(css=self._bounded_selector(self.STATUS_MESSAGE_SELECTOR)).first.visible # pylint: disable=no-member
@property
def status_message(self):
"""
Returns the status message of this item.
"""
return self.q(css=self._bounded_selector(self.STATUS_MESSAGE_SELECTOR)).text[0] # pylint: disable=no-member
@property
def has_staff_lock_warning(self):
""" Returns True if the 'Contains staff only content' message is visible """
return self.status_message == 'Contains staff only content' if self.has_status_message else False
@property
def has_restricted_warning(self):
""" Returns True if the 'Access to this unit is restricted to' message is visible """
return 'Access to this unit is restricted to' in self.status_message if self.has_status_message else False
@property
def is_staff_only(self):
""" Returns True if the visiblity state of this item is staff only (has a black sidebar) """
return "is-staff-only" in self.q(css=self._bounded_selector(''))[0].get_attribute("class") # pylint: disable=no-member
def edit_name(self):
"""
Puts the item's name into editable form.
"""
self.q(css=self._bounded_selector(self.EDIT_BUTTON_SELECTOR)).first.click() # pylint: disable=no-member
def enter_name(self, new_name):
"""
Enters new_name as the item's display name.
"""
set_input_value(self, self._bounded_selector(self.NAME_INPUT_SELECTOR), new_name)
def change_name(self, new_name):
"""
Changes the container's name.
"""
self.edit_name()
set_input_value_and_save(self, self._bounded_selector(self.NAME_INPUT_SELECTOR), new_name)
self.wait_for_ajax() # pylint: disable=no-member
def finalize_name(self):
"""
Presses ENTER, saving the value of the display name for this item.
"""
# pylint: disable=no-member
self.q(css=self._bounded_selector(self.NAME_INPUT_SELECTOR)).results[0].send_keys(Keys.ENTER)
self.wait_for_ajax()
def set_staff_lock(self, is_locked):
"""
Sets the explicit staff lock of item on the container page to is_locked.
"""
modal = self.edit()
modal.is_explicitly_locked = is_locked
modal.save()
def get_enrollment_select_options(self):
"""
Gets the option names available for unit group access
"""
modal = self.edit()
group_options = self.q(css='.group-select-title option').text
modal.cancel()
return group_options
def toggle_unit_access(self, partition_name, group_ids):
"""
Toggles unit access to the groups in group_ids
"""
if group_ids:
modal = self.edit()
groups_select = self.q(css='.group-select-title select')
select_option_by_text(groups_select, partition_name)
for group_id in group_ids:
checkbox = self.q(css='#content-group-{group_id}'.format(group_id=group_id))
checkbox.click()
modal.save()
def in_editable_form(self):
"""
Return whether this outline item's display name is in its editable form.
"""
# pylint: disable=no-member
return "is-editing" in self.q(
css=self._bounded_selector(self.NAME_FIELD_WRAPPER_SELECTOR)
)[0].get_attribute("class")
def edit(self):
"""
Puts the item into editable form.
"""
self.q(css=self._bounded_selector(self.CONFIGURATION_BUTTON_SELECTOR)).first.click() # pylint: disable=no-member
if 'subsection' in self.BODY_SELECTOR:
modal = SubsectionOutlineModal(self)
else:
modal = CourseOutlineModal(self)
EmptyPromise(lambda: modal.is_shown(), 'Modal is shown.').fulfill() # pylint: disable=unnecessary-lambda
return modal
@property
def release_date(self):
"""
Returns the release date from the page. Date is "mm/dd/yyyy" string.
"""
element = self.q(css=self._bounded_selector(".status-release-value")) # pylint: disable=no-member
return element.first.text[0] if element.present else None
@property
def due_date(self):
"""
Returns the due date from the page. Date is "mm/dd/yyyy" string.
"""
element = self.q(css=self._bounded_selector(".status-grading-date")) # pylint: disable=no-member
return element.first.text[0] if element.present else None
@property
def policy(self):
"""
Select the grading format with `value` in the drop-down list.
"""
element = self.q(css=self._bounded_selector(".status-grading-value")) # pylint: disable=no-member
return element.first.text[0] if element.present else None
@wait_for_js
def publish(self):
"""
Publish the unit.
"""
click_css(self, self._bounded_selector('.action-publish'), require_notification=False)
modal = CourseOutlineModal(self)
EmptyPromise(lambda: modal.is_shown(), 'Modal is shown.').fulfill() # pylint: disable=unnecessary-lambda
modal.publish()
@property
def publish_action(self):
"""
Returns the link for publishing a unit.
"""
return self.q(css=self._bounded_selector('.action-publish')).first # pylint: disable=no-member
class CourseOutlineContainer(CourseOutlineItem):
"""
A mixin to a CourseOutline page object that adds the ability to load
a child page object by title or by index.
CHILD_CLASS must be a :class:`CourseOutlineChild` subclass.
"""
CHILD_CLASS = None
ADD_BUTTON_SELECTOR = '> .outline-content > .add-item a.button-new'
def child(self, title, child_class=None):
"""
:type self: object
"""
if not child_class:
child_class = self.CHILD_CLASS
# pylint: disable=no-member
return child_class(
self.browser,
self.q(css=child_class.BODY_SELECTOR).filter(
lambda el: title in [inner.text for inner in
el.find_elements_by_css_selector(child_class.NAME_SELECTOR)]
).attrs('data-locator')[0]
)
def children(self, child_class=None):
"""
Returns all the children page objects of class child_class.
"""
if not child_class:
child_class = self.CHILD_CLASS
# pylint: disable=no-member
return self.q(css=self._bounded_selector(child_class.BODY_SELECTOR)).map(
lambda el: child_class(self.browser, el.get_attribute('data-locator'))).results
def child_at(self, index, child_class=None):
"""
Returns the child at the specified index.
:type self: object
"""
if not child_class:
child_class = self.CHILD_CLASS
return self.children(child_class)[index]
def add_child(self, require_notification=True):
"""
Adds a child to this xblock, waiting for notifications.
"""
click_css(
self,
self._bounded_selector(self.ADD_BUTTON_SELECTOR),
require_notification=require_notification,
)
def expand_subsection(self):
"""
Toggle the expansion of this subsection.
"""
disable_animations(self)
def subsection_expanded():
"""
Returns whether or not this subsection is expanded.
"""
self.wait_for_element_presence(
self._bounded_selector(self.ADD_BUTTON_SELECTOR), 'Toggle control is present'
)
css_element = self._bounded_selector(self.ADD_BUTTON_SELECTOR)
add_button = self.q(css=css_element).first.results # pylint: disable=no-member
self.scroll_to_element(css_element) # pylint: disable=no-member
return add_button and add_button[0].is_displayed()
currently_expanded = subsection_expanded()
# Need to click slightly off-center in order for the click to be recognized.
css_element = self._bounded_selector('.ui-toggle-expansion .fa')
self.scroll_to_element(css_element) # pylint: disable=no-member
ele = self.browser.find_element_by_css_selector(css_element) # pylint: disable=no-member
ActionChains(self.browser).move_to_element_with_offset(ele, 8, 8).click().perform() # pylint: disable=no-member
self.wait_for_element_presence(self._bounded_selector(self.ADD_BUTTON_SELECTOR), 'Subsection is expanded')
EmptyPromise(
lambda: subsection_expanded() != currently_expanded,
"Check that the container {} has been toggled".format(self.locator)
).fulfill()
enable_animations(self)
return self
@property
def is_collapsed(self):
"""
Return whether this outline item is currently collapsed.
"""
css_element = self._bounded_selector('')
self.scroll_to_element(css_element) # pylint: disable=no-member
return "is-collapsed" in self.q(css=css_element).first.attrs("class")[0] # pylint: disable=no-member
class CourseOutlineChild(PageObject, CourseOutlineItem):
"""
A page object that will be used as a child of :class:`CourseOutlineContainer`.
"""
url = None
BODY_SELECTOR = '.outline-item'
def __init__(self, browser, locator):
super(CourseOutlineChild, self).__init__(browser)
self.locator = locator
def is_browser_on_page(self):
return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present
def delete(self, cancel=False):
"""
Clicks the delete button, then cancels at the confirmation prompt if cancel is True.
"""
click_css(self, self._bounded_selector('.delete-button'), require_notification=False)
confirm_prompt(self, cancel)
def _bounded_selector(self, selector):
"""
Return `selector`, but limited to this particular `CourseOutlineChild` context
"""
return '{}[data-locator="{}"] {}'.format(
self.BODY_SELECTOR,
self.locator,
selector
)
@property
def name(self):
titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text
if titles:
return titles[0]
else:
return None
@property
def children(self):
"""
Will return any first-generation descendant items of this item.
"""
descendants = self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(
lambda el: CourseOutlineChild(self.browser, el.get_attribute('data-locator'))).results
# Now remove any non-direct descendants.
grandkids = []
for descendant in descendants:
grandkids.extend(descendant.children)
grand_locators = [grandkid.locator for grandkid in grandkids]
return [descendant for descendant in descendants if descendant.locator not in grand_locators]
class CourseOutlineUnit(CourseOutlineChild):
"""
PageObject that wraps a unit link on the Studio Course Outline page.
"""
url = None
BODY_SELECTOR = '.outline-unit'
NAME_SELECTOR = '.unit-title a'
def go_to(self):
"""
Open the container page linked to by this unit link, and return
an initialized :class:`.ContainerPage` for that unit.
"""
return ContainerPage(self.browser, self.locator).visit()
def is_browser_on_page(self):
return self.q(css=self.BODY_SELECTOR).present
def children(self):
return self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(
lambda el: CourseOutlineUnit(self.browser, el.get_attribute('data-locator'))).results
class CourseOutlineSubsection(CourseOutlineContainer, CourseOutlineChild):
"""
:class`.PageObject` that wraps a subsection block on the Studio Course Outline page.
"""
url = None
BODY_SELECTOR = '.outline-subsection'
NAME_SELECTOR = '.subsection-title'
NAME_FIELD_WRAPPER_SELECTOR = '.subsection-header .wrapper-xblock-field'
CHILD_CLASS = CourseOutlineUnit
def unit(self, title):
"""
Return the :class:`.CourseOutlineUnit with the title `title`.
"""
return self.child(title)
def units(self):
"""
Returns the units in this subsection.
"""
return self.children()
def unit_at(self, index):
"""
Returns the CourseOutlineUnit at the specified index.
"""
return self.child_at(index)
def add_unit(self):
"""
Adds a unit to this subsection
"""
self.q(css=self._bounded_selector(self.ADD_BUTTON_SELECTOR)).click()
class CourseOutlineSection(CourseOutlineContainer, CourseOutlineChild):
"""
:class`.PageObject` that wraps a section block on the Studio Course Outline page.
"""
url = None
BODY_SELECTOR = '.outline-section'
NAME_SELECTOR = '.section-title'
NAME_FIELD_WRAPPER_SELECTOR = '.section-header .wrapper-xblock-field'
CHILD_CLASS = CourseOutlineSubsection
def subsection(self, title):
"""
Return the :class:`.CourseOutlineSubsection` with the title `title`.
"""
return self.child(title)
def subsections(self):
"""
Returns a list of the CourseOutlineSubsections of this section
"""
return self.children()
def subsection_at(self, index):
"""
Returns the CourseOutlineSubsection at the specified index.
"""
return self.child_at(index)
def add_subsection(self):
"""
Adds a subsection to this section
"""
self.add_child()
class ExpandCollapseLinkState(object):
"""
Represents the three states that the expand/collapse link can be in
"""
MISSING = 0
COLLAPSE = 1
EXPAND = 2
class CourseOutlinePage(CoursePage, CourseOutlineContainer):
"""
Course Outline page in Studio.
"""
url_path = "course"
CHILD_CLASS = CourseOutlineSection
EXPAND_COLLAPSE_CSS = '.button-toggle-expand-collapse'
BOTTOM_ADD_SECTION_BUTTON = '.outline > .add-section .button-new'
def is_browser_on_page(self):
return all([
self.q(css='body.view-outline').present,
self.q(css='.content-primary').present,
self.q(css='div.ui-loading.is-hidden').present
])
def click_course_status_section_start_date_link(self):
self.course_start_date_link.click()
def click_course_status_section_checklists_link(self):
self.course_checklists_link.click()
def view_live(self):
"""
Clicks the "View Live" link and switches to the new tab
"""
click_css(self, '.view-live-button', require_notification=False)
self.wait_for_page()
self.browser.switch_to_window(self.browser.window_handles[-1])
def section(self, title):
"""
Return the :class:`.CourseOutlineSection` with the title `title`.
"""
return self.child(title)
def section_at(self, index):
"""
Returns the :class:`.CourseOutlineSection` at the specified index.
"""
return self.child_at(index)
def click_section_name(self, parent_css=''):
"""
Find and click on first section name in course outline
"""
self.q(css='{} .section-name'.format(parent_css)).first.click()
def get_section_name(self, parent_css='', page_refresh=False):
"""
Get the list of names of all sections present
"""
if page_refresh:
self.browser.refresh()
return self.q(css='{} .section-name'.format(parent_css)).text
def section_name_edit_form_present(self, parent_css=''):
"""
Check that section name edit form present
"""
return self.q(css='{} .section-name input'.format(parent_css)).present
def change_section_name(self, new_name, parent_css=''):
"""
Change section name of first section present in course outline
"""
self.click_section_name(parent_css)
self.q(css='{} .section-name input'.format(parent_css)).first.fill(new_name)
self.q(css='{} .section-name .save-button'.format(parent_css)).first.click()
self.wait_for_ajax()
def sections(self):
"""
Returns the sections of this course outline page.
"""
return self.children()
def add_section_from_top_button(self):
"""
Clicks the button for adding a section which resides at the top of the screen.
"""
click_css(self, '.wrapper-mast nav.nav-actions .button-new')
def add_section_from_bottom_button(self, click_child_icon=False):
"""
Clicks the button for adding a section which resides at the bottom of the screen.
"""
element_css = self.BOTTOM_ADD_SECTION_BUTTON
if click_child_icon:
element_css += " .fa-plus"
click_css(self, element_css)
def toggle_expand_collapse(self):
"""
Toggles whether all sections are expanded or collapsed
"""
self.q(css=self.EXPAND_COLLAPSE_CSS).click()
def start_reindex(self):
"""
Starts course reindex by clicking reindex button
"""
self.reindex_button.click()
def open_subsection_settings_dialog(self, index=0):
"""
clicks on the settings button of subsection.
"""
self.q(css=".subsection-header-actions .configure-button").nth(index).click()
self.wait_for_element_presence('.course-outline-modal', 'Subsection settings modal is present.')
def change_problem_release_date(self):
"""
Sets a new start date
"""
self.q(css=".subsection-header-actions .configure-button").first.click()
self.q(css="#start_date").fill("01/01/2030")
self.q(css=".action-save").first.click()
self.wait_for_ajax()
def change_problem_due_date(self, date):
"""
Sets a new due date.
Expects date to be a string that will be accepted by the input (for example, '01/01/1970')
"""
self.q(css=".subsection-header-actions .configure-button").first.click()
self.q(css="#due_date").fill(date)
self.q(css=".action-save").first.click()
self.wait_for_ajax()
def select_visibility_tab(self):
"""
Select the advanced settings tab
"""
self.q(css=".settings-tab-button[data-tab='visibility']").first.click()
self.wait_for_element_presence('input[value=hide_after_due]', 'Visibility fields not present.')
def select_advanced_tab(self, desired_item='special_exam'):
"""
Select the advanced settings tab
"""
self.q(css=".settings-tab-button[data-tab='advanced']").first.click()
if desired_item == 'special_exam':
self.wait_for_element_presence('input.no_special_exam', 'Special exam settings fields not present.')
if desired_item == 'gated_content':
self.wait_for_element_visibility('#is_prereq', 'Gating settings fields are present.')
def make_exam_proctored(self):
"""
Makes a Proctored exam.
"""
self.q(css="input.proctored_exam").first.click()
self.q(css=".action-save").first.click()
self.wait_for_ajax()
def make_exam_timed(self, hide_after_due=False):
"""
Makes a timed exam.
"""
self.q(css="input.timed_exam").first.click()
if hide_after_due:
self.select_visibility_tab()
self.q(css='input[name=content-visibility][value=hide_after_due]').first.click()
self.q(css=".action-save").first.click()
self.wait_for_ajax()
def make_subsection_hidden_after_due_date(self):
"""
Sets a subsection to be hidden after due date.
"""
self.q(css='input[value=hide_after_due]').first.click()
self.q(css=".action-save").first.click()
self.wait_for_ajax()
def select_none_exam(self):
"""
Choose "none" exam but do not press enter
"""
self.q(css="input.no_special_exam").first.click()
def select_timed_exam(self):
"""
Choose a timed exam but do not press enter
"""
self.q(css="input.timed_exam").first.click()
def select_proctored_exam(self):
"""
Choose a proctored exam but do not press enter
"""
self.q(css="input.proctored_exam").first.click()
def select_practice_exam(self):
"""
Choose a practice exam but do not press enter
"""
self.q(css="input.practice_exam").first.click()
def time_allotted_field_visible(self):
"""
returns whether the time allotted field is visible
"""
return self.q(css=".field-time-limit").visible
def exam_review_rules_field_visible(self):
"""
Returns whether the review rules field is visible
"""
return self.q(css=".field-exam-review-rules").visible
def proctoring_items_are_displayed(self):
"""
Returns True if all the items are found.
"""
# The None radio button
if not self.q(css="input.no_special_exam").present:
return False
# The Timed exam radio button
if not self.q(css="input.timed_exam").present:
return False
# The Proctored exam radio button
if not self.q(css="input.proctored_exam").present:
return False
# The Practice exam radio button
if not self.q(css="input.practice_exam").present:
return False
return True
def make_gating_prerequisite(self):
"""
Makes a subsection a gating prerequisite.
"""
if not self.q(css="#is_prereq")[0].is_selected():
self.q(css='label[for="is_prereq"]').click()
self.q(css=".action-save").first.click()
self.wait_for_ajax()
def add_prerequisite_to_subsection(self, min_score, min_completion):
"""
Adds a prerequisite to a subsection.
"""
Select(self.q(css="#prereq")[0]).select_by_index(1)
self.q(css="#prereq_min_score").fill(min_score)
self.q(css="#prereq_min_completion").fill(min_completion)
self.q(css=".action-save").first.click()
self.wait_for_ajax()
def gating_prerequisite_checkbox_is_visible(self):
"""
Returns True if the gating prerequisite checkbox is visible.
"""
# The Prerequisite checkbox is visible
return self.q(css="#is_prereq").visible
def gating_prerequisite_checkbox_is_checked(self):
"""
Returns True if the gating prerequisite checkbox is checked.
"""
# The Prerequisite checkbox is checked
return self.q(css="#is_prereq:checked").present
def gating_prerequisites_dropdown_is_visible(self):
"""
Returns True if the gating prerequisites dropdown is visible.
"""
# The Prerequisites dropdown is visible
return self.q(css="#prereq").visible
def gating_prerequisite_min_score_is_visible(self):
"""
Returns True if the gating prerequisite minimum score input is visible.
"""
# The Prerequisites dropdown is visible
return self.q(css="#prereq_min_score").visible
@property
def has_course_status_section(self):
# SFE and SFE-wrapper classes come from studio-frontend and
# wrap content provided by the studio-frontend package
return self.q(css='.course-status .SFE .SFE-wrapper').is_present()
@property
def course_start_date_link(self):
return self.q(css='.status-link').first
@property
def course_checklists_link(self):
return self.q(css='.status-link').nth(1)
@property
def bottom_add_section_button(self):
"""
Returns the query representing the bottom add section button.
"""
return self.q(css=self.BOTTOM_ADD_SECTION_BUTTON).first
@property
def has_no_content_message(self):
"""
Returns true if a message informing the user that the course has no content is visible
"""
return self.q(css='.outline .no-content').is_present()
@property
def has_rerun_notification(self):
"""
Returns true iff the rerun notification is present on the page.
"""
return self.q(css='.wrapper-alert.is-shown').is_present()
def dismiss_rerun_notification(self):
"""
Clicks the dismiss button in the rerun notification.
"""
self.q(css='.dismiss-button').click()
@property
def expand_collapse_link_state(self):
"""
Returns the current state of the expand/collapse link
"""
link = self.q(css=self.EXPAND_COLLAPSE_CSS)[0]
if not link.is_displayed():
return ExpandCollapseLinkState.MISSING
elif "collapse-all" in link.get_attribute("class"):
return ExpandCollapseLinkState.COLLAPSE
else:
return ExpandCollapseLinkState.EXPAND
@property
def reindex_button(self):
"""
Returns reindex button.
"""
return self.q(css=".button.button-reindex")[0]
def expand_all_subsections(self):
"""
Expands all the subsections in this course.
"""
for section in self.sections():
if section.is_collapsed:
section.expand_subsection()
for subsection in section.subsections():
if subsection.is_collapsed:
subsection.expand_subsection()
@property
def xblocks(self):
"""
Return a list of xblocks loaded on the outline page.
"""
return self.children(CourseOutlineChild)
@property
def license(self):
"""
Returns the course license text, if present. Else returns None.
"""
return self.q(css=".license-value").first.text[0]
@property
def deprecated_warning_visible(self):
"""
Returns true if the deprecated warning is visible.
"""
return self.q(css='.wrapper-alert-error.is-shown').is_present()
@property
def warning_heading_text(self):
"""
Returns deprecated warning heading text.
"""
return self.q(css='.warning-heading-text').text[0]
@property
def components_list_heading(self):
"""
Returns deprecated warning component list heading text.
"""
return self.q(css='.components-list-heading-text').text[0]
@property
def modules_remove_text_shown(self):
"""
Returns True if deprecated warning advance modules remove text is visible.
"""
return self.q(css='.advance-modules-remove-text').visible
@property
def modules_remove_text(self):
"""
Returns deprecated warning advance modules remove text.
"""
return self.q(css='.advance-modules-remove-text').text[0]
@property
def components_visible(self):
"""
Returns True if components list visible.
"""
return self.q(css='.components-list').visible
@property
def components_display_names(self):
"""
Returns deprecated warning components display name list.
"""
return self.q(css='.components-list li>a').text
@property
def deprecated_advance_modules(self):
"""
Returns deprecated advance modules list.
"""
return self.q(css='.advance-modules-list li').text
class CourseOutlineModal(object):
"""
Page object specifically for a modal window on the course outline page.
Subsections are handled slightly differently in some regards, and should use SubsectionOutlineModal.
"""
MODAL_SELECTOR = ".wrapper-modal-window"
def __init__(self, page):
self.page = page
def _bounded_selector(self, selector):
"""
Returns `selector`, but limited to this particular `CourseOutlineModal` context.
"""
return " ".join([self.MODAL_SELECTOR, selector])
def is_shown(self):
"""
Return whether or not the modal defined by self.MODAL_SELECTOR is shown.
"""
return self.page.q(css=self.MODAL_SELECTOR).present
def find_css(self, selector):
"""
Find the given css selector on the page.
"""
return self.page.q(css=self._bounded_selector(selector))
def click(self, selector, index=0):
"""
Perform a Click action on the given selector.
"""
self.find_css(selector).nth(index).click()
def save(self):
"""
Click the save action button, and wait for the ajax call to return.
"""
self.click(".action-save")
self.page.wait_for_ajax()
def publish(self):
"""
Click the publish action button, and wait for the ajax call to return.
"""
self.click(".action-publish")
self.page.wait_for_ajax()
def cancel(self):
"""
Click the cancel action button.
"""
self.click(".action-cancel")
def has_release_date(self):
"""
Check if the input box for the release date exists in the subsection's settings window
"""
return self.find_css("#start_date").present
def has_release_time(self):
"""
Check if the input box for the release time exists in the subsection's settings window
"""
return self.find_css("#start_time").present
def has_due_date(self):
"""
Check if the input box for the due date exists in the subsection's settings window
"""
return self.find_css("#due_date").present
def has_due_time(self):
"""
Check if the input box for the due time exists in the subsection's settings window
"""
return self.find_css("#due_time").present
def has_policy(self):
"""
Check if the input for the grading policy is present.
"""
return self.find_css("#grading_type").present
def set_date(self, property_name, input_selector, date):
"""
Set `date` value to input pointed by `selector` and `property_name`.
"""
month, day, year = map(int, date.split('/'))
self.click(input_selector)
if getattr(self, property_name):
current_month, current_year = map(int, getattr(self, property_name).split('/')[1:])
else: # Use default timepicker values, which are current month and year.
current_month, current_year = datetime.datetime.today().month, datetime.datetime.today().year
date_diff = 12 * (year - current_year) + month - current_month
selector = "a.ui-datepicker-{}".format('next' if date_diff > 0 else 'prev')
for __ in xrange(abs(date_diff)):
self.page.q(css=selector).click()
self.page.q(css="a.ui-state-default").nth(day - 1).click() # set day
self.page.wait_for_element_invisibility("#ui-datepicker-div", "datepicker should be closed")
EmptyPromise(
lambda: getattr(self, property_name) == u'{m}/{d}/{y}'.format(m=month, d=day, y=year),
"{} is updated in modal.".format(property_name)
).fulfill()
def set_time(self, input_selector, time):
"""
Set `time` value to input pointed by `input_selector`
Not using the time picker to make sure it's not being rounded up
"""
self.page.q(css=input_selector).fill(time)
self.page.q(css=input_selector).results[0].send_keys(Keys.ENTER)
@property
def release_date(self):
"""
Returns the unit's release date. Date is "mm/dd/yyyy" string.
"""
return self.find_css("#start_date").first.attrs('value')[0]
@release_date.setter
def release_date(self, date):
"""
Sets the unit's release date to `date`. Date is "mm/dd/yyyy" string.
"""
self.set_date('release_date', "#start_date", date)
@property
def release_time(self):
"""
Returns the current value of the release time. Default is u'00:00'
"""
return self.find_css("#start_time").first.attrs('value')[0]
@release_time.setter
def release_time(self, time):
"""
Time is "HH:MM" string.
"""
self.set_time("#start_time", time)
@property
def due_date(self):
"""
Returns the due date from the page. Date is "mm/dd/yyyy" string.
"""
return self.find_css("#due_date").first.attrs('value')[0]
@due_date.setter
def due_date(self, date):
"""
Sets the due date for the unit. Date is "mm/dd/yyyy" string.
"""
self.set_date('due_date', "#due_date", date)
@property
def due_time(self):
"""
Returns the current value of the release time. Default is u''
"""
return self.find_css("#due_time").first.attrs('value')[0]
@due_time.setter
def due_time(self, time):
"""
Time is "HH:MM" string.
"""
self.set_time("#due_time", time)
@property
def policy(self):
"""
Select the grading format with `value` in the drop-down list.
"""
element = self.find_css('#grading_type')[0]
return self.get_selected_option_text(element)
@policy.setter
def policy(self, grading_label):
"""
Select the grading format with `value` in the drop-down list.
"""
element = self.find_css('#grading_type')[0]
select = Select(element)
select.select_by_visible_text(grading_label)
EmptyPromise(
lambda: self.policy == grading_label,
"Grading label is updated.",
).fulfill()
@property
def is_staff_lock_visible(self):
"""
Returns True if the staff lock option is visible.
"""
return self.find_css('#staff_lock').visible
def ensure_staff_lock_visible(self):
"""
Ensures the staff lock option is visible, clicking on the advanced tab
if needed.
"""
if not self.is_staff_lock_visible:
self.find_css(".settings-tab-button[data-tab=visibility]").click()
EmptyPromise(
lambda: self.is_staff_lock_visible,
"Staff lock option is visible",
).fulfill()
@property
def is_explicitly_locked(self):
"""
Returns true if the explict staff lock checkbox is checked, false otherwise.
"""
self.ensure_staff_lock_visible()
return self.find_css('#staff_lock')[0].is_selected()
@is_explicitly_locked.setter
def is_explicitly_locked(self, value):
"""
Checks the explicit staff lock box if value is true, otherwise selects "visible".
"""
self.ensure_staff_lock_visible()
if value != self.is_explicitly_locked:
self.find_css('#staff_lock').click()
EmptyPromise(lambda: value == self.is_explicitly_locked, "Explicit staff lock is updated").fulfill()
def shows_staff_lock_warning(self):
"""
Returns true iff the staff lock warning is visible.
"""
return self.find_css('.staff-lock .tip-warning').visible
def get_selected_option_text(self, element):
"""
Returns the text of the first selected option for the element.
"""
if element:
select = Select(element)
return select.first_selected_option.text
else:
return None
class SubsectionOutlineModal(CourseOutlineModal):
"""
Subclass to handle a few special cases with subsection modals.
"""
@property
def is_explicitly_locked(self):
"""
Override - returns True if staff_only is set.
"""
return self.subsection_visibility == 'staff_only'
@property
def subsection_visibility(self):
"""
Returns the current visibility setting for a subsection
"""
self.ensure_staff_lock_visible()
return self.find_css('input[name=content-visibility]:checked').first.attrs('value')[0]
@is_explicitly_locked.setter
def is_explicitly_locked(self, value):
"""
Override - sets visibility to staff_only if True, else 'visible'.
For hide_after_due, use the set_subsection_visibility method directly.
"""
self.subsection_visibility = 'staff_only' if value else 'visible'
@subsection_visibility.setter
def subsection_visibility(self, value):
"""
Sets the subsection visibility to the given value.
"""
self.ensure_staff_lock_visible()
self.find_css('input[name=content-visibility][value=' + value + ']').click()
EmptyPromise(lambda: value == self.subsection_visibility, "Subsection visibility is updated").fulfill()
@property
def is_staff_lock_visible(self):
"""
Override - Returns true if the staff lock option is visible.
"""
return self.find_css('input[name=content-visibility]').visible
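
Editor's note: a hedged sketch of how these page objects are typically driven from a bok-choy acceptance test. The `CoursePage` constructor arguments and course identifiers are assumptions, not taken from this file:

```python
# Illustrative only; course identifiers are placeholders.
from bok_choy.browser import browser as make_browser
from common.test.acceptance.pages.studio.overview import CourseOutlinePage

browser = make_browser()
outline = CourseOutlinePage(browser, "TestOrg", "CS101", "2018")  # org/number/run assumed
outline.visit()

section = outline.section_at(0)
subsection = section.subsection_at(0)
subsection.expand_subsection()
unit_page = subsection.unit_at(0).go_to()  # returns a ContainerPage
```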
--- repo_name: kenshay/ImageScripter | path: ProgramData/SystemFiles/Python/Lib/site-packages/snowballstemmer/romanian_stemmer.py | copies: 19 | size: 30431 ---

# This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class RomanianStemmer(BaseStemmer):
'''
This class was automatically generated by a Snowball to Python interpreter.
It implements the stemming algorithm defined by a Snowball script.
'''
serialVersionUID = 1
a_0 = [
Among(u"", -1, 3),
Among(u"I", 0, 1),
Among(u"U", 0, 2)
]
a_1 = [
Among(u"ea", -1, 3),
Among(u"a\u0163ia", -1, 7),
Among(u"aua", -1, 2),
Among(u"iua", -1, 4),
Among(u"a\u0163ie", -1, 7),
Among(u"ele", -1, 3),
Among(u"ile", -1, 5),
Among(u"iile", 6, 4),
Among(u"iei", -1, 4),
Among(u"atei", -1, 6),
Among(u"ii", -1, 4),
Among(u"ului", -1, 1),
Among(u"ul", -1, 1),
Among(u"elor", -1, 3),
Among(u"ilor", -1, 4),
Among(u"iilor", 14, 4)
]
a_2 = [
Among(u"icala", -1, 4),
Among(u"iciva", -1, 4),
Among(u"ativa", -1, 5),
Among(u"itiva", -1, 6),
Among(u"icale", -1, 4),
Among(u"a\u0163iune", -1, 5),
Among(u"i\u0163iune", -1, 6),
Among(u"atoare", -1, 5),
Among(u"itoare", -1, 6),
Among(u"\u0103toare", -1, 5),
Among(u"icitate", -1, 4),
Among(u"abilitate", -1, 1),
Among(u"ibilitate", -1, 2),
Among(u"ivitate", -1, 3),
Among(u"icive", -1, 4),
Among(u"ative", -1, 5),
Among(u"itive", -1, 6),
Among(u"icali", -1, 4),
Among(u"atori", -1, 5),
Among(u"icatori", 18, 4),
Among(u"itori", -1, 6),
Among(u"\u0103tori", -1, 5),
Among(u"icitati", -1, 4),
Among(u"abilitati", -1, 1),
Among(u"ivitati", -1, 3),
Among(u"icivi", -1, 4),
Among(u"ativi", -1, 5),
Among(u"itivi", -1, 6),
Among(u"icit\u0103i", -1, 4),
Among(u"abilit\u0103i", -1, 1),
Among(u"ivit\u0103i", -1, 3),
Among(u"icit\u0103\u0163i", -1, 4),
Among(u"abilit\u0103\u0163i", -1, 1),
Among(u"ivit\u0103\u0163i", -1, 3),
Among(u"ical", -1, 4),
Among(u"ator", -1, 5),
Among(u"icator", 35, 4),
Among(u"itor", -1, 6),
Among(u"\u0103tor", -1, 5),
Among(u"iciv", -1, 4),
Among(u"ativ", -1, 5),
Among(u"itiv", -1, 6),
Among(u"ical\u0103", -1, 4),
Among(u"iciv\u0103", -1, 4),
Among(u"ativ\u0103", -1, 5),
Among(u"itiv\u0103", -1, 6)
]
a_3 = [
Among(u"ica", -1, 1),
Among(u"abila", -1, 1),
Among(u"ibila", -1, 1),
Among(u"oasa", -1, 1),
Among(u"ata", -1, 1),
Among(u"ita", -1, 1),
Among(u"anta", -1, 1),
Among(u"ista", -1, 3),
Among(u"uta", -1, 1),
Among(u"iva", -1, 1),
Among(u"ic", -1, 1),
Among(u"ice", -1, 1),
Among(u"abile", -1, 1),
Among(u"ibile", -1, 1),
Among(u"isme", -1, 3),
Among(u"iune", -1, 2),
Among(u"oase", -1, 1),
Among(u"ate", -1, 1),
Among(u"itate", 17, 1),
Among(u"ite", -1, 1),
Among(u"ante", -1, 1),
Among(u"iste", -1, 3),
Among(u"ute", -1, 1),
Among(u"ive", -1, 1),
Among(u"ici", -1, 1),
Among(u"abili", -1, 1),
Among(u"ibili", -1, 1),
Among(u"iuni", -1, 2),
Among(u"atori", -1, 1),
Among(u"osi", -1, 1),
Among(u"ati", -1, 1),
Among(u"itati", 30, 1),
Among(u"iti", -1, 1),
Among(u"anti", -1, 1),
Among(u"isti", -1, 3),
Among(u"uti", -1, 1),
Among(u"i\u015Fti", -1, 3),
Among(u"ivi", -1, 1),
Among(u"it\u0103i", -1, 1),
Among(u"o\u015Fi", -1, 1),
Among(u"it\u0103\u0163i", -1, 1),
Among(u"abil", -1, 1),
Among(u"ibil", -1, 1),
Among(u"ism", -1, 3),
Among(u"ator", -1, 1),
Among(u"os", -1, 1),
Among(u"at", -1, 1),
Among(u"it", -1, 1),
Among(u"ant", -1, 1),
Among(u"ist", -1, 3),
Among(u"ut", -1, 1),
Among(u"iv", -1, 1),
Among(u"ic\u0103", -1, 1),
Among(u"abil\u0103", -1, 1),
Among(u"ibil\u0103", -1, 1),
Among(u"oas\u0103", -1, 1),
Among(u"at\u0103", -1, 1),
Among(u"it\u0103", -1, 1),
Among(u"ant\u0103", -1, 1),
Among(u"ist\u0103", -1, 3),
Among(u"ut\u0103", -1, 1),
Among(u"iv\u0103", -1, 1)
]
a_4 = [
Among(u"ea", -1, 1),
Among(u"ia", -1, 1),
Among(u"esc", -1, 1),
Among(u"\u0103sc", -1, 1),
Among(u"ind", -1, 1),
Among(u"\u00E2nd", -1, 1),
Among(u"are", -1, 1),
Among(u"ere", -1, 1),
Among(u"ire", -1, 1),
Among(u"\u00E2re", -1, 1),
Among(u"se", -1, 2),
Among(u"ase", 10, 1),
Among(u"sese", 10, 2),
Among(u"ise", 10, 1),
Among(u"use", 10, 1),
Among(u"\u00E2se", 10, 1),
Among(u"e\u015Fte", -1, 1),
Among(u"\u0103\u015Fte", -1, 1),
Among(u"eze", -1, 1),
Among(u"ai", -1, 1),
Among(u"eai", 19, 1),
Among(u"iai", 19, 1),
Among(u"sei", -1, 2),
Among(u"e\u015Fti", -1, 1),
Among(u"\u0103\u015Fti", -1, 1),
Among(u"ui", -1, 1),
Among(u"ezi", -1, 1),
Among(u"\u00E2i", -1, 1),
Among(u"a\u015Fi", -1, 1),
Among(u"se\u015Fi", -1, 2),
Among(u"ase\u015Fi", 29, 1),
Among(u"sese\u015Fi", 29, 2),
Among(u"ise\u015Fi", 29, 1),
Among(u"use\u015Fi", 29, 1),
Among(u"\u00E2se\u015Fi", 29, 1),
Among(u"i\u015Fi", -1, 1),
Among(u"u\u015Fi", -1, 1),
Among(u"\u00E2\u015Fi", -1, 1),
Among(u"a\u0163i", -1, 2),
Among(u"ea\u0163i", 38, 1),
Among(u"ia\u0163i", 38, 1),
Among(u"e\u0163i", -1, 2),
Among(u"i\u0163i", -1, 2),
Among(u"\u00E2\u0163i", -1, 2),
Among(u"ar\u0103\u0163i", -1, 1),
Among(u"ser\u0103\u0163i", -1, 2),
Among(u"aser\u0103\u0163i", 45, 1),
Among(u"seser\u0103\u0163i", 45, 2),
Among(u"iser\u0103\u0163i", 45, 1),
Among(u"user\u0103\u0163i", 45, 1),
Among(u"\u00E2ser\u0103\u0163i", 45, 1),
Among(u"ir\u0103\u0163i", -1, 1),
Among(u"ur\u0103\u0163i", -1, 1),
Among(u"\u00E2r\u0103\u0163i", -1, 1),
Among(u"am", -1, 1),
Among(u"eam", 54, 1),
Among(u"iam", 54, 1),
Among(u"em", -1, 2),
Among(u"asem", 57, 1),
Among(u"sesem", 57, 2),
Among(u"isem", 57, 1),
Among(u"usem", 57, 1),
Among(u"\u00E2sem", 57, 1),
Among(u"im", -1, 2),
Among(u"\u00E2m", -1, 2),
Among(u"\u0103m", -1, 2),
Among(u"ar\u0103m", 65, 1),
Among(u"ser\u0103m", 65, 2),
Among(u"aser\u0103m", 67, 1),
Among(u"seser\u0103m", 67, 2),
Among(u"iser\u0103m", 67, 1),
Among(u"user\u0103m", 67, 1),
Among(u"\u00E2ser\u0103m", 67, 1),
Among(u"ir\u0103m", 65, 1),
Among(u"ur\u0103m", 65, 1),
Among(u"\u00E2r\u0103m", 65, 1),
Among(u"au", -1, 1),
Among(u"eau", 76, 1),
Among(u"iau", 76, 1),
Among(u"indu", -1, 1),
Among(u"\u00E2ndu", -1, 1),
Among(u"ez", -1, 1),
Among(u"easc\u0103", -1, 1),
Among(u"ar\u0103", -1, 1),
Among(u"ser\u0103", -1, 2),
Among(u"aser\u0103", 84, 1),
Among(u"seser\u0103", 84, 2),
Among(u"iser\u0103", 84, 1),
Among(u"user\u0103", 84, 1),
Among(u"\u00E2ser\u0103", 84, 1),
Among(u"ir\u0103", -1, 1),
Among(u"ur\u0103", -1, 1),
Among(u"\u00E2r\u0103", -1, 1),
Among(u"eaz\u0103", -1, 1)
]
a_5 = [
Among(u"a", -1, 1),
Among(u"e", -1, 1),
Among(u"ie", 1, 1),
Among(u"i", -1, 1),
Among(u"\u0103", -1, 1)
]
g_v = [17, 65, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 32, 0, 0, 4]
B_standard_suffix_removed = False
I_p2 = 0
I_p1 = 0
I_pV = 0
def copy_from(self, other):
self.B_standard_suffix_removed = other.B_standard_suffix_removed
self.I_p2 = other.I_p2
self.I_p1 = other.I_p1
self.I_pV = other.I_pV
super(RomanianStemmer, self).copy_from(other)
def r_prelude(self):
# (, line 31
# repeat, line 32
try:
while True:
try:
v_1 = self.cursor
try:
# goto, line 32
try:
while True:
v_2 = self.cursor
try:
# (, line 32
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab4()
# [, line 33
self.bra = self.cursor
# or, line 33
try:
v_3 = self.cursor
try:
# (, line 33
# literal, line 33
if not self.eq_s(1, u"u"):
raise lab6()
# ], line 33
self.ket = self.cursor
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab6()
# <-, line 33
if not self.slice_from(u"U"):
return False
raise lab5()
except lab6: pass
self.cursor = v_3
# (, line 34
# literal, line 34
if not self.eq_s(1, u"i"):
raise lab4()
# ], line 34
self.ket = self.cursor
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab4()
# <-, line 34
if not self.slice_from(u"I"):
return False
except lab5: pass
self.cursor = v_2
raise lab3()
except lab4: pass
self.cursor = v_2
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
except lab3: pass
raise lab1()
except lab2: pass
self.cursor = v_1
raise lab0()
except lab1: pass
except lab0: pass
return True
def r_mark_regions(self):
# (, line 38
        self.I_pV = self.limit
        self.I_p1 = self.limit
        self.I_p2 = self.limit
# do, line 44
v_1 = self.cursor
try:
# (, line 44
# or, line 46
try:
v_2 = self.cursor
try:
# (, line 45
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab2()
# or, line 45
try:
v_3 = self.cursor
try:
# (, line 45
if not self.out_grouping(RomanianStemmer.g_v, 97, 259):
raise lab4()
# gopast, line 45
try:
while True:
try:
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab6()
raise lab5()
except lab6: pass
if self.cursor >= self.limit:
raise lab4()
self.cursor += 1
except lab5: pass
raise lab3()
except lab4: pass
self.cursor = v_3
# (, line 45
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab2()
# gopast, line 45
try:
while True:
try:
if not self.out_grouping(RomanianStemmer.g_v, 97, 259):
raise lab8()
raise lab7()
except lab8: pass
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
except lab7: pass
except lab3: pass
raise lab1()
except lab2: pass
self.cursor = v_2
# (, line 47
if not self.out_grouping(RomanianStemmer.g_v, 97, 259):
raise lab0()
# or, line 47
try:
v_6 = self.cursor
try:
# (, line 47
if not self.out_grouping(RomanianStemmer.g_v, 97, 259):
raise lab10()
# gopast, line 47
try:
while True:
try:
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab12()
raise lab11()
except lab12: pass
if self.cursor >= self.limit:
raise lab10()
self.cursor += 1
except lab11: pass
raise lab9()
except lab10: pass
self.cursor = v_6
# (, line 47
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab0()
# next, line 47
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
except lab9: pass
except lab1: pass
# setmark pV, line 48
self.I_pV = self.cursor
except lab0: pass
self.cursor = v_1
# do, line 50
v_8 = self.cursor
try:
# (, line 50
# gopast, line 51
try:
while True:
try:
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab15()
raise lab14()
except lab15: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab14: pass
# gopast, line 51
try:
while True:
try:
if not self.out_grouping(RomanianStemmer.g_v, 97, 259):
raise lab17()
raise lab16()
except lab17: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab16: pass
# setmark p1, line 51
self.I_p1 = self.cursor
# gopast, line 52
try:
while True:
try:
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab19()
raise lab18()
except lab19: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab18: pass
# gopast, line 52
try:
while True:
try:
if not self.out_grouping(RomanianStemmer.g_v, 97, 259):
raise lab21()
raise lab20()
except lab21: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab20: pass
# setmark p2, line 52
self.I_p2 = self.cursor
except lab13: pass
self.cursor = v_8
return True
def r_postlude(self):
# repeat, line 56
try:
while True:
try:
v_1 = self.cursor
try:
# (, line 56
# [, line 58
self.bra = self.cursor
# substring, line 58
among_var = self.find_among(RomanianStemmer.a_0, 3)
if among_var == 0:
raise lab2()
# ], line 58
self.ket = self.cursor
if among_var == 0:
raise lab2()
elif among_var == 1:
# (, line 59
# <-, line 59
if not self.slice_from(u"i"):
return False
elif among_var == 2:
# (, line 60
# <-, line 60
if not self.slice_from(u"u"):
return False
elif among_var == 3:
# (, line 61
# next, line 61
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
raise lab1()
except lab2: pass
self.cursor = v_1
raise lab0()
except lab1: pass
except lab0: pass
return True
def r_RV(self):
if not self.I_pV <= self.cursor:
return False
return True
def r_R1(self):
if not self.I_p1 <= self.cursor:
return False
return True
def r_R2(self):
if not self.I_p2 <= self.cursor:
return False
return True
def r_step_0(self):
# (, line 72
# [, line 73
self.ket = self.cursor
# substring, line 73
among_var = self.find_among_b(RomanianStemmer.a_1, 16)
if among_var == 0:
return False
# ], line 73
self.bra = self.cursor
# call R1, line 73
if not self.r_R1():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 75
# delete, line 75
if not self.slice_del():
return False
elif among_var == 2:
# (, line 77
# <-, line 77
if not self.slice_from(u"a"):
return False
elif among_var == 3:
# (, line 79
# <-, line 79
if not self.slice_from(u"e"):
return False
elif among_var == 4:
# (, line 81
# <-, line 81
if not self.slice_from(u"i"):
return False
elif among_var == 5:
# (, line 83
# not, line 83
v_1 = self.limit - self.cursor
try:
# literal, line 83
if not self.eq_s_b(2, u"ab"):
raise lab0()
return False
except lab0: pass
self.cursor = self.limit - v_1
# <-, line 83
if not self.slice_from(u"i"):
return False
elif among_var == 6:
# (, line 85
# <-, line 85
if not self.slice_from(u"at"):
return False
elif among_var == 7:
# (, line 87
# <-, line 87
if not self.slice_from(u"a\u0163i"):
return False
return True
def r_combo_suffix(self):
# test, line 91
v_1 = self.limit - self.cursor
# (, line 91
# [, line 92
self.ket = self.cursor
# substring, line 92
among_var = self.find_among_b(RomanianStemmer.a_2, 46)
if among_var == 0:
return False
# ], line 92
self.bra = self.cursor
# call R1, line 92
if not self.r_R1():
return False
# (, line 92
if among_var == 0:
return False
elif among_var == 1:
# (, line 100
# <-, line 101
if not self.slice_from(u"abil"):
return False
elif among_var == 2:
# (, line 103
# <-, line 104
if not self.slice_from(u"ibil"):
return False
elif among_var == 3:
# (, line 106
# <-, line 107
if not self.slice_from(u"iv"):
return False
elif among_var == 4:
# (, line 112
# <-, line 113
if not self.slice_from(u"ic"):
return False
elif among_var == 5:
# (, line 117
# <-, line 118
if not self.slice_from(u"at"):
return False
elif among_var == 6:
# (, line 121
# <-, line 122
if not self.slice_from(u"it"):
return False
# set standard_suffix_removed, line 125
self.B_standard_suffix_removed = True
self.cursor = self.limit - v_1
return True
def r_standard_suffix(self):
# (, line 129
# unset standard_suffix_removed, line 130
self.B_standard_suffix_removed = False
# repeat, line 131
try:
while True:
try:
v_1 = self.limit - self.cursor
try:
# call combo_suffix, line 131
if not self.r_combo_suffix():
raise lab2()
raise lab1()
except lab2: pass
self.cursor = self.limit - v_1
raise lab0()
except lab1: pass
except lab0: pass
# [, line 132
self.ket = self.cursor
# substring, line 132
among_var = self.find_among_b(RomanianStemmer.a_3, 62)
if among_var == 0:
return False
# ], line 132
self.bra = self.cursor
# call R2, line 132
if not self.r_R2():
return False
# (, line 132
if among_var == 0:
return False
elif among_var == 1:
# (, line 148
# delete, line 149
if not self.slice_del():
return False
elif among_var == 2:
# (, line 151
# literal, line 152
if not self.eq_s_b(1, u"\u0163"):
return False
# ], line 152
self.bra = self.cursor
# <-, line 152
if not self.slice_from(u"t"):
return False
elif among_var == 3:
# (, line 155
# <-, line 156
if not self.slice_from(u"ist"):
return False
# set standard_suffix_removed, line 160
self.B_standard_suffix_removed = True
return True
def r_verb_suffix(self):
# setlimit, line 164
v_1 = self.limit - self.cursor
# tomark, line 164
if self.cursor < self.I_pV:
return False
self.cursor = self.I_pV
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 164
# [, line 165
self.ket = self.cursor
# substring, line 165
among_var = self.find_among_b(RomanianStemmer.a_4, 94)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 165
self.bra = self.cursor
if among_var == 0:
self.limit_backward = v_2
return False
elif among_var == 1:
# (, line 200
# or, line 200
try:
v_3 = self.limit - self.cursor
try:
if not self.out_grouping_b(RomanianStemmer.g_v, 97, 259):
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_3
# literal, line 200
if not self.eq_s_b(1, u"u"):
self.limit_backward = v_2
return False
except lab0: pass
# delete, line 200
if not self.slice_del():
return False
elif among_var == 2:
# (, line 214
# delete, line 214
if not self.slice_del():
return False
self.limit_backward = v_2
return True
def r_vowel_suffix(self):
# (, line 218
# [, line 219
self.ket = self.cursor
# substring, line 219
among_var = self.find_among_b(RomanianStemmer.a_5, 5)
if among_var == 0:
return False
# ], line 219
self.bra = self.cursor
# call RV, line 219
if not self.r_RV():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 220
# delete, line 220
if not self.slice_del():
return False
return True
def _stem(self):
# (, line 225
# do, line 226
v_1 = self.cursor
try:
# call prelude, line 226
if not self.r_prelude():
raise lab0()
except lab0: pass
self.cursor = v_1
# do, line 227
v_2 = self.cursor
try:
# call mark_regions, line 227
if not self.r_mark_regions():
raise lab1()
except lab1: pass
self.cursor = v_2
# backwards, line 228
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 228
# do, line 229
v_3 = self.limit - self.cursor
try:
# call step_0, line 229
if not self.r_step_0():
raise lab2()
except lab2: pass
self.cursor = self.limit - v_3
# do, line 230
v_4 = self.limit - self.cursor
try:
# call standard_suffix, line 230
if not self.r_standard_suffix():
raise lab3()
except lab3: pass
self.cursor = self.limit - v_4
# do, line 231
v_5 = self.limit - self.cursor
try:
# (, line 231
# or, line 231
try:
v_6 = self.limit - self.cursor
try:
# Boolean test standard_suffix_removed, line 231
if not self.B_standard_suffix_removed:
raise lab6()
raise lab5()
except lab6: pass
self.cursor = self.limit - v_6
# call verb_suffix, line 231
if not self.r_verb_suffix():
raise lab4()
except lab5: pass
except lab4: pass
self.cursor = self.limit - v_5
# do, line 232
v_7 = self.limit - self.cursor
try:
# call vowel_suffix, line 232
if not self.r_vowel_suffix():
raise lab7()
except lab7: pass
self.cursor = self.limit - v_7
self.cursor = self.limit_backward
# do, line 234
v_8 = self.cursor
try:
# call postlude, line 234
if not self.r_postlude():
raise lab8()
except lab8: pass
self.cursor = v_8
return True
def equals(self, o):
return isinstance(o, RomanianStemmer)
def hashCode(self):
return hash("RomanianStemmer")
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
class lab6(BaseException): pass
class lab7(BaseException): pass
class lab8(BaseException): pass
class lab9(BaseException): pass
class lab10(BaseException): pass
class lab11(BaseException): pass
class lab12(BaseException): pass
class lab13(BaseException): pass
class lab14(BaseException): pass
class lab15(BaseException): pass
class lab16(BaseException): pass
class lab17(BaseException): pass
class lab18(BaseException): pass
class lab19(BaseException): pass
class lab20(BaseException): pass
class lab21(BaseException): pass
| gpl-3.0 | 4,002,406,083,959,900,000 | 6,890,794,635,433,320,000 | 32.812222 | 98 | 0.397457 | false |
jeanlinux/calibre | src/calibre/ebooks/rtf2xml/table.py | 24 | 20865 | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import sys, os
from calibre.ebooks.rtf2xml import copy, border_parse
from calibre.ptempfile import better_mktemp
"""
States.
1. 'not_in_table'
1. 'cw<tb<row-def___' start a row definition
2. 'mi<mk<in-table__' start table
2. 'in_table'
1. 'mi<mk<pard-start', start of a row, cell
2. 'mi<mk<not-in-tbl', end the table.
3. 'cw<tb<row-def___' start a row definition
3. in_row_definition
    1. 'mi<mk<not-in-tbl' : end the row definition. If in table, end the table.
    2. 'mi<mk<pard-start' : end the row definition
if already in the table, start a row and cell.
3. 'cw<tb<row_______' : end the row definition, end the row
4. 'cw...' use another method to handle the control word
control word might be added to dictionary.
5. 'mi<mk<in-table__' If already in table, do nothing. Otherwise
start the table.
4. 'in_row'
1. 'mi<mk<pard-start', start cell
2. 'mi<mk<not-in-tbl' end table,
3. 'cw<tb<row_______' close row,
5. 'in_cell'
1. 'mi<mk<not-in-tbl', end table
2. 'cw<tb<cell______', end cell
"""
class Table:
"""
Make tables.
Logic:
Read one line at a time. The default state (self.__state) is
'not_in_table'. Look for either a 'cw<tb<in-table__', or a row definition.
"""
def __init__(self,
in_file,
bug_handler,
copy = None,
run_level = 1,):
"""
Required:
'file'--file to parse
Optional:
'copy'-- whether to make a copy of result for debugging
Returns:
nothing
"""
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
self.__run_level = run_level
self.__write_to = better_mktemp()
def __initiate_values(self):
"""
Initiate all values.
"""
self.__state_dict = {
'in_table': self.__in_table_func,
'in_row_def': self.__in_row_def_func,
'not_in_table': self.__not_in_table_func,
'in_cell': self.__in_cell_func,
'in_row': self.__in_row_func,
}
self.__not_in_table_dict = {
'cw<tb<row-def___': self.__found_row_def_func,
'cw<tb<in-table__': self.__start_table_func,
'mi<mk<in-table__' : self.__start_table_func,
}
# can't use this dictionary. When in row_definition, many tokens
# require multiple definitions
self.__in_row_definition_dict = {
'mi<mk<not-in-tbl' : self.__end_row_table_func,
'mi<mk<pard-start' : self.__end_row_def_func,
}
self.__in_row_dict = {
'mi<mk<not-in-tbl' : self.__close_table,
'mi<mk<pard-start' : self.__start_cell_func,
'cw<tb<row_______' : self.__end_row_func,
'cw<tb<cell______' : self.__empty_cell,
}
# set the default state
self.__state = ['not_in_table']
# set empty data for all tables
self.__table_data = []
# just in case there is no table data
self.__row_dict = {}
self.__cell_list = []
self.__cell_widths = []
def __in_table_func(self, line):
"""
Requires:
line -- line to parse
Logic:
Look for the end of the table. If found, close out the table.
Look for 'mi<mk<pard-start', which marks the beginning of a row. Start
a row and start a cell.
"""
# 'cell' : ('tb', 'cell______', self.default_func),
if self.__token_info == 'mi<mk<not-in-tbl' or\
self.__token_info == 'mi<mk<sect-start' or\
self.__token_info == 'mi<mk<sect-close' or\
self.__token_info == 'mi<mk<body-close':
self.__close_table(line)
elif self.__token_info == 'mi<mk<pard-start':
self.__start_row_func(line)
self.__start_cell_func(line)
elif self.__token_info == 'cw<tb<row-def___':
self.__found_row_def_func(line)
elif self.__token_info == 'cw<tb<cell______':
self.__start_row_func(line)
self.__empty_cell( line)
self.__write_obj.write(line)
def __not_in_table_func(self, line):
"""
Requires:
line -- the line of text read in from document
Returns:
nothing
Logic:
The state is not in a table, so look for the two tokens that
mark the start of a table: 'cw<tb<row-def', or 'cw<tb<in-table__'.
If these tokens are found, use another method to start a table
and change states. Otherwise, just output the line.
"""
action = self.__not_in_table_dict.get(self.__token_info)
if action:
action(line)
self.__write_obj.write(line)
def __close_table(self, line):
"""
Requires:
line -- line to parse
Returns:
            nothing
Logic:
Write the end marker for the table.
Write the end tag for the table.
Set the state to ['not_in_table']
"""
self.__write_obj.write('mi<mk<table-end_\n')
self.__state = ['not_in_table']
self.__table_data[-1]['number-of-columns'] = self.__max_number_cells_in_row
self.__table_data[-1]['number-of-rows'] = self.__rows_in_table
average_cells_in_row = self.__mode(self.__list_of_cells_in_row)
self.__table_data[-1]['average-cells-per-row'] = average_cells_in_row
average_cell_width = self.__mode(self.__cell_widths)
self.__table_data[-1]['average-cell-width'] = average_cell_width
def __found_row_def_func(self, line):
"""
Requires:
line don't need this except for consistency with other methods.
Returns:
nothing
Logic:
A row definition has been found. Collect all the data from this
to use later in writing attributes for the table.
"""
self.__state.append('in_row_def')
self.__last_cell_position = 0
self.__row_dict = {}
self.__cell_list = []
self.__cell_list.append({})
self.__cell_widths = []
def __start_table_func(self, line):
"""
Requires:
line -- line to parse
Returns:
            nothing
Logic:
Add the 'in_table' to the state list.
Write out the table marker.
Initialize table values (not sure about these yet)
"""
        self.__rows_in_table = 0
        self.__cells_in_table = 0
        self.__cells_in_row = 0
self.__max_number_cells_in_row = 0
self.__table_data.append({})
self.__list_of_cells_in_row = []
self.__write_obj.write('mi<mk<tabl-start\n')
self.__state.append('in_table')
    def __end_row_table_func(self, line):
        """
        Requires:
            line --just for consistency
        Returns:
            nothing
        Logic:
            Close out the table.
        """
        self.__close_table(line)
def __end_row_def_func(self, line):
"""
Requires:
line --just for consistency
Returns:
nothing
Logic:
change the state.
get rid of the last {} in the cell list
figure out the number of cells based on the self.__row_dict[widths]
('122, 122')
"""
if len(self.__state) > 0:
if self.__state[-1] == 'in_row_def':
self.__state.pop()
# added [{]] at the *end* of each /cell. Get rid of extra one
self.__cell_list.pop()
widths = self.__row_dict.get('widths')
if widths:
width_list = widths.split(',')
num_cells = len (width_list)
self.__row_dict['number-of-cells'] = num_cells
def __in_row_def_func(self, line):
"""
Requires:
line --line to parse
Returns:
nothing
Logic:
In the text that defines a row. If a control word is found, handle the
control word with another method.
Check for states that will end this state.
While in the row definition, certain tokens can end a row or end a table.
        If a paragraph definition (pard-start) is found and you are already in
        a table, start a row.
"""
if self.__token_info == 'cw<tb<row_______':
# write tags
self.__end_row_func(line)
# change the state
self.__end_row_def_func(line)
self.__write_obj.write(line)
elif line[0:2] == 'cw':
self.__handle_row_token(line)
self.__write_obj.write(line)
elif self.__token_info == 'mi<mk<not-in-tbl' and 'in_table' in self.__state:
self.__end_row_def_func(line)
self.__close_table(line)
self.__write_obj.write(line)
elif self.__token_info == 'mi<mk<pard-start':
self.__end_row_def_func(line)
# if already in the table, start a row, then cell.
            if len(self.__state) > 0 and self.__state[-1] == 'in_table':
self.__start_row_func(line)
self.__start_cell_func(line)
self.__write_obj.write(line)
elif self.__token_info == 'mi<mk<in-table__':
self.__end_row_def_func(line)
# if not in table, start a new table
if len(self.__state) > 0 and self.__state[-1] != 'in_table':
self.__start_table_func(line)
self.__write_obj.write(line)
else:
self.__write_obj.write(line)
def __handle_row_token(self, line):
"""
Requires:
line -- line to parse
Returns:
            nothing
Logic:
the tokens in the row definition contain the following information:
1. row borders.
2. cell borders for all cells in the row.
3. cell postions for all cells in the row.
Put all information about row borders into a row dictionary.
Put all information about cell borders into into the dictionary in
the last item in the cell list. ([{border:something, width:something},
{border:something, width:something}])
cw<bd<bor-t-r-to<nu<bdr-hair__|bdr-li-wid:0.50
"""
if line[3:5] == 'bd':
border_obj = border_parse.BorderParse()
the_dict = border_obj.parse_border(line)
keys = the_dict.keys()
# border-cell-top-hairline
in_cell = 0
for key in keys:
if key[0:11] == 'border-cell':
in_cell = 1
for key in keys:
if in_cell:
self.__cell_list[-1][key] = the_dict[key]
else:
self.__row_dict[key] = the_dict[key]
# cw<tb<cell-posit<nu<216.00
elif self.__token_info == 'cw<tb<cell-posit':
self.__found_cell_position(line)
# cw<tb<row-pos-le<nu<-5.40
elif self.__token_info == 'cw<tb<row-pos-le':
position = line[20:-1]
self.__row_dict['left-row-position'] = position
elif self.__token_info == 'cw<tb<row-header':
self.__row_dict['header'] = 'true'
def __start_cell_func(self, line):
"""
Required:
line -- the line of text
Returns:
nothing
Logic:
Append 'in_cell' for states
            If self.__cell_list contains dictionaries, take the first one,
            write its key => value pairs as cell attributes, and pop it from
            the front of the list.
Otherwise, print out a cell tag.
"""
self.__state.append('in_cell')
# self.__cell_list = []
if len(self.__cell_list) > 0:
self.__write_obj.write('mi<tg<open-att__<cell')
# cell_dict = self.__cell_list[-1]
cell_dict = self.__cell_list[0]
keys = cell_dict.keys()
for key in keys:
self.__write_obj.write('<%s>%s' % (key, cell_dict[key]))
self.__write_obj.write('\n')
# self.__cell_list.pop()
self.__cell_list.pop(0)
# self.__cell_list = self.__cell_list[1:]
else:
self.__write_obj.write('mi<tg<open______<cell\n')
self.__cells_in_table += 1
self.__cells_in_row += 1
def __start_row_func(self, line):
"""
Required:
line -- the line of text
Returns:
nothing
Logic:
Append 'in_row' for states
Write value => attributes for key=> value
"""
self.__state.append('in_row')
self.__write_obj.write('mi<tg<open-att__<row')
keys = self.__row_dict.keys()
for key in keys:
self.__write_obj.write('<%s>%s' % (key, self.__row_dict[key]))
self.__write_obj.write('\n')
self.__cells_in_row = 0
self.__rows_in_table += 1
def __found_cell_position(self, line):
"""
needs:
line: current line
returns:
nothing
logic:
Calculate the cell width.
If the cell is the first cell, you should add the left cell position to it.
(This value is often negative.)
Next, set the new last_cell_position to the current cell position.
"""
# cw<tb<cell-posit<nu<216.00
new_cell_position = round(float(line[20:-1]), 2)
left_position = 0
if self.__last_cell_position == 0:
left_position = self.__row_dict.get('left-row-position', 0)
left_position = float(left_position)
width = new_cell_position - self.__last_cell_position - left_position
# width = round(width, 2)
width = str('%.2f' % width)
self.__last_cell_position = new_cell_position
widths_exists = self.__row_dict.get('widths')
if widths_exists:
self.__row_dict['widths'] += ', %s' % str(width)
else:
self.__row_dict['widths'] = str(width)
self.__cell_list[-1]['width'] = width
self.__cell_list.append({})
self.__cell_widths.append(width)
def __in_cell_func(self, line):
"""
Required:
line
Returns:
nothing
Logic:
In the middle of a cell.
Look for the close of the table. If found, use the close table function to close
the table.
Look for the close of the cell. If found, use the close cell function to close out
the cell.
Otherwise, print out the line.
"""
# cw<tb<cell______<nu<true
# mi<mk<sect-start
if self.__token_info == 'mi<mk<not-in-tbl' or\
self.__token_info == 'mi<mk<sect-start' or\
self.__token_info == 'mi<mk<sect-close' or\
self.__token_info == 'mi<mk<body-close':
self.__end_cell_func(line)
self.__end_row_func(line)
self.__close_table(line)
self.__write_obj.write(line)
elif self.__token_info == 'cw<tb<cell______':
self.__end_cell_func(line)
else:
self.__write_obj.write(line)
def __end_cell_func(self, line):
"""
Requires:
line
Returns:
nothing
Logic:
End the cell. Print out the closing marks. Pop the self.__state.
"""
if len(self.__state) > 1:
if self.__state[-1] == 'in_cell':
self.__state.pop()
self.__write_obj.write('mi<mk<close_cell\n')
self.__write_obj.write('mi<tg<close_____<cell\n')
self.__write_obj.write('mi<mk<closecell_\n')
def __in_row_func(self, line):
if self.__token_info == 'mi<mk<not-in-tbl' or\
self.__token_info == 'mi<mk<sect-start' or\
self.__token_info == 'mi<mk<sect-close' or\
self.__token_info == 'mi<mk<body-close':
self.__end_row_func(line)
self.__close_table(line)
self.__write_obj.write(line)
else:
action = self.__in_row_dict.get(self.__token_info)
if action:
action(line)
self.__write_obj.write(line)
"""
elif self.__token_info == 'mi<mk<pard-start':
self.__start_cell_func(line)
self.__write_obj.write(line)
elif self.__token_info == 'cw<tb<row_______':
self.__end_row_func(line)
self.__write_obj.write(line)
else:
self.__write_obj.write(line)
"""
    def __end_row_func(self, line):
        """
        Close the current row: write the closing (or empty) row tag and update
        the cells-per-row statistics.
        """
if len(self.__state) > 1 and self.__state[-1] == 'in_row':
self.__state.pop()
self.__write_obj.write('mi<tg<close_____<row\n')
else:
self.__write_obj.write('mi<tg<empty_____<row\n')
self.__rows_in_table += 1
if self.__cells_in_row > self.__max_number_cells_in_row:
self.__max_number_cells_in_row = self.__cells_in_row
self.__list_of_cells_in_row.append(self.__cells_in_row)
def __empty_cell(self, line):
"""
Required:
line -- line of text
Returns:
nothing
Logic:
Write an empty tag with attributes if there are attributes.
            Otherwise, write an empty tag with cell as the element.
"""
if len(self.__cell_list) > 0:
self.__write_obj.write('mi<tg<empty-att_<cell')
cell_dict = self.__cell_list[-1]
keys = cell_dict.keys()
for key in keys:
self.__write_obj.write('<%s>%s' % (key, cell_dict[key]))
self.__write_obj.write('\n')
else:
self.__write_obj.write('mi<tg<empty_____<cell\n')
self.__cells_in_table += 1
self.__cells_in_row += 1
def __mode(self, the_list):
"""
Required:
the_list -- a list of something
Returns:
the number that occurs the most
Logic:
get the count of each item in list. The count that is the greatest
is the mode.
"""
        max_count = 0
        mode = 'not-defined'
        for item in the_list:
            num_of_values = the_list.count(item)
            if num_of_values > max_count:
                mode = item
                max_count = num_of_values
return mode
def make_table(self):
"""
Requires:
nothing
Returns:
A dictionary of values for the beginning of the table.
Logic:
Read one line in at a time. Determine what action to take based on
the state.
"""
self.__initiate_values()
read_obj = open(self.__file, 'r')
self.__write_obj = open(self.__write_to, 'w')
line_to_read = 1
while line_to_read:
line_to_read = read_obj.readline()
line = line_to_read
self.__token_info = line[:16]
action = self.__state_dict.get(self.__state[-1])
# print self.__state[-1]
            if action is None:
                sys.stderr.write('No matching state in module table.py\n')
                sys.stderr.write(self.__state[-1] + '\n')
            else:
                action(line)
read_obj.close()
self.__write_obj.close()
copy_obj = copy.Copy(bug_handler = self.__bug_handler)
if self.__copy:
copy_obj.copy_file(self.__write_to, "table.data")
copy_obj.rename(self.__write_to, self.__file)
os.remove(self.__write_to)
return self.__table_data
| gpl-3.0 | 7,468,204,731,981,673,000 | 1,860,042,856,129,545,000 | 37.638889 | 94 | 0.49758 | false |
foursquare/pants | contrib/go/tests/python/pants_test/contrib/go/tasks/test_go_thrift_gen_integration.py | 1 | 5014 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from contextlib import contextmanager
from textwrap import dedent
from pants.base.build_environment import get_buildroot
from pants.util.dirutil import safe_open
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
from pants_test.testutils.file_test_util import exact_files
_NAMESPACE = dedent(
"""
namespace go thrifttest.duck
""")
_DUCK_STRUCT = dedent(
"""
struct Duck {
1: optional string quack,
}
""")
_FEEDER_STRUCT_TEMPLATE = dedent(
"""
service Feeder {{
void feed(1:{include}Duck duck),
}}
""")
class GoThriftGenIntegrationTest(PantsRunIntegrationTest):
@contextmanager
def _create_thrift_project(self, thrift_files):
with self.temporary_sourcedir() as srcdir:
for path, content in thrift_files.items():
with safe_open(os.path.join(srcdir, path), 'w') as fp:
fp.write(content)
with safe_open(os.path.join(srcdir, 'src/thrift/thrifttest/BUILD'), 'w') as fp:
fp.write(dedent("""
go_thrift_library(
name='fleem',
sources=globs('*.thrift'),
)
""").strip())
with safe_open(os.path.join(srcdir, 'src/go/usethrift/example.go'), 'w') as fp:
fp.write(dedent("""
package usethrift
import "thrifttest/duck"
func whatevs(f duck.Feeder) string {
d := duck.NewDuck()
f.Feed(d)
return d.GetQuack()
}
""").strip())
with safe_open(os.path.join(srcdir, 'src/go/usethrift/BUILD'), 'w') as fp:
fp.write(dedent("""
go_library(
dependencies=[
'{srcdir}/src/thrift/thrifttest:fleem'
]
)
""".format(srcdir=os.path.relpath(srcdir, get_buildroot()))).strip())
with safe_open(os.path.join(srcdir, '3rdparty/go/github.com/apache/thrift/BUILD'), 'w') as fp:
fp.write("go_remote_library(rev='0.9.3', pkg='lib/go/thrift')")
config = {
'gen.go-thrift': {
'thrift_import_target':
os.path.join(os.path.relpath(srcdir, get_buildroot()),
'3rdparty/go/github.com/apache/thrift:lib/go/thrift'),
'thrift_import': 'github.com/apache/thrift/lib/go/thrift'
}
}
yield srcdir, config
def test_go_thrift_gen_single(self):
# Compile with one thrift file.
thrift_files = {
'src/thrift/thrifttest/duck.thrift':
_NAMESPACE + _DUCK_STRUCT + _FEEDER_STRUCT_TEMPLATE.format(include=''),
}
with self.temporary_workdir() as workdir:
with self._create_thrift_project(thrift_files) as (srcdir, config):
args = [
'compile',
os.path.join(srcdir, 'src/go/usethrift')
]
pants_run = self.run_pants_with_workdir(args, workdir, config=config)
self.assert_success(pants_run)
# Fetch the hash for task impl version.
go_thrift_contents = [p for p in os.listdir(os.path.join(workdir, 'gen', 'go-thrift'))
if p != 'current'] # Ignore the 'current' symlink.
self.assertEqual(len(go_thrift_contents), 1)
hash_dir = go_thrift_contents[0]
target_dir = os.path.relpath(os.path.join(srcdir, 'src/thrift/thrifttest/fleem'),
get_buildroot())
root = os.path.join(workdir, 'gen', 'go-thrift', hash_dir,
target_dir.replace(os.path.sep, '.'), 'current')
self.assertEquals(sorted(['src/go/thrifttest/duck/constants.go',
'src/go/thrifttest/duck/ttypes.go',
'src/go/thrifttest/duck/feeder.go',
'src/go/thrifttest/duck/feeder-remote/feeder-remote.go']),
sorted(exact_files(root)))
def test_go_thrift_gen_multi(self):
# Compile with a namespace split across thrift files.
duck_include = dedent(
"""
include "thrifttest/duck.thrift"
""")
thrift_files = {
'src/thrift/thrifttest/duck.thrift': _NAMESPACE + _DUCK_STRUCT,
'src/thrift/thrifttest/feeder.thrift':
_NAMESPACE + duck_include + _FEEDER_STRUCT_TEMPLATE.format(include='duck.'),
}
with self.temporary_workdir() as workdir:
with self._create_thrift_project(thrift_files) as (srcdir, config):
args = [
# Necessary to use a newer thrift version.
'--thrift-version=0.10.0',
'compile',
os.path.join(srcdir, 'src/go/usethrift')
]
pants_run = self.run_pants_with_workdir(args, workdir, config=config)
self.assert_success(pants_run)
| apache-2.0 | -5,428,016,043,506,504,000 | -1,136,903,545,364,730,500 | 35.59854 | 100 | 0.581572 | false |
vijaylbais/boto | boto/vpc/routetable.py | 61 | 4033 | # Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a Route Table
"""
from boto.ec2.ec2object import TaggedEC2Object
from boto.resultset import ResultSet
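# The start/endElement handlers below consume an EC2 DescribeRouteTables
# response. Illustrative shape only -- the element names match the handlers,
# but this is not a verbatim API response:
#
#   <routeTable>
#     <routeTableId>rtb-12345678</routeTableId>
#     <vpcId>vpc-12345678</vpcId>
#     <routeSet>
#       <item>
#         <destinationCidrBlock>10.0.0.0/16</destinationCidrBlock>
#         <gatewayId>local</gatewayId>
#         <state>active</state>
#       </item>
#     </routeSet>
#     <associationSet>
#       <item>
#         <routeTableAssociationId>rtbassoc-12345678</routeTableAssociationId>
#         <routeTableId>rtb-12345678</routeTableId>
#         <main>true</main>
#       </item>
#     </associationSet>
#   </routeTable>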
class RouteTable(TaggedEC2Object):
def __init__(self, connection=None):
super(RouteTable, self).__init__(connection)
self.id = None
self.vpc_id = None
self.routes = []
self.associations = []
def __repr__(self):
return 'RouteTable:%s' % self.id
def startElement(self, name, attrs, connection):
result = super(RouteTable, self).startElement(name, attrs, connection)
if result is not None:
# Parent found an interested element, just return it
return result
if name == 'routeSet':
self.routes = ResultSet([('item', Route)])
return self.routes
elif name == 'associationSet':
self.associations = ResultSet([('item', RouteAssociation)])
return self.associations
else:
return None
def endElement(self, name, value, connection):
if name == 'routeTableId':
self.id = value
elif name == 'vpcId':
self.vpc_id = value
else:
setattr(self, name, value)
class Route(object):
def __init__(self, connection=None):
self.destination_cidr_block = None
self.gateway_id = None
self.instance_id = None
self.interface_id = None
self.vpc_peering_connection_id = None
self.state = None
self.origin = None
def __repr__(self):
return 'Route:%s' % self.destination_cidr_block
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'destinationCidrBlock':
self.destination_cidr_block = value
elif name == 'gatewayId':
self.gateway_id = value
elif name == 'instanceId':
self.instance_id = value
elif name == 'networkInterfaceId':
self.interface_id = value
elif name == 'vpcPeeringConnectionId':
self.vpc_peering_connection_id = value
elif name == 'state':
self.state = value
elif name == 'origin':
self.origin = value
class RouteAssociation(object):
def __init__(self, connection=None):
self.id = None
self.route_table_id = None
self.subnet_id = None
self.main = False
def __repr__(self):
return 'RouteAssociation:%s' % self.id
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'routeTableAssociationId':
self.id = value
elif name == 'routeTableId':
self.route_table_id = value
elif name == 'subnetId':
self.subnet_id = value
elif name == 'main':
self.main = value == 'true'
| mit | -1,641,291,269,892,029,400 | -37,691,214,525,324,090 | 33.177966 | 78 | 0.62906 | false |
hsgr/sunobs | sunobs/settings.py | 4 | 2229 | TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django_browserid.context_processors.browserid',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_browserid.auth.BrowserIDBackend',
)
ROOT_URLCONF = 'sunobs.urls'
USE_TZ = True
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
USE_I18N = False
USE_L10N = False
import os as _os
PROJECT_ROOT = _os.path.abspath(_os.path.dirname(__file__))
# Media files
MEDIA_ROOT = _os.path.join(PROJECT_ROOT, 'media/')
MEDIA_URL = '/media/'
# Static Files
STATIC_URL = '/static/'
STATICFILES_DIRS = (
_os.path.join(PROJECT_ROOT, 'static/'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Templates
TEMPLATE_DIRS = (
_os.path.join(PROJECT_ROOT, 'templates'),
)
# Login
LOGIN_REDIRECT_URL = '/dashboard/'
LOGIN_REDIRECT_URL_FAILURE = '/'
LOGOUT_REDIRECT_URL = '/'
# Our auth model
AUTH_USER_MODEL = 'base.SunUser'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.redirects',
'django.contrib.admin',
# Project apps
'sunobs.base',
'sunobs.observations',
# 3rd party
'django_browserid',
)
from sunobs.local_settings import * | agpl-3.0 | 2,308,930,744,493,956,600 | 156,609,863,896,919,600 | 24.340909 | 69 | 0.715568 | false |
iguzu/gae-django | django/contrib/comments/forms.py | 10 | 7902 | import time
import datetime
from django import forms
from django.forms.util import ErrorDict
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from models import Comment
from django.utils.encoding import force_unicode
from django.utils.hashcompat import sha_constructor
from django.utils.text import get_text_list
from django.utils.translation import ungettext, ugettext_lazy as _
COMMENT_MAX_LENGTH = getattr(settings, 'COMMENT_MAX_LENGTH', 3000)
class CommentSecurityForm(forms.Form):
"""
Handles the security aspects (anti-spoofing) for comment forms.
"""
content_type = forms.CharField(widget=forms.HiddenInput)
object_pk = forms.CharField(widget=forms.HiddenInput)
timestamp = forms.IntegerField(widget=forms.HiddenInput)
security_hash = forms.CharField(min_length=40, max_length=40, widget=forms.HiddenInput)
def __init__(self, target_object, data=None, initial=None):
self.target_object = target_object
if initial is None:
initial = {}
initial.update(self.generate_security_data())
super(CommentSecurityForm, self).__init__(data=data, initial=initial)
def security_errors(self):
"""Return just those errors associated with security"""
errors = ErrorDict()
for f in ["honeypot", "timestamp", "security_hash"]:
if f in self.errors:
errors[f] = self.errors[f]
return errors
def clean_security_hash(self):
"""Check the security hash."""
security_hash_dict = {
'content_type' : self.data.get("content_type", ""),
'object_pk' : self.data.get("object_pk", ""),
'timestamp' : self.data.get("timestamp", ""),
}
expected_hash = self.generate_security_hash(**security_hash_dict)
actual_hash = self.cleaned_data["security_hash"]
if expected_hash != actual_hash:
raise forms.ValidationError("Security hash check failed.")
return actual_hash
def clean_timestamp(self):
"""Make sure the timestamp isn't too far (> 2 hours) in the past."""
ts = self.cleaned_data["timestamp"]
if time.time() - ts > (2 * 60 * 60):
raise forms.ValidationError("Timestamp check failed")
return ts
def generate_security_data(self):
"""Generate a dict of security data for "initial" data."""
timestamp = int(time.time())
security_dict = {
'content_type' : str(self.target_object._meta),
'object_pk' : str(self.target_object._get_pk_val()),
'timestamp' : str(timestamp),
'security_hash' : self.initial_security_hash(timestamp),
}
return security_dict
def initial_security_hash(self, timestamp):
"""
Generate the initial security hash from self.content_object
and a (unix) timestamp.
"""
initial_security_dict = {
'content_type' : str(self.target_object._meta),
'object_pk' : str(self.target_object._get_pk_val()),
'timestamp' : str(timestamp),
}
return self.generate_security_hash(**initial_security_dict)
def generate_security_hash(self, content_type, object_pk, timestamp):
"""Generate a (SHA1) security hash from the provided info."""
info = (content_type, object_pk, timestamp, settings.SECRET_KEY)
return sha_constructor("".join(info)).hexdigest()
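    # The token above is SHA1(content_type + object_pk + timestamp +
    # SECRET_KEY), so a hash generated for one object/timestamp cannot simply
    # be replayed for another without knowing SECRET_KEY.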
class CommentDetailsForm(CommentSecurityForm):
"""
Handles the specific details of the comment (name, comment, etc.).
"""
name = forms.CharField(label=_("Name"), max_length=50)
email = forms.EmailField(label=_("Email address"))
url = forms.URLField(label=_("URL"), required=False)
comment = forms.CharField(label=_('Comment'), widget=forms.Textarea,
max_length=COMMENT_MAX_LENGTH)
def get_comment_object(self):
"""
Return a new (unsaved) comment object based on the information in this
form. Assumes that the form is already validated and will throw a
ValueError if not.
Does not set any of the fields that would come from a Request object
(i.e. ``user`` or ``ip_address``).
"""
if not self.is_valid():
raise ValueError("get_comment_object may only be called on valid forms")
CommentModel = self.get_comment_model()
new = CommentModel(**self.get_comment_create_data())
new = self.check_for_duplicate_comment(new)
return new
def get_comment_model(self):
"""
Get the comment model to create with this form. Subclasses in custom
comment apps should override this, get_comment_create_data, and perhaps
check_for_duplicate_comment to provide custom comment models.
"""
return Comment
def get_comment_create_data(self):
"""
Returns the dict of data to be used to create a comment. Subclasses in
custom comment apps that override get_comment_model can override this
method to add extra fields onto a custom comment model.
"""
return dict(
content_type = ContentType.objects.get_for_model(self.target_object),
object_pk = force_unicode(self.target_object._get_pk_val()),
user_name = self.cleaned_data["name"],
user_email = self.cleaned_data["email"],
user_url = self.cleaned_data["url"],
comment = self.cleaned_data["comment"],
submit_date = datetime.datetime.now(),
site_id = settings.SITE_ID,
is_public = True,
is_removed = False,
)
def check_for_duplicate_comment(self, new):
"""
Check that a submitted comment isn't a duplicate. This might be caused
by someone posting a comment twice. If it is a dup, silently return the *previous* comment.
"""
possible_duplicates = self.get_comment_model()._default_manager.filter(
content_type = new.content_type,
object_pk = new.object_pk,
user_name = new.user_name,
user_email = new.user_email,
user_url = new.user_url,
)
for old in possible_duplicates:
if old.submit_date.date() == new.submit_date.date() and old.comment == new.comment:
return old
return new
def clean_comment(self):
"""
If COMMENTS_ALLOW_PROFANITIES is False, check that the comment doesn't
contain anything in PROFANITIES_LIST.
"""
comment = self.cleaned_data["comment"]
if settings.COMMENTS_ALLOW_PROFANITIES == False:
bad_words = [w for w in settings.PROFANITIES_LIST if w in comment.lower()]
if bad_words:
plural = len(bad_words) > 1
raise forms.ValidationError(ungettext(
"Watch your mouth! The word %s is not allowed here.",
"Watch your mouth! The words %s are not allowed here.", plural) % \
get_text_list(['"%s%s%s"' % (i[0], '-'*(len(i)-2), i[-1]) for i in bad_words], 'and'))
return comment
class CommentForm(CommentDetailsForm):
honeypot = forms.CharField(required=False,
label=_('If you enter anything in this field '\
'your comment will be treated as spam'))
def clean_honeypot(self):
"""Check that nothing's been entered into the honeypot."""
value = self.cleaned_data["honeypot"]
if value:
raise forms.ValidationError(self.fields["honeypot"].label)
return value
| bsd-3-clause | -6,925,773,945,174,614,000 | 1,753,831,891,670,506,500 | 41.031915 | 106 | 0.600987 | false |
francesco-mannella/dmp-esn | parametric/parametric_dmp/bin/tr_datasets/e_cursive_curves_angles_start_none/results/plot.py | 18 | 1043 | #!/usr/bin/env python
import glob
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
pathname = os.path.dirname(sys.argv[0])
if pathname:
os.chdir(pathname)
n_dim = None
trains = []
for fname in glob.glob("tl*"):
t = np.loadtxt(fname)
trains.append(t)
tests = []
for fname in glob.glob("tt*"):
t = np.loadtxt(fname)
tests.append(t)
trial_results = []
for fname in glob.glob("rtl*"):
t = np.loadtxt(fname)
trial_results.append(t)
test_results = []
for fname in glob.glob("rtt*"):
t = np.loadtxt(fname)
test_results.append(t)
fig = plt.figure()
ax = fig.add_subplot(111, aspect="equal")
for d in trains:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color="blue", lw=3, alpha=0.5)
for d in tests:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color="red", lw=3, alpha=0.5)
for d in trial_results:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color=[0,0,.5], lw=2)
for d in test_results:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color=[.5,0,0], lw=2)
plt.show()
| gpl-2.0 | -1,649,915,365,325,229,600 | -7,609,860,482,491,857,000 | 21.191489 | 78 | 0.587728 | false |
fabioz/Pydev | plugins/org.python.pydev.jython/Lib/MimeWriter.py | 315 | 6482 | """Generic MIME writer.
This module defines the class MimeWriter. The MimeWriter class implements
a basic formatter for creating MIME multi-part files. It doesn't seek around
the output file nor does it use large amounts of buffer space. You must write
the parts out in the order that they should occur in the final file.
MimeWriter does buffer the headers you add, allowing you to rearrange their
order.
"""
import mimetools
__all__ = ["MimeWriter"]
import warnings
warnings.warn("the MimeWriter module is deprecated; use the email package instead",
DeprecationWarning, 2)
class MimeWriter:
"""Generic MIME writer.
Methods:
__init__()
addheader()
flushheaders()
startbody()
startmultipartbody()
nextpart()
lastpart()
A MIME writer is much more primitive than a MIME parser. It
doesn't seek around on the output file, and it doesn't use large
amounts of buffer space, so you have to write the parts in the
order they should occur on the output file. It does buffer the
headers you add, allowing you to rearrange their order.
General usage is:
f = <open the output file>
w = MimeWriter(f)
...call w.addheader(key, value) 0 or more times...
followed by either:
f = w.startbody(content_type)
...call f.write(data) for body data...
or:
w.startmultipartbody(subtype)
for each part:
subwriter = w.nextpart()
...use the subwriter's methods to create the subpart...
w.lastpart()
The subwriter is another MimeWriter instance, and should be
treated in the same way as the toplevel MimeWriter. This way,
writing recursive body parts is easy.
Warning: don't forget to call lastpart()!
XXX There should be more state so calls made in the wrong order
are detected.
Some special cases:
- startbody() just returns the file passed to the constructor;
but don't use this knowledge, as it may be changed.
- startmultipartbody() actually returns a file as well;
this can be used to write the initial 'if you can read this your
mailer is not MIME-aware' message.
- If you call flushheaders(), the headers accumulated so far are
written out (and forgotten); this is useful if you don't need a
body part at all, e.g. for a subpart of type message/rfc822
that's (mis)used to store some header-like information.
- Passing a keyword argument 'prefix=<flag>' to addheader(),
start*body() affects where the header is inserted; 0 means
append at the end, 1 means insert at the start; default is
append for addheader(), but insert for start*body(), which use
it to determine where the Content-Type header goes.
"""
def __init__(self, fp):
self._fp = fp
self._headers = []
def addheader(self, key, value, prefix=0):
"""Add a header line to the MIME message.
        The key is the name of the header and value provides its contents.
        The optional argument prefix determines
where the header is inserted; 0 means append at the end, 1 means
insert at the start. The default is to append.
"""
lines = value.split("\n")
while lines and not lines[-1]: del lines[-1]
while lines and not lines[0]: del lines[0]
for i in range(1, len(lines)):
lines[i] = " " + lines[i].strip()
value = "\n".join(lines) + "\n"
line = key + ": " + value
if prefix:
self._headers.insert(0, line)
else:
self._headers.append(line)
def flushheaders(self):
"""Writes out and forgets all headers accumulated so far.
This is useful if you don't need a body part at all; for example,
for a subpart of type message/rfc822 that's (mis)used to store some
header-like information.
"""
self._fp.writelines(self._headers)
self._headers = []
def startbody(self, ctype, plist=[], prefix=1):
"""Returns a file-like object for writing the body of the message.
The content-type is set to the provided ctype, and the optional
parameter, plist, provides additional parameters for the
content-type declaration. The optional argument prefix determines
where the header is inserted; 0 means append at the end, 1 means
insert at the start. The default is to insert at the start.
"""
for name, value in plist:
ctype = ctype + ';\n %s=\"%s\"' % (name, value)
self.addheader("Content-Type", ctype, prefix=prefix)
self.flushheaders()
self._fp.write("\n")
return self._fp
def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1):
"""Returns a file-like object for writing the body of the message.
Additionally, this method initializes the multi-part code, where the
subtype parameter provides the multipart subtype, the boundary
parameter may provide a user-defined boundary specification, and the
plist parameter provides optional parameters for the subtype. The
optional argument, prefix, determines where the header is inserted;
0 means append at the end, 1 means insert at the start. The default
is to insert at the start. Subparts should be created using the
nextpart() method.
"""
self._boundary = boundary or mimetools.choose_boundary()
return self.startbody("multipart/" + subtype,
[("boundary", self._boundary)] + plist,
prefix=prefix)
def nextpart(self):
"""Returns a new instance of MimeWriter which represents an
individual part in a multipart message.
This may be used to write the part as well as used for creating
recursively complex multipart messages. The message must first be
initialized with the startmultipartbody() method before using the
nextpart() method.
"""
self._fp.write("\n--" + self._boundary + "\n")
return self.__class__(self._fp)
def lastpart(self):
"""This is used to designate the last part of a multipart message.
It should always be used when writing multipart messages.
"""
self._fp.write("\n--" + self._boundary + "--\n")
if __name__ == '__main__':
import test.test_MimeWriter
| epl-1.0 | 1,163,775,042,582,156,300 | 2,124,187,556,616,332,000 | 33.849462 | 83 | 0.646868 | false |
edgarRd/incubator-airflow | tests/sensors/test_base_sensor.py | 10 | 5164 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow import DAG, configuration, settings
from airflow.exceptions import AirflowSensorTimeout
from airflow.models import DagRun, TaskInstance
from airflow.operators.dummy_operator import DummyOperator
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils import timezone
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from datetime import timedelta
from time import sleep
configuration.load_test_config()
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_test_dag'
DUMMY_OP = 'dummy_op'
SENSOR_OP = 'sensor_op'
class DummySensor(BaseSensorOperator):
def __init__(self, return_value=False, **kwargs):
super(DummySensor, self).__init__(**kwargs)
self.return_value = return_value
def poke(self, context):
return self.return_value
class BaseSensorTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG(TEST_DAG_ID, default_args=args)
session = settings.Session()
session.query(DagRun).delete()
session.query(TaskInstance).delete()
session.commit()
def _make_dag_run(self):
return self.dag.create_dagrun(
run_id='manual__',
start_date=timezone.utcnow(),
execution_date=DEFAULT_DATE,
state=State.RUNNING
)
def _make_sensor(self, return_value, **kwargs):
poke_interval = 'poke_interval'
timeout = 'timeout'
if poke_interval not in kwargs:
kwargs[poke_interval] = 0
if timeout not in kwargs:
kwargs[timeout] = 0
sensor = DummySensor(
task_id=SENSOR_OP,
return_value=return_value,
dag=self.dag,
**kwargs
)
dummy_op = DummyOperator(
task_id=DUMMY_OP,
dag=self.dag
)
dummy_op.set_upstream(sensor)
return sensor
@classmethod
def _run(cls, task):
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_ok(self):
sensor = self._make_sensor(True)
dr = self._make_dag_run()
self._run(sensor)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 2)
for ti in tis:
if ti.task_id == SENSOR_OP:
self.assertEquals(ti.state, State.SUCCESS)
if ti.task_id == DUMMY_OP:
self.assertEquals(ti.state, State.NONE)
def test_fail(self):
sensor = self._make_sensor(False)
dr = self._make_dag_run()
with self.assertRaises(AirflowSensorTimeout):
self._run(sensor)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 2)
for ti in tis:
if ti.task_id == SENSOR_OP:
self.assertEquals(ti.state, State.FAILED)
if ti.task_id == DUMMY_OP:
self.assertEquals(ti.state, State.NONE)
def test_soft_fail(self):
sensor = self._make_sensor(False, soft_fail=True)
dr = self._make_dag_run()
self._run(sensor)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 2)
for ti in tis:
self.assertEquals(ti.state, State.SKIPPED)
def test_soft_fail_with_retries(self):
sensor = self._make_sensor(
return_value=False,
soft_fail=True,
retries=1,
retry_delay=timedelta(milliseconds=1))
dr = self._make_dag_run()
# first run fails and task instance is marked up to retry
with self.assertRaises(AirflowSensorTimeout):
self._run(sensor)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 2)
for ti in tis:
if ti.task_id == SENSOR_OP:
self.assertEquals(ti.state, State.UP_FOR_RETRY)
if ti.task_id == DUMMY_OP:
self.assertEquals(ti.state, State.NONE)
sleep(0.001)
# after retry DAG run is skipped
self._run(sensor)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 2)
for ti in tis:
self.assertEquals(ti.state, State.SKIPPED)
| apache-2.0 | 5,416,495,992,622,926,000 | 6,355,462,874,571,708,000 | 31.275 | 86 | 0.620256 | false |
v-iam/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/node_transition_progress.py | 2 | 1433 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NodeTransitionProgress(Model):
"""Information about an NodeTransition operation. This class contains an
OperationState and a NodeTransitionResult. The NodeTransitionResult is
not valid until OperationState
is Completed or Faulted.
.
:param state: Possible values include: 'Invalid', 'Running',
'RollingBack', 'Completed', 'Faulted', 'Cancelled', 'ForceCancelled'
:type state: str
:param node_transition_result:
:type node_transition_result: :class:`NodeTransitionResult
<azure.servicefabric.models.NodeTransitionResult>`
"""
_attribute_map = {
'state': {'key': 'State', 'type': 'str'},
'node_transition_result': {'key': 'NodeTransitionResult', 'type': 'NodeTransitionResult'},
}
def __init__(self, state=None, node_transition_result=None):
self.state = state
self.node_transition_result = node_transition_result
| mit | -8,009,884,941,157,377,000 | 1,164,926,623,968,862,500 | 37.72973 | 98 | 0.633636 | false |
setten/pymatgen | pymatgen/io/abinit/db.py | 8 | 4991 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Objects and helper function used to store the results in a MongoDb database
"""
from __future__ import division, print_function, unicode_literals
import collections
import copy
from .utils import as_bool
def mongo_getattr(rec, key):
"""
Get value from dict using MongoDB dot-separated path semantics.
For example:
>>> assert mongo_getattr({'a': {'b': 1}, 'x': 2}, 'a.b') == 1
>>> assert mongo_getattr({'a': {'b': 1}, 'x': 2}, 'x') == 2
>>> assert mongo_getattr({'a': {'b': 1}, 'x': 2}, 'a.b.c') is None
:param rec: mongodb document
:param key: path to mongo value
:param default: default to return if not found
:return: value, potentially nested, or default if not found
:raise: AttributeError, if record is not a dict or key is not found.
"""
if not isinstance(rec, collections.Mapping):
raise AttributeError('input record must act like a dict')
if not rec:
raise AttributeError('Empty dict')
    if '.' not in key:
return rec.get(key)
for key_part in key.split('.'):
if not isinstance(rec, collections.Mapping):
raise AttributeError('not a mapping for rec_part %s' % key_part)
        if key_part not in rec:
            raise AttributeError('key %s not in dict %s' % (key_part, rec))
rec = rec[key_part]
return rec
def scan_nestdict(d, key):
"""
Scan a nested dict d, and return the first value associated to the given key.
Returns None if key is not found.
>>> d = {0: 1, 1: {"hello": {"world": {None: [1,2,3]}}}, "foo": [{"bar": 1}, {"color": "red"}]}
>>> assert scan_nestdict(d, 1) == {"hello": {"world": {None: [1,2,3]}}}
>>> assert scan_nestdict(d, "hello") == {"world": {None: [1,2,3]}}
>>> assert scan_nestdict(d, "world") == {None: [1,2,3]}
>>> assert scan_nestdict(d, None) == [1,2,3]
>>> assert scan_nestdict(d, "color") == "red"
"""
if isinstance(d, (list, tuple)):
for item in d:
res = scan_nestdict(item, key)
if res is not None:
return res
return None
if not isinstance(d, collections.Mapping):
return None
if key in d:
return d[key]
else:
for v in d.values():
res = scan_nestdict(v, key)
if res is not None:
return res
return None
class DBConnector(object):
#DEFAULTS = dict(
# database="abinit",
# collection=None,
# port=None,
# host=None,
# user=None,
# password=None,
    #)
@classmethod
def autodoc(cls):
return """
enabled: # yes or no (default yes)
database: # Name of the mongodb database (default abinit)
collection: # Name of the collection (default test)
host: # host address e.g. 0.0.0.0 (default None)
port: # port e.g. 8080 (default None)
user: # user name (default None)
password: # password for authentication (default None)
"""
def __init__(self, **kwargs):
if not kwargs:
self.enabled = False
return
self.enabled = as_bool(kwargs.pop("enabled", True))
self.dbname = kwargs.pop("database", "abinit")
self.collection = kwargs.pop("collection", "test")
self.host = kwargs.pop("host", None)
self.port = kwargs.pop("port", None)
self.user = kwargs.pop("user", None)
self.password = kwargs.pop("password", None)
if kwargs:
raise ValueError("Found invalid keywords in the database section:\n %s" % kwargs.keys())
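    # Illustrative construction from a parsed configuration dict; the
    # keyword names below match the entries documented in autodoc()
    # (the values are examples):
    #
    #   conn = DBConnector(database="abinit", collection="runs",
    #                      host="localhost", port=27017)
    #   collection = conn.get_collection()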
def __bool__(self):
return self.enabled
__nonzero__ = __bool__
def __repr__(self):
return "<%s object at %s>" % (self.__class__.__name__, id(self))
#def __str__(self):
# return str(self.config)
def deepcopy(self):
return copy.deepcopy(self)
def set_collection_name(self, value):
"""Set the name of the collection, return old value"""
old = self.collection
self.collection = str(value)
return old
def get_collection(self, **kwargs):
"""
Establish a connection with the database.
Returns MongoDb collection
"""
from pymongo import MongoClient
if self.host and self.port:
            client = MongoClient(host=self.host, port=self.port)
else:
client = MongoClient()
db = client[self.dbname]
# Authenticate if needed
if self.user and self.password:
            db.authenticate(self.user, password=self.password)
return db[self.collection]
if __name__ == "__main__":
connector = DBConnector()
print(connector.get_collection())
#connector.set_collection_name("foo")
print(connector)
print(connector.get_collection())
#import unittest
#unittest.main()
| mit | 5,285,878,376,884,137,000 | 1,495,412,679,510,140,000 | 28.708333 | 100 | 0.577239 | false |
instacart/ahab | examples/nathook.py | 1 | 4880 | #!/usr/bin/env python
# coding: utf-8
# © 2015 Instacart
# Published as part of http://tech.instacart.com/ahab/
from contextlib import contextmanager
import logging
from pprint import pformat
from random import randint
import subprocess
from ahab import Ahab
import iptc
log = logging.getLogger()
def main():
logging.basicConfig(level=logging.INFO)
listener = Ahab(handlers=[nat_handler])
listener.listen()
def nat_handler(event, data):
log.info('Event:\n%s', pformat(event))
if 'Config' in data and 'Hostname' in data['Config']:
ident = data['Id']
f = {
'start': create_nat, # On 'start', we create the NAT rules
'die': clear_nat # On 'die', we remove them
}.get(event['status'])
# The 'start' and 'die' events are the only ones relevant for
# managing our NAT rules.
if f is None:
return
host = data['Config']['Hostname']
ip = data['NetworkSettings']['IPAddress']
        # We make a few attempts at the IP Tables operation, in case
# there is overlap with another event handler trying to do the
# same thing for another container.
for n in range(1, 5):
try:
f(host, ip)
break
except iptc.IPTCError as e:
if 'Resource temporarily unavailable' not in str(e):
log.error('IP Tables trouble for %s during NAT '
'setup, not continuing: %s', ident, e)
break
except Exception as e:
log.error('Unexpected error while handling NAT for %s: '
'%s', ident, e)
break
# No matter what happens, we don't error out, because that
# would crash other handlers that might be in the midst of
# configuring other containers.
def create_nat(host, container_ip):
with table(iptc.Table.NAT) as nat:
free_ips = list(secondary_ips() - ips_in_use())
free = free_ips[randint(1, len(free_ips)) - 1]
# Send packets that come in on the outer IP to the inner IP.
dnat = iptc.Rule()
dnat.dst = free
target = dnat.create_target('DNAT')
target.to_destination = container_ip
comment = dnat.create_match('comment')
comment.comment = 'ahab//' + host
iptc.Chain(nat, 'DOCKER').insert_rule(dnat)
# Rewrite packets from the inner IP so they go out on the outer IP.
snat = iptc.Rule()
snat.src = container_ip
target = snat.create_target('SNAT')
target.to_source = free
comment = snat.create_match('comment')
comment.comment = 'ahab//' + host
iptc.Chain(nat, 'POSTROUTING').insert_rule(snat)
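# For reference, the two rules built in create_nat() are roughly the
# python-iptc equivalent of the following commands (illustrative;
# <free>, <container_ip> and <host> are placeholders):
#
#   iptables -t nat -I DOCKER -d <free> -m comment --comment ahab//<host> \
#       -j DNAT --to-destination <container_ip>
#   iptables -t nat -I POSTROUTING -s <container_ip> -m comment \
#       --comment ahab//<host> -j SNAT --to-source <free>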
def clear_nat(host, container_ip):
del container_ip # Could be used for sanity check
with table(iptc.Table.NAT) as nat:
token = 'ahab//' + host
chains = ['DOCKER', 'POSTROUTING']
for chain in [iptc.Chain(nat, name) for name in chains]:
for rule in chain.rules:
comments = [m for m in rule.matches if m.name == 'comment']
if any(c.comment == token for c in comments):
chain.delete_rule(rule)
def ips_in_use():
with table(iptc.Table.NAT) as nat:
ips = set()
token = 'ahab//'
chains = ['DOCKER', 'POSTROUTING']
for chain in [iptc.Chain(nat, name) for name in chains]:
for rule in chain.rules:
comments = [m for m in rule.matches if m.name == 'comment']
if any(c.comment.startswith(token) for c in comments):
if rule.dst is not None:
ips |= set([rule.dst.split('/')[0]])
log.info('IPs in use: %s', ips)
return ips
def secondary_ips():
secondary_ips = []
script = 'ip addr list dev eth0 | fgrep secondary'
text = subprocess.check_output(['sh', '-c', script])
for line in text.splitlines():
fields = line.split()
if len(fields) < 2:
continue
secondary_ips += [fields[1].split('/')[0]]
return set(secondary_ips)
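# secondary_ips() assumes `ip addr` output of the following shape for
# secondary addresses (illustrative sample line):
#
#   inet 192.0.2.10/24 scope global secondary eth0
#
# fields[1] is then '192.0.2.10/24' and the '/24' prefix length is stripped.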
open_tables = {}
@contextmanager
def table(tab):
"""Access IPTables transactionally in a uniform way.
    Ensures all access is done without autocommit and that only the
    outermost task commits, and also ensures we refresh once and commit once.
"""
global open_tables
if tab in open_tables:
yield open_tables[tab]
else:
open_tables[tab] = iptc.Table(tab)
open_tables[tab].refresh()
open_tables[tab].autocommit = False
yield open_tables[tab]
open_tables[tab].commit()
del open_tables[tab]
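# Nesting behaviour of table() (illustrative): only the outermost context
# refreshes and commits; inner contexts reuse the already-open handle, so
# all rule changes from nested handlers land in a single commit.
#
#   with table(iptc.Table.NAT) as nat:          # refresh, autocommit off
#       with table(iptc.Table.NAT) as same:     # same object, no re-open
#           assert nat is same
#   # the single commit happens when the outer context exits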
if __name__ == '__main__':
main()
| isc | 6,128,723,828,484,894,000 | 290,787,012,623,209,660 | 33.118881 | 79 | 0.569994 | false |
jaspreetw/tempest | tempest/api/compute/admin/test_live_migration.py | 3 | 6694 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.compute import base
from tempest import config
from tempest import test
CONF = config.CONF
class LiveBlockMigrationTestJSON(base.BaseV2ComputeAdminTest):
_host_key = 'OS-EXT-SRV-ATTR:host'
@classmethod
def setup_clients(cls):
super(LiveBlockMigrationTestJSON, cls).setup_clients()
cls.admin_hosts_client = cls.os_adm.hosts_client
cls.admin_servers_client = cls.os_adm.servers_client
@classmethod
def resource_setup(cls):
super(LiveBlockMigrationTestJSON, cls).resource_setup()
cls.created_server_ids = []
def _get_compute_hostnames(self):
body = self.admin_hosts_client.list_hosts()
return [
host_record['host_name']
for host_record in body
if host_record['service'] == 'compute'
]
def _get_server_details(self, server_id):
body = self.admin_servers_client.get_server(server_id)
return body
def _get_host_for_server(self, server_id):
return self._get_server_details(server_id)[self._host_key]
def _migrate_server_to(self, server_id, dest_host):
body = self.admin_servers_client.live_migrate_server(
server_id, dest_host,
CONF.compute_feature_enabled.block_migration_for_live_migration)
return body
def _get_host_other_than(self, host):
for target_host in self._get_compute_hostnames():
if host != target_host:
return target_host
def _get_server_status(self, server_id):
return self._get_server_details(server_id)['status']
def _get_an_active_server(self):
for server_id in self.created_server_ids:
if 'ACTIVE' == self._get_server_status(server_id):
return server_id
else:
server = self.create_test_server(wait_until="ACTIVE")
server_id = server['id']
self.created_server_ids.append(server_id)
return server_id
def _volume_clean_up(self, server_id, volume_id):
body = self.volumes_client.show_volume(volume_id)
if body['status'] == 'in-use':
self.servers_client.detach_volume(server_id, volume_id)
self.volumes_client.wait_for_volume_status(volume_id, 'available')
self.volumes_client.delete_volume(volume_id)
def _test_live_block_migration(self, state='ACTIVE'):
"""Tests live block migration between two hosts.
Requires CONF.compute_feature_enabled.live_migration to be True.
:param state: The vm_state the migrated server should be in before and
after the live migration. Supported values are 'ACTIVE'
and 'PAUSED'.
"""
# Live block migrate an instance to another host
if len(self._get_compute_hostnames()) < 2:
raise self.skipTest(
"Less than 2 compute nodes, skipping migration test.")
server_id = self._get_an_active_server()
actual_host = self._get_host_for_server(server_id)
target_host = self._get_host_other_than(actual_host)
if state == 'PAUSED':
self.admin_servers_client.pause_server(server_id)
self.admin_servers_client.wait_for_server_status(server_id, state)
self._migrate_server_to(server_id, target_host)
self.servers_client.wait_for_server_status(server_id, state)
self.assertEqual(target_host, self._get_host_for_server(server_id))
@test.idempotent_id('1dce86b8-eb04-4c03-a9d8-9c1dc3ee0c7b')
@testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
'Live migration not available')
def test_live_block_migration(self):
self._test_live_block_migration()
@test.idempotent_id('1e107f21-61b2-4988-8f22-b196e938ab88')
@testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
'Live migration not available')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@testtools.skipUnless(CONF.compute_feature_enabled
.live_migrate_paused_instances,
'Live migration of paused instances is not '
'available.')
def test_live_block_migration_paused(self):
self._test_live_block_migration(state='PAUSED')
@test.idempotent_id('e19c0cc6-6720-4ed8-be83-b6603ed5c812')
@testtools.skipIf(not CONF.compute_feature_enabled.live_migration or not
CONF.compute_feature_enabled.
block_migration_for_live_migration,
'Block Live migration not available')
@testtools.skipIf(not CONF.compute_feature_enabled.
block_migrate_cinder_iscsi,
'Block Live migration not configured for iSCSI')
def test_iscsi_volume(self):
# Live block migrate an instance to another host
if len(self._get_compute_hostnames()) < 2:
raise self.skipTest(
"Less than 2 compute nodes, skipping migration test.")
server_id = self._get_an_active_server()
actual_host = self._get_host_for_server(server_id)
target_host = self._get_host_other_than(actual_host)
volume = self.volumes_client.create_volume(display_name='test')
self.volumes_client.wait_for_volume_status(volume['id'],
'available')
self.addCleanup(self._volume_clean_up, server_id, volume['id'])
# Attach the volume to the server
self.servers_client.attach_volume(server_id, volume['id'],
device='/dev/xvdb')
self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
self._migrate_server_to(server_id, target_host)
self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
self.assertEqual(target_host, self._get_host_for_server(server_id))
| apache-2.0 | 709,966,433,090,461,300 | -6,934,102,709,259,231,000 | 40.8375 | 78 | 0.63176 | false |
BlackPole/bp-enigma2 | lib/python/Components/Converter/EventName.py | 3 | 2313 | from Components.Converter.Converter import Converter
from Components.Element import cached
from enigma import eEPGCache
class EventName(Converter, object):
NAME = 0
SHORT_DESCRIPTION = 1
EXTENDED_DESCRIPTION = 2
FULL_DESCRIPTION = 3
ID = 4
NEXT_NAME = 5
NEXT_DESCRIPTION = 6
def __init__(self, type):
Converter.__init__(self, type)
self.epgcache = eEPGCache.getInstance()
if type == "Description":
self.type = self.SHORT_DESCRIPTION
elif type == "ExtendedDescription":
self.type = self.EXTENDED_DESCRIPTION
elif type == "FullDescription":
self.type = self.FULL_DESCRIPTION
elif type == "ID":
self.type = self.ID
elif type == "NextName":
self.type = self.NEXT_NAME
elif type == "NextDescription":
self.type = self.NEXT_DESCRIPTION
else:
self.type = self.NAME
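	# Illustrative skin usage (the source and widget names are examples,
	# not mandated by this converter); the inner text selects one of the
	# type strings mapped above:
	#   <widget source="session.Event_Now" render="Label">
	#     <convert type="EventName">ExtendedDescription</convert>
	#   </widget>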
@cached
def getText(self):
event = self.source.event
if event is None:
return ""
if self.type == self.NAME:
return event.getEventName()
elif self.type == self.SHORT_DESCRIPTION:
return event.getShortDescription()
elif self.type == self.EXTENDED_DESCRIPTION:
return event.getExtendedDescription() or event.getShortDescription()
elif self.type == self.FULL_DESCRIPTION:
description = event.getShortDescription()
extended = event.getExtendedDescription()
if description and extended:
description += '\n'
return description + extended
elif self.type == self.ID:
return str(event.getEventId())
elif self.type == self.NEXT_NAME or self.type == self.NEXT_DESCRIPTION:
reference = self.source.service
info = reference and self.source.info
if info is not None:
nextEvent = self.epgcache.lookupEvent(['SETX', (reference.toString(), 1, -1)])
if self.type == self.NEXT_NAME:
return nextEvent[0][2]
else:
if nextEvent[0][1] != "":
return nextEvent[0][1]
else:
return nextEvent[0][0]
return ""
text = property(getText)
| gpl-2.0 | -5,740,423,272,178,641,000 | -7,217,675,377,756,559,000 | 34.045455 | 103 | 0.574578 | false |
cedk/odoo | openerp/addons/base/tests/test_mail_examples.py | 302 | 57129 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
MISC_HTML_SOURCE = """
<font size="2" style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; ">test1</font>
<div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; font-style: normal; ">
<b>test2</b></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<i>test3</i></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<u>test4</u></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<strike>test5</strike></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; ">
<font size="5">test6</font></div><div><ul><li><font color="#1f1f1f" face="monospace" size="2">test7</font></li><li>
<font color="#1f1f1f" face="monospace" size="2">test8</font></li></ul><div><ol><li><font color="#1f1f1f" face="monospace" size="2">test9</font>
</li><li><font color="#1f1f1f" face="monospace" size="2">test10</font></li></ol></div></div>
<blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"><div><div><div><font color="#1f1f1f" face="monospace" size="2">
test11</font></div></div></div></blockquote><blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;">
<blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"><div><font color="#1f1f1f" face="monospace" size="2">
test12</font></div><div><font color="#1f1f1f" face="monospace" size="2"><br></font></div></blockquote></blockquote>
<font color="#1f1f1f" face="monospace" size="2"><a href="http://google.com">google</a></font>
<a href="javascript:alert('malicious code')">test link</a>
"""
EDI_LIKE_HTML_SOURCE = """<div style="font-family: 'Lucica Grande', Ubuntu, Arial, Verdana, sans-serif; font-size: 12px; color: rgb(34, 34, 34); background-color: #FFF; ">
<p>Hello ${object.partner_id.name},</p>
<p>A new invoice is available for you: </p>
<p style="border-left: 1px solid #8e0000; margin-left: 30px;">
<strong>REFERENCES</strong><br />
Invoice number: <strong>${object.number}</strong><br />
Invoice total: <strong>${object.amount_total} ${object.currency_id.name}</strong><br />
Invoice date: ${object.date_invoice}<br />
Order reference: ${object.origin}<br />
Your contact: <a href="mailto:${object.user_id.email or ''}?subject=Invoice%20${object.number}">${object.user_id.name}</a>
</p>
<br/>
<p>It is also possible to directly pay with Paypal:</p>
<a style="margin-left: 120px;" href="${object.paypal_url}">
<img class="oe_edi_paypal_button" src="https://www.paypal.com/en_US/i/btn/btn_paynowCC_LG.gif"/>
</a>
<br/>
<p>If you have any question, do not hesitate to contact us.</p>
<p>Thank you for choosing ${object.company_id.name or 'us'}!</p>
<br/>
<br/>
<div style="width: 375px; margin: 0px; padding: 0px; background-color: #8E0000; border-top-left-radius: 5px 5px; border-top-right-radius: 5px 5px; background-repeat: repeat no-repeat;">
<h3 style="margin: 0px; padding: 2px 14px; font-size: 12px; color: #DDD;">
<strong style="text-transform:uppercase;">${object.company_id.name}</strong></h3>
</div>
<div style="width: 347px; margin: 0px; padding: 5px 14px; line-height: 16px; background-color: #F2F2F2;">
<span style="color: #222; margin-bottom: 5px; display: block; ">
${object.company_id.street}<br/>
${object.company_id.street2}<br/>
${object.company_id.zip} ${object.company_id.city}<br/>
${object.company_id.state_id and ('%s, ' % object.company_id.state_id.name) or ''} ${object.company_id.country_id.name or ''}<br/>
</span>
<div style="margin-top: 0px; margin-right: 0px; margin-bottom: 0px; margin-left: 0px; padding-top: 0px; padding-right: 0px; padding-bottom: 0px; padding-left: 0px; ">
Phone: ${object.company_id.phone}
</div>
<div>
Web : <a href="${object.company_id.website}">${object.company_id.website}</a>
</div>
</div>
</div></body></html>"""
OERP_WEBSITE_HTML_1 = """
<div>
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb16" data-snippet-id="colmd">
<h2>OpenERP HR Features</h2>
<h3 class="text-muted">Manage your company most important asset: People</h3>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-rounded img-responsive" src="/website/static/src/img/china_thumb.jpg">
<h4 class="mt16">Streamline Recruitments</h4>
<p>Post job offers and keep track of each application received. Follow applicants in your recruitment process with the smart kanban view.</p>
<p>Save time by automating some communications with email templates. Resumes are indexed automatically, allowing you to easily find for specific profiles.</p>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-rounded img-responsive" src="/website/static/src/img/desert_thumb.jpg">
<h4 class="mt16">Enterprise Social Network</h4>
<p>Break down information silos. Share knowledge and best practices amongst all employees. Follow specific people or documents and join groups of interests to share expertise and documents.</p>
<p>Interact with your collegues in real time with live chat.</p>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-rounded img-responsive" src="/website/static/src/img/deers_thumb.jpg">
<h4 class="mt16">Leaves Management</h4>
<p>Keep track of the vacation days accrued by each employee. Employees enter their requests (paid holidays, sick leave, etc), for managers to approve and validate. It's all done in just a few clicks. The agenda of each employee is updated accordingly.</p>
</div>
</div>
</div>
</div>"""
OERP_WEBSITE_HTML_1_IN = [
'Manage your company most important asset: People',
'img class="img-rounded img-responsive" src="/website/static/src/img/china_thumb.jpg"',
]
OERP_WEBSITE_HTML_1_OUT = [
'Break down information silos.',
'Keep track of the vacation days accrued by each employee',
'img class="img-rounded img-responsive" src="/website/static/src/img/deers_thumb.jpg',
]
OERP_WEBSITE_HTML_2 = """
<div class="mt16 cke_widget_editable cke_widget_element oe_editable oe_dirty" data-oe-model="blog.post" data-oe-id="6" data-oe-field="content" data-oe-type="html" data-oe-translate="0" data-oe-expression="blog_post.content" data-cke-widget-data="{}" data-cke-widget-keep-attr="0" data-widget="oeref" contenteditable="true" data-cke-widget-editable="text">
<section class="mt16 mb16" data-snippet-id="text-block">
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb32" data-snippet-id="colmd">
<h2>
OpenERP Project Management
</h2>
<h3 class="text-muted">Infinitely flexible. Incredibly easy to use.</h3>
</div>
<div class="col-md-12 mb16 mt16" data-snippet-id="colmd">
<p>
OpenERP's <b>collaborative and realtime</b> project
management helps your team get work done. Keep
track of everything, from the big picture to the
minute details, from the customer contract to the
billing.
</p><p>
Organize projects around <b>your own processes</b>. Work
on tasks and issues using the kanban view, schedule
tasks using the gantt chart and control deadlines
in the calendar view. Every project may have it's
own stages allowing teams to optimize their job.
</p>
</div>
</div>
</div>
</section>
<section class="" data-snippet-id="image-text">
<div class="container">
<div class="row">
<div class="col-md-6 mt16 mb16" data-snippet-id="colmd">
<img class="img-responsive shadow" src="/website/static/src/img/image_text.jpg">
</div>
<div class="col-md-6 mt32" data-snippet-id="colmd">
<h3>Manage Your Shops</h3>
<p>
OpenERP's Point of Sale introduces a super clean
interface with no installation required that runs
online and offline on modern hardwares.
</p><p>
It's full integration with the company inventory
and accounting, gives you real time statistics and
consolidations amongst all shops without the hassle
of integrating several applications.
</p>
</div>
</div>
</div>
</section>
<section class="" data-snippet-id="text-image">
<div class="container">
<div class="row">
<div class="col-md-6 mt32" data-snippet-id="colmd">
<h3>Enterprise Social Network</h3>
<p>
Make every employee feel more connected and engaged
with twitter-like features for your own company. Follow
people, share best practices, 'like' top ideas, etc.
</p><p>
Connect with experts, follow what interests you, share
documents and promote best practices with OpenERP
Social application. Get work done with effective
collaboration across departments, geographies
and business applications.
</p>
</div>
<div class="col-md-6 mt16 mb16" data-snippet-id="colmd">
<img class="img-responsive shadow" src="/website/static/src/img/text_image.png">
</div>
</div>
</div>
</section><section class="" data-snippet-id="portfolio">
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb32" data-snippet-id="colmd">
<h2>Our Porfolio</h2>
<h4 class="text-muted">More than 500 successful projects</h4>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/deers.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/deers.jpg">
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/landscape.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
</div>
</div>
</div>
</section>
</div>
"""
OERP_WEBSITE_HTML_2_IN = [
'management helps your team get work done',
]
OERP_WEBSITE_HTML_2_OUT = [
'Make every employee feel more connected',
'img class="img-responsive shadow" src="/website/static/src/img/text_image.png',
]
TEXT_1 = """I contact you about our meeting tomorrow. Here is the schedule I propose:
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?
--
MySignature"""
TEXT_1_IN = ["""I contact you about our meeting tomorrow. Here is the schedule I propose:
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?"""]
TEXT_1_OUT = ["""--
MySignature"""]
TEXT_2 = """Salut Raoul!
Le 28 oct. 2012 à 00:02, Raoul Grosbedon a écrit :
> I contact you about our meeting tomorrow. Here is the schedule I propose: (quote)
Of course. This seems viable.
> 2012/10/27 Bert Tartopoils :
>> blahblahblah (quote)?
>>
>> blahblahblah (quote)
>>
>> Bert TARTOPOILS
>> [email protected]
>>
>
>
> --
> RaoulSignature
Bert TARTOPOILS
[email protected]
"""
TEXT_2_IN = ["Salut Raoul!", "Of course. This seems viable."]
TEXT_2_OUT = ["I contact you about our meeting tomorrow. Here is the schedule I propose: (quote)",
"""> 2012/10/27 Bert Tartopoils :
>> blahblahblah (quote)?
>>
>> blahblahblah (quote)
>>
>> Bert TARTOPOILS
>> [email protected]
>>
>
>
> --
> RaoulSignature"""]
HTML_1 = """<p>I contact you about our meeting for tomorrow. Here is the schedule I propose: (keep)
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?
--
MySignature</p>"""
HTML_1_IN = ["""I contact you about our meeting for tomorrow. Here is the schedule I propose: (keep)
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?"""]
HTML_1_OUT = ["""--
MySignature"""]
HTML_2 = """<div>
<font><span>I contact you about our meeting for tomorrow. Here is the schedule I propose:</span></font>
</div>
<div>
<ul>
<li><span>9 AM: brainstorming about our new amazing business app</span></li>
<li><span>9.45 AM: summary</span></li>
<li><span>10 AM: meeting with Fabien to present our app</span></li>
</ul>
</div>
<div>
<font><span>Is everything ok for you ?</span></font>
</div>"""
HTML_2_IN = ["<font><span>I contact you about our meeting for tomorrow. Here is the schedule I propose:</span></font>",
"<li><span>9 AM: brainstorming about our new amazing business app</span></li>",
"<li><span>9.45 AM: summary</span></li>",
"<li><span>10 AM: meeting with Fabien to present our app</span></li>",
"<font><span>Is everything ok for you ?</span></font>"]
HTML_2_OUT = []
HTML_3 = """<div><pre>This is an answer.
Regards,
XXXXXX
----- Mail original -----</pre>
<pre>Hi,
My CRM-related question.
Regards,
XXXX</pre></div>"""
HTML_3_IN = ["""<div><pre>This is an answer.
Regards,
XXXXXX
----- Mail original -----</pre>"""]
HTML_3_OUT = ["Hi,", "My CRM-related question.",
"Regards,"]
HTML_4 = """
<div>
<div>Hi Nicholas,</div>
<br>
<div>I'm free now. 00447710085916.</div>
<br>
<div>Regards,</div>
<div>Nicholas</div>
<br>
<span id="OLK_SRC_BODY_SECTION">
<div style="font-family:Calibri; font-size:11pt; text-align:left; color:black; BORDER-BOTTOM: medium none; BORDER-LEFT: medium none; PADDING-BOTTOM: 0in; PADDING-LEFT: 0in; PADDING-RIGHT: 0in; BORDER-TOP: #b5c4df 1pt solid; BORDER-RIGHT: medium none; PADDING-TOP: 3pt">
<span style="font-weight:bold">From: </span>OpenERP Enterprise <<a href="mailto:[email protected]">[email protected]</a>><br><span style="font-weight:bold">Reply-To: </span><<a href="mailto:[email protected]">[email protected]</a>><br><span style="font-weight:bold">Date: </span>Wed, 17 Apr 2013 13:30:47 +0000<br><span style="font-weight:bold">To: </span>Microsoft Office User <<a href="mailto:[email protected]">[email protected]</a>><br><span style="font-weight:bold">Subject: </span>Re: your OpenERP.com registration<br>
</div>
<br>
<div>
<p>Hello Nicholas Saxlund, </p>
<p>I noticed you recently registered to our OpenERP Online solution. </p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ?
</p>
<p>Best regards, </p>
<pre><a href="http://openerp.com">http://openerp.com</a>
Belgium: +32.81.81.37.00
U.S.: +1 (650) 307-6736
India: +91 (79) 40 500 100
</pre>
</div>
</span>
</div>"""
HTML_5 = """<div><pre>Hi,
I have downloaded OpenERP installer 7.0 and successfully installed the postgresql server and the OpenERP.
I created a database and started to install module by log in as administrator.
However, I was not able to install any module due to "OpenERP Server Error" as shown in the attachement.
Could you please let me know how could I fix this problem?
Regards,
Goh Sin Yih
________________________________
From: OpenERP Enterprise <[email protected]>
To: [email protected]
Sent: Friday, February 8, 2013 12:46 AM
Subject: Feedback From Your OpenERP Trial
Hello Goh Sin Yih,
Thank you for having tested OpenERP Online.
I noticed you started a trial of OpenERP Online (gsy) but you did not decide to keep using it.
So, I just wanted to get in touch with you to get your feedback. Can you tell me what kind of application you were you looking for and why you didn't decide to continue with OpenERP?
Thanks in advance for providing your feedback,
Do not hesitate to contact me if you have any questions,
Thanks,
</pre>"""
GMAIL_1 = """Hello,<div><br></div><div>Ok for me. I am replying directly in gmail, without signature.</div><div><br></div><div>Kind regards,</div><div><br></div><div>Demo.<br><br><div>On Thu, Nov 8, 2012 at 5:29 PM, <span><<a href="mailto:[email protected]">[email protected]</a>></span> wrote:<br><blockquote><div>I contact you about our meeting for tomorrow. Here is the schedule I propose:</div><div><ul><li>9 AM: brainstorming about our new amazing business app</span></li></li>
<li>9.45 AM: summary</li><li>10 AM: meeting with Fabien to present our app</li></ul></div><div>Is everything ok for you ?</div>
<div><p>--<br>Administrator</p></div>
<div><p>Log in our portal at: <a href="http://localhost:8069#action=login&db=mail_1&login=demo">http://localhost:8069#action=login&db=mail_1&login=demo</a></p></div>
</blockquote></div><br></div>"""
GMAIL_1_IN = ['Ok for me. I am replying directly in gmail, without signature.']
GMAIL_1_OUT = ['Administrator', 'Log in our portal at:']
THUNDERBIRD_1 = """<div>On 11/08/2012 05:29 PM,
<a href="mailto:[email protected]">[email protected]</a> wrote:<br></div>
<blockquote>
<div>I contact you about our meeting for tomorrow. Here is the
schedule I propose:</div>
<div>
<ul><li>9 AM: brainstorming about our new amazing business
app</span></li></li>
<li>9.45 AM: summary</li>
<li>10 AM: meeting with Fabien to present our app</li>
</ul></div>
<div>Is everything ok for you ?</div>
<div>
<p>--<br>
Administrator</p>
</div>
<div>
<p>Log in our portal at:
<a href="http://localhost:8069#action=login&db=mail_1&token=rHdWcUART5PhEnJRaXjH">http://localhost:8069#action=login&db=mail_1&token=rHdWcUART5PhEnJRaXjH</a></p>
</div>
</blockquote>
Ok for me. I am replying directly below your mail, using Thunderbird, with a signature.<br><br>
Did you receive my email about my new laptop, by the way ?<br><br>
Raoul.<br><pre>--
Raoul Grosbedonnée
</pre>"""
THUNDERBIRD_1_IN = ['Ok for me. I am replying directly below your mail, using Thunderbird, with a signature.']
THUNDERBIRD_1_OUT = ['I contact you about our meeting for tomorrow.', 'Raoul Grosbedon']
HOTMAIL_1 = """<div>
<div dir="ltr"><br>
I have an amazing company, i'm learning OpenERP, it is a small company yet, but plannig to grow up quickly.
<br> <br>Kindest regards,<br>xxx<br>
<div>
<div id="SkyDrivePlaceholder">
</div>
<hr id="stopSpelling">
Subject: Re: your OpenERP.com registration<br>From: [email protected]<br>To: [email protected]<br>Date: Wed, 27 Mar 2013 17:12:12 +0000
<br><br>
Hello xxx,
<br>
I noticed you recently created an OpenERP.com account to access OpenERP Apps.
<br>
You indicated that you wish to use OpenERP in your own company.
We would like to know more about your your business needs and requirements, and see how
we can help you. When would you be available to discuss your project ?<br>
Best regards,<br>
<pre>
<a href="http://openerp.com" target="_blank">http://openerp.com</a>
Belgium: +32.81.81.37.00
U.S.: +1 (650) 307-6736
India: +91 (79) 40 500 100
</pre>
</div>
</div>
</div>"""
HOTMAIL_1_IN = ["I have an amazing company, i'm learning OpenERP, it is a small company yet, but plannig to grow up quickly."]
HOTMAIL_1_OUT = ["Subject: Re: your OpenERP.com registration", " I noticed you recently created an OpenERP.com account to access OpenERP Apps.",
"We would like to know more about your your business needs and requirements", "Belgium: +32.81.81.37.00"]
MSOFFICE_1 = """
<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
Our requirements are simple. Just looking to replace some spreadsheets for tracking quotes and possibly using the timecard module.
We are a company of 25 engineers providing product design services to clients.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
I’ll install on a windows server and run a very limited trial to see how it works.
If we adopt OpenERP we will probably move to Linux or look for a hosted SaaS option.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
<br>
I am also evaluating Adempiere and maybe others.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
I expect the trial will take 2-3 months as this is not a high priority for us.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
Alan
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0in 0in 0in">
<p class="MsoNormal">
<b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
From:
</span></b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
OpenERP Enterprise [mailto:[email protected]]
<br><b>Sent:</b> Monday, 11 March, 2013 14:47<br><b>To:</b> Alan Widmer<br><b>Subject:</b> Re: your OpenERP.com registration
</span>
</p>
<p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Alan Widmer, </p>
<p></p>
<p>I noticed you recently downloaded OpenERP. </p>
<p></p>
<p>
Uou mentioned you wish to use OpenERP in your own company. Please let me more about your
business needs and requirements? When will you be available to discuss about your project?
</p>
<p></p>
<p>Thanks for your interest in OpenERP, </p>
<p></p>
<p>Feel free to contact me if you have any questions, </p>
<p></p>
<p>Looking forward to hear from you soon. </p>
<p></p>
<pre><p> </p></pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre><a href="http://openerp.com">http://openerp.com</a><p></p></pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
MSOFFICE_1_IN = ['Our requirements are simple. Just looking to replace some spreadsheets for tracking quotes and possibly using the timecard module.']
MSOFFICE_1_OUT = ['I noticed you recently downloaded OpenERP.', 'Uou mentioned you wish to use OpenERP in your own company.', 'Belgium: +32.81.81.37.00']
MSOFFICE_2 = """
<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Nicolas,</span></p><p></p>
<p></p>
<p class="MsoNormal" style="text-indent:.5in">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">We are currently investigating the possibility of moving away from our current ERP </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Thank You</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Matt</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Raoul Petitpoil</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Poil Industries</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Information Technology</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">920 Super Street</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Sanchez, Pa 17046 USA</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Tel: xxx.xxx</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Fax: xxx.xxx</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Email: </span>
<a href="mailto:[email protected]">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:blue">[email protected]</span>
</a>
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">www.poilindustries.com</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">www.superproducts.com</span></p><p></p>
<p></p>
</div>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0in 0in 0in">
<p class="MsoNormal">
<b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">From:</span>
</b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif""> OpenERP Enterprise [mailto:[email protected]] <br><b>Sent:</b> Wednesday, April 17, 2013 1:31 PM<br><b>To:</b> Matt Witters<br><b>Subject:</b> Re: your OpenERP.com registration</span></p><p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Raoul Petitpoil, </p>
<p></p>
<p>I noticed you recently downloaded OpenERP. </p>
<p></p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ? </p>
<p></p>
<p>Best regards, </p>
<p></p>
<pre> <p> </p>
</pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre> <a href="http://openerp.com">http://openerp.com</a>
<p></p>
</pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
MSOFFICE_2_IN = ['We are currently investigating the possibility']
MSOFFICE_2_OUT = ['I noticed you recently downloaded OpenERP.', 'You indicated that you wish', 'Belgium: +32.81.81.37.00']
MSOFFICE_3 = """<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Hi Nicolas !</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Yes I’d be glad to hear about your offers as we struggle every year with the planning/approving of LOA. </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">I saw your boss yesterday on tv and immediately wanted to test the interface. </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<p class="MsoNormal">
<b>
<span lang="NL-BE" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Bien à vous, </span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="NL-BE" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Met vriendelijke groeten, </span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Best regards,</span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">
</span></b></p><p><b> </b></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">R. Petitpoil <br></span>
</b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Human Resource Manager<b><br><br>Field Resource s.a n.v. <i> <br></i></b>Hermesstraat 6A <br>1930 Zaventem</span>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Tahoma","sans-serif";color:gray"><br></span>
<b>
<span lang="FR" style="font-size:10.0pt;font-family:Wingdings;color:#1F497D">(</span>
</b>
<b>
<span lang="FR" style="font-size:9.0pt;font-family:Wingdings;color:#1F497D"> </span>
</b>
<b>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">xxx.xxx </span>
</b>
<b>
<span lang="EN-GB" style="font-size:9.0pt;font-family:"Trebuchet MS","sans-serif";color:gray"><br></span>
</b>
<b>
<span lang="FR" style="font-size:10.0pt;font-family:"Wingdings 2";color:#1F497D">7</span>
</b>
<b>
<span lang="FR" style="font-size:9.0pt;font-family:"Wingdings 2";color:#1F497D"> </span>
</b>
<b>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">+32 2 727.05.91<br></span>
</b>
<span lang="EN-GB" style="font-size:24.0pt;font-family:Webdings;color:green">P</span>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Tahoma","sans-serif";color:green"> <b> </b></span>
<b>
<span lang="EN-GB" style="font-size:9.0pt;font-family:"Trebuchet MS","sans-serif";color:green">Please consider the environment before printing this email.</span>
</b>
<span lang="EN-GB" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:navy"> </span>
<span lang="EN-GB" style="font-family:"Calibri","sans-serif";color:navy">
</span></p><p></p>
<p></p>
</div>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0cm 0cm 0cm">
<p class="MsoNormal">
<b>
<span lang="FR" style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span>
</b>
<span lang="FR" style="font-size:10.0pt;font-family:"Tahoma","sans-serif""> OpenERP Enterprise [mailto:[email protected]] <br><b>Envoyé :</b> jeudi 18 avril 2013 11:31<br><b>À :</b> Paul Richard<br><b>Objet :</b> Re: your OpenERP.com registration</span></p><p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Raoul PETITPOIL, </p>
<p></p>
<p>I noticed you recently registered to our OpenERP Online solution. </p>
<p></p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ? </p>
<p></p>
<p>Best regards, </p>
<p></p>
<pre> <p> </p>
</pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre> <a href="http://openerp.com">http://openerp.com</a>
<p></p>
</pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
MSOFFICE_3_IN = ['I saw your boss yesterday']
MSOFFICE_3_OUT = ['I noticed you recently downloaded OpenERP.', 'You indicated that you wish', 'Belgium: +32.81.81.37.00']
# ------------------------------------------------------------
# Test cases coming from bugs
# ------------------------------------------------------------
# bug: read more not apparent, strange message in read more span
BUG1 = """<pre>Hi Migration Team,
Paragraph 1, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Paragraph 2, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Paragraph 3, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Thanks.
Regards,
--
Olivier Laurent
Migration Manager
OpenERP SA
Chaussée de Namur, 40
B-1367 Gérompont
Tel: +32.81.81.37.00
Web: http://www.openerp.com</pre>"""
BUG_1_IN = [
'Hi Migration Team',
'Paragraph 1'
]
BUG_1_OUT = [
'Olivier Laurent',
'Chaussée de Namur',
'81.81.37.00',
'openerp.com',
]
BUG2 = """
<div>
<br>
<div class="moz-forward-container"><br>
<br>
-------- Original Message --------
<table class="moz-email-headers-table" border="0" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Subject:
</th>
<td>Fwd: TR: OpenERP S.A. Payment Reminder</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Date: </th>
<td>Wed, 16 Oct 2013 14:11:13 +0200</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">From: </th>
<td>Christine Herrmann <a class="moz-txt-link-rfc2396E" href="mailto:[email protected]"><[email protected]></a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">To: </th>
<td><a class="moz-txt-link-abbreviated" href="mailto:[email protected]">[email protected]</a></td>
</tr>
</tbody>
</table>
<br>
<br>
<br>
<div class="moz-forward-container"><br>
<br>
-------- Message original --------
<table class="moz-email-headers-table" border="0" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Sujet:
</th>
<td>TR: OpenERP S.A. Payment Reminder</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Date :
</th>
<td>Wed, 16 Oct 2013 10:34:45 -0000</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">De : </th>
<td>Ida Siwatala <a class="moz-txt-link-rfc2396E" href="mailto:[email protected]"><[email protected]></a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Répondre
à : </th>
<td><a class="moz-txt-link-abbreviated" href="mailto:[email protected]">[email protected]</a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Pour :
</th>
<td>Christine Herrmann (che) <a class="moz-txt-link-rfc2396E" href="mailto:[email protected]"><[email protected]></a></td>
</tr>
</tbody>
</table>
<br>
<br>
<div>
<div class="WordSection1">
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Bonjour,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Pourriez-vous
me faire un retour sur ce point.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cordialement</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<div>
<div style="border:none;border-top:solid #B5C4DF
1.0pt;padding:3.0pt 0cm 0cm 0cm">
<p class="MsoNormal"><b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span></b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
Ida Siwatala [<a class="moz-txt-link-freetext" href="mailto:[email protected]">mailto:[email protected]</a>]
<br>
<b>Envoyé :</b> vendredi 4 octobre 2013 20:03<br>
<b>À :</b> 'Followers of
INZO-services-8-all-e-Maxime-Lisbonne-77176-Savigny-le-temple-France'<br>
<b>Objet :</b> RE: OpenERP S.A. Payment Reminder</span></p>
</div>
</div>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Bonsoir,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Je
me permets de revenir vers vous par écrit , car j’ai
fait 2 appels vers votre service en exposant mon
problème, mais je n’ai pas eu de retour.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cela
fait un mois que j’ai fait la souscription de votre
produit, mais je me rends compte qu’il est pas adapté à
ma situation ( fonctionnalité manquante et surtout je
n’ai pas beaucoup de temps à passer à résoudre des
bugs). </span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">C’est
pourquoi , j’ai demandé qu’un accord soit trouvé avec
vous pour annuler le contrat (tout en vous payant le
mois d’utilisation de septembre).</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Pourriez-vous
me faire un retour sur ce point.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cordialement,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Ida
Siwatala</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span></b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
<a href="mailto:[email protected]">[email protected]</a>
[<a href="mailto:[email protected]">mailto:[email protected]</a>]
<br>
<b>Envoyé :</b> vendredi 4 octobre 2013 17:41<br>
<b>À :</b> <a href="mailto:[email protected]">[email protected]</a><br>
<b>Objet :</b> OpenERP S.A. Payment Reminder</span></p>
<p> </p>
<div>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Dear
INZO services,</span></p>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Exception
made if there was a mistake of ours, it seems that the
following amount stays unpaid. Please, take
appropriate measures in order to carry out this
payment in the next 8 days. </span></p>
<p class="MsoNormal" style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222"></span></p>
<p> </p>
<table class="MsoNormalTable" style="width:100.0%;border:outset 1.5pt" width="100%" border="1" cellpadding="0">
<tbody>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Date de facturation</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Description</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Reference</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Due Date</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Amount (€)</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Lit.</p>
</td>
</tr>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013-09-24</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013/1121</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>Enterprise - Inzo Services
- Juillet 2013</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013-09-24</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>420.0</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt"><br>
</td>
</tr>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
</tr>
</tbody>
</table>
<p class="MsoNormal" style="text-align:center;background:white" align="center"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Amount
due : 420.00 € </span></p>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Would
your payment have been carried out after this mail was
sent, please ignore this message. Do not hesitate to
contact our accounting department. </span></p>
<p class="MsoNormal" style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222"><br>
Best Regards, <br>
Aurore Lesage <br>
OpenERP<br>
Chaussée de Namur, 40 <br>
B-1367 Grand Rosières <br>
Tel: +32.81.81.37.00 - Fax: +32.81.73.35.01 <br>
E-mail : <a href="mailto:[email protected]">[email protected]</a> <br>
Web: <a href="http://www.openerp.com">http://www.openerp.com</a></span></p>
</div>
</div>
</div>
--<br>
INZO services <small>Sent by <a style="color:inherit" href="http://www.openerp.com">OpenERP
S.A.</a> using <a style="color:inherit" href="https://www.openerp.com/">OpenERP</a>.</small>
<small>Access your messages and documents <a style="color:inherit" href="https://accounts.openerp.com?db=openerp#action=mail.action_mail_redirect&login=che&message_id=5750830">in
OpenERP</a></small> <br>
<pre class="moz-signature" cols="72">--
Christine Herrmann
OpenERP
Chaussée de Namur, 40
B-1367 Grand Rosières
Tel: +32.81.81.37.00 - Fax: +32.81.73.35.01
Web: <a class="moz-txt-link-freetext" href="http://www.openerp.com">http://www.openerp.com</a> </pre>
<br>
</div>
<br>
<br>
</div>
<br>
</div>"""
BUG_2_IN = [
'read more',
'...',
]
BUG_2_OUT = [
    'Fwd: TR: OpenERP S.A',
    'fait un mois'
]
# BUG 20/08/2014: READ MORE NOT APPEARING
BUG3 = """<div class="oe_msg_body_long" style="/* display: none; */"><p>OpenERP has been upgraded to version 8.0.</p>
<h2>What's new in this upgrade?</h2>
<div class="document">
<ul>
<li><p class="first">New Warehouse Management System:</p>
<blockquote>
<p>Schedule your picking, packing, receptions and internal moves automatically with Odoo using
your own routing rules. Define push and pull rules to organize a warehouse or to manage
product moves between several warehouses. Track in detail all stock moves, not only in your
warehouse but wherever else it's taken as well (customers, suppliers or manufacturing
locations).</p>
</blockquote>
</li>
<li><p class="first">New Product Configurator</p>
</li>
<li><p class="first">Documentation generation from website forum:</p>
<blockquote>
<p>New module to generate a documentation from questions and responses from your forum.
The documentation manager can define a table of content and any user, depending their karma,
can link a question to an entry of this TOC.</p>
</blockquote>
</li>
<li><p class="first">New kanban view of documents (resumes and letters in recruitement, project documents...)</p>
</li>
<li><p class="first">E-Commerce:</p>
<blockquote>
<ul class="simple">
<li>Manage TIN in contact form for B2B.</li>
<li>Dedicated salesteam to easily manage leads and orders.</li>
</ul>
</blockquote>
</li>
<li><p class="first">Better Instant Messaging.</p>
</li>
<li><p class="first">Faster and Improved Search view: Search drawer now appears on top of the results, and is open
by default in reporting views</p>
</li>
<li><p class="first">Improved User Interface:</p>
<blockquote>
<ul class="simple">
<li>Popups has changed to be more responsive on tablets and smartphones.</li>
<li>New Stat Buttons: Forms views have now dynamic buttons showing some statistics abouts linked models.</li>
<li>Color code to check in one look availability of components in an MRP order.</li>
<li>Unified menu bar allows you to switch easily between the frontend (website) and backend</li>
<li>Results panel is now scrollable independently of the menu bars, keeping the navigation,
search bar and view switcher always within reach.</li>
</ul>
</blockquote>
</li>
<li><p class="first">User signature is now in HTML.</p>
</li>
<li><p class="first">New development API.</p>
</li>
<li><p class="first">Remove support for Outlook and Thunderbird plugins</p>
</li>
</ul>
</div>
<p>Enjoy the new OpenERP Online!</p><span class="oe_mail_reduce"><a href="#">read less</a></span></div>"""
BUG_3_IN = [
'read more',
'...',
]
BUG_3_OUT = [
'New kanban view of documents'
]
| agpl-3.0 | 7,526,739,916,708,156,000 | 1,780,429,247,265,313,800 | 47.574468 | 564 | 0.589855 | false |
IODisrupt/OmegaBot | cogs/image.py | 1 | 4412 | import discord
from discord.ext import commands
from random import randint
import aiohttp
import random
class Image:
"""Image related commands."""
def __init__(self, bot):
self.bot = bot
#Reserved for further ... stuff
"""Commands section"""
@commands.command(no_pm=True)
async def imgur(self, *text):
"""Retrieves a random imgur picture
imgur search [keyword] - Retrieves first hit of search query.
imgur [subreddit section] [top or new] - Retrieves top 3 hottest or latest pictures of today for given a subreddit section, e.g. 'funny'."""
imgurclient = ImgurClient("1fd3ef04daf8cab", "f963e574e8e3c17993c933af4f0522e1dc01e230")
if text == ():
rand = randint(0, 59) #60 results per generated page
items = imgurclient.gallery_random(page=0)
await self.bot.say(items[rand].link)
elif text[0] == "search":
items = imgurclient.gallery_search(" ".join(text[1:len(text)]), advanced=None, sort='time', window='all', page=0)
if len(items) < 1:
await self.bot.say("Your search terms gave no results.")
else:
await self.bot.say(items[0].link)
elif text[0] != ():
try:
if text[1] == "top":
imgSort = "top"
elif text[1] == "new":
imgSort = "time"
else:
await self.bot.say("Only top or new is a valid subcommand.")
return
items = imgurclient.subreddit_gallery(text[0], sort=imgSort, window='day', page=0)
if (len(items) < 3):
await self.bot.say("This subreddit section does not exist, try 'funny'")
else:
await self.bot.say("{} {} {}".format(items[0].link, items[1].link, items[2].link))
except:
await self.bot.say("Type help imgur for details.")
@commands.command(no_pm=True)
async def gif(self, *text):
"""Retrieves first search result from giphy
gif [keyword]"""
if len(text) > 0:
if len(text[0]) > 1 and len(text[0]) < 20:
try:
msg = "+".join(text)
search = "http://api.giphy.com/v1/gifs/search?q=" + msg + "&api_key=dc6zaTOxFJmzC"
async with aiohttp.get(search) as r:
result = await r.json()
if result["data"] != []:
url = result["data"][0]["url"]
await self.bot.say(url)
else:
await self.bot.say("Your search terms gave no results.")
except:
await self.bot.say("Error.")
else:
await self.bot.say("Invalid search.")
else:
await self.bot.say("gif [text]")
@commands.command(no_pm=True)
async def gifr(self, *text):
"""Retrieves a random gif from a giphy search
gifr [keyword]"""
random.seed()
if len(text) > 0:
if len(text[0]) > 1 and len(text[0]) < 20:
try:
msg = "+".join(text)
search = "http://api.giphy.com/v1/gifs/search?q=" + msg + "&api_key=dc6zaTOxFJmzC"
async with aiohttp.get(search) as r:
result = await r.json()
if result["data"] != []:
                        # index the list of returned gifs, not the top-level dict
                        maxarray = len(result["data"])
                        url = result["data"][random.randint(0, maxarray - 1)]["url"]
await self.bot.say(url)
else:
await self.bot.say("Your search terms gave no results.")
except:
await self.bot.say("Error.")
else:
await self.bot.say("Invalid search.")
else:
await self.bot.say("gifr [text]")
class ModuleNotFound(Exception):
def __init__(self, m):
self.message = m
def __str__(self):
return self.message
def setup(bot):
global ImgurClient
try:
from imgurpython import ImgurClient
except:
raise ModuleNotFound("imgurpython is not installed. Do 'pip3 install imgurpython' to use this cog.")
bot.add_cog(Image(bot))
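# Minimal loading sketch (assumed discord.py bot wiring; the extension path
# "cogs.image" is illustrative):
#
#   bot.load_extension("cogs.image")   # invokes setup(bot) above
#
# setup() raises ModuleNotFound before registering the cog if imgurpython is
# missing, so the dependency check happens up front.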
| gpl-3.0 | 5,957,932,776,820,038,000 | -2,741,927,806,889,928,000 | 38.392857 | 148 | 0.504306 | false |
hwu25/AppPkg | Applications/Python/Python-2.7.2/Lib/test/test_shlex.py | 13 | 5491 | # -*- coding: iso-8859-1 -*-
import unittest
import shlex
from test import test_support
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# The original test data set was from shellwords, by Hartmut Goebel.
data = r"""x|x|
foo bar|foo|bar|
foo bar|foo|bar|
foo bar |foo|bar|
foo bar bla fasel|foo|bar|bla|fasel|
x y z xxxx|x|y|z|xxxx|
\x bar|\|x|bar|
\ x bar|\|x|bar|
\ bar|\|bar|
foo \x bar|foo|\|x|bar|
foo \ x bar|foo|\|x|bar|
foo \ bar|foo|\|bar|
foo "bar" bla|foo|"bar"|bla|
"foo" "bar" "bla"|"foo"|"bar"|"bla"|
"foo" bar "bla"|"foo"|bar|"bla"|
"foo" bar bla|"foo"|bar|bla|
foo 'bar' bla|foo|'bar'|bla|
'foo' 'bar' 'bla'|'foo'|'bar'|'bla'|
'foo' bar 'bla'|'foo'|bar|'bla'|
'foo' bar bla|'foo'|bar|bla|
blurb foo"bar"bar"fasel" baz|blurb|foo"bar"bar"fasel"|baz|
blurb foo'bar'bar'fasel' baz|blurb|foo'bar'bar'fasel'|baz|
""|""|
''|''|
foo "" bar|foo|""|bar|
foo '' bar|foo|''|bar|
foo "" "" "" bar|foo|""|""|""|bar|
foo '' '' '' bar|foo|''|''|''|bar|
\""|\|""|
"\"|"\"|
"foo\ bar"|"foo\ bar"|
"foo\\ bar"|"foo\\ bar"|
"foo\\ bar\"|"foo\\ bar\"|
"foo\\" bar\""|"foo\\"|bar|\|""|
"foo\\ bar\" dfadf"|"foo\\ bar\"|dfadf"|
"foo\\\ bar\" dfadf"|"foo\\\ bar\"|dfadf"|
"foo\\\x bar\" dfadf"|"foo\\\x bar\"|dfadf"|
"foo\x bar\" dfadf"|"foo\x bar\"|dfadf"|
\''|\|''|
'foo\ bar'|'foo\ bar'|
'foo\\ bar'|'foo\\ bar'|
"foo\\\x bar\" df'a\ 'df'|"foo\\\x bar\"|df'a|\|'df'|
\"foo"|\|"foo"|
\"foo"\x|\|"foo"|\|x|
"foo\x"|"foo\x"|
"foo\ "|"foo\ "|
foo\ xx|foo|\|xx|
foo\ x\x|foo|\|x|\|x|
foo\ x\x\""|foo|\|x|\|x|\|""|
"foo\ x\x"|"foo\ x\x"|
"foo\ x\x\\"|"foo\ x\x\\"|
"foo\ x\x\\""foobar"|"foo\ x\x\\"|"foobar"|
"foo\ x\x\\"\''"foobar"|"foo\ x\x\\"|\|''|"foobar"|
"foo\ x\x\\"\'"fo'obar"|"foo\ x\x\\"|\|'"fo'|obar"|
"foo\ x\x\\"\'"fo'obar" 'don'\''t'|"foo\ x\x\\"|\|'"fo'|obar"|'don'|\|''|t'|
'foo\ bar'|'foo\ bar'|
'foo\\ bar'|'foo\\ bar'|
foo\ bar|foo|\|bar|
foo#bar\nbaz|foobaz|
:-) ;-)|:|-|)|;|-|)|
áéíóú|á|é|í|ó|ú|
"""
posix_data = r"""x|x|
foo bar|foo|bar|
foo bar|foo|bar|
foo bar |foo|bar|
foo bar bla fasel|foo|bar|bla|fasel|
x y z xxxx|x|y|z|xxxx|
\x bar|x|bar|
\ x bar| x|bar|
\ bar| bar|
foo \x bar|foo|x|bar|
foo \ x bar|foo| x|bar|
foo \ bar|foo| bar|
foo "bar" bla|foo|bar|bla|
"foo" "bar" "bla"|foo|bar|bla|
"foo" bar "bla"|foo|bar|bla|
"foo" bar bla|foo|bar|bla|
foo 'bar' bla|foo|bar|bla|
'foo' 'bar' 'bla'|foo|bar|bla|
'foo' bar 'bla'|foo|bar|bla|
'foo' bar bla|foo|bar|bla|
blurb foo"bar"bar"fasel" baz|blurb|foobarbarfasel|baz|
blurb foo'bar'bar'fasel' baz|blurb|foobarbarfasel|baz|
""||
''||
foo "" bar|foo||bar|
foo '' bar|foo||bar|
foo "" "" "" bar|foo||||bar|
foo '' '' '' bar|foo||||bar|
\"|"|
"\""|"|
"foo\ bar"|foo\ bar|
"foo\\ bar"|foo\ bar|
"foo\\ bar\""|foo\ bar"|
"foo\\" bar\"|foo\|bar"|
"foo\\ bar\" dfadf"|foo\ bar" dfadf|
"foo\\\ bar\" dfadf"|foo\\ bar" dfadf|
"foo\\\x bar\" dfadf"|foo\\x bar" dfadf|
"foo\x bar\" dfadf"|foo\x bar" dfadf|
\'|'|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
"foo\\\x bar\" df'a\ 'df"|foo\\x bar" df'a\ 'df|
\"foo|"foo|
\"foo\x|"foox|
"foo\x"|foo\x|
"foo\ "|foo\ |
foo\ xx|foo xx|
foo\ x\x|foo xx|
foo\ x\x\"|foo xx"|
"foo\ x\x"|foo\ x\x|
"foo\ x\x\\"|foo\ x\x\|
"foo\ x\x\\""foobar"|foo\ x\x\foobar|
"foo\ x\x\\"\'"foobar"|foo\ x\x\'foobar|
"foo\ x\x\\"\'"fo'obar"|foo\ x\x\'fo'obar|
"foo\ x\x\\"\'"fo'obar" 'don'\''t'|foo\ x\x\'fo'obar|don't|
"foo\ x\x\\"\'"fo'obar" 'don'\''t' \\|foo\ x\x\'fo'obar|don't|\|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
foo\ bar|foo bar|
foo#bar\nbaz|foo|baz|
:-) ;-)|:-)|;-)|
áéíóú|áéíóú|
"""
class ShlexTest(unittest.TestCase):
def setUp(self):
self.data = [x.split("|")[:-1]
for x in data.splitlines()]
self.posix_data = [x.split("|")[:-1]
for x in posix_data.splitlines()]
for item in self.data:
item[0] = item[0].replace(r"\n", "\n")
for item in self.posix_data:
item[0] = item[0].replace(r"\n", "\n")
def splitTest(self, data, comments):
for i in range(len(data)):
l = shlex.split(data[i][0], comments=comments)
self.assertEqual(l, data[i][1:],
"%s: %s != %s" %
(data[i][0], l, data[i][1:]))
def oldSplit(self, s):
ret = []
lex = shlex.shlex(StringIO(s))
tok = lex.get_token()
while tok:
ret.append(tok)
tok = lex.get_token()
return ret
def testSplitPosix(self):
"""Test data splitting with posix parser"""
self.splitTest(self.posix_data, comments=True)
def testCompat(self):
"""Test compatibility interface"""
for i in range(len(self.data)):
l = self.oldSplit(self.data[i][0])
self.assertEqual(l, self.data[i][1:],
"%s: %s != %s" %
(self.data[i][0], l, self.data[i][1:]))
# Allow this test to be used with old shlex.py
if not getattr(shlex, "split", None):
for methname in dir(ShlexTest):
if methname.startswith("test") and methname != "testCompat":
delattr(ShlexTest, methname)
def test_main():
test_support.run_unittest(ShlexTest)
if __name__ == "__main__":
test_main()
| bsd-2-clause | -6,064,839,853,698,465,000 | -4,259,884,957,557,224,000 | 26.748691 | 76 | 0.50774 | false |
SophieBartmann/Faust-Bot | FaustBot/Modules/JokeObserver.py | 1 | 6933 | import random
import time
from FaustBot.Communication import Connection
from FaustBot.Modules.PrivMsgObserverPrototype import PrivMsgObserverPrototype
jokes = [['Was ist orange und geht über die Berge?'
,'Eine Wanderine.']
,['Was ist orange und schaut durchs Schlüsselloch?'
,'Eine Spannderine.']
,['Was ist violett und sitzt in der Kirche ganz vorne?'
,'Eine Frommbeere.']
,['Was ist grün und liegt im Sarg?'
,'Ein Sterbschen.']
,['Was ist bunt und läuft über den Tisch davon?'
,'Ein Fluchtsalat.']
,['Was ist braun und schwimmt im Wasser?'
,'Ein U-Brot.']
,['Was ist schwarz/weiß und hüpft von Eisscholle zu Eisscholle?'
,'Ein Springuin.']
,['Was ist rot und sitzt auf dem WC?'
,'Eine Klomate!']
,['Was ist braun und fährt einen verschneiten Hang hinunter?'
,'Ein Snowbrot.']
,['Was ist braun und späht durchs Schlafzimmerfenster?'
,'Ein Spannzapfen.']
,['Was ist weiß und springt im Wald umher?'
,'Ein Jumpignon.']
,['Was ist braun, süß und rennt durch den Wald?'
,'Eine Joggolade.']
,['Was ist braun und sitzt hinter Gittern?'
,'Eine Knastanie.']
,['Was ist rot, rund und hat ein Maschinengewehr?'
,'Ein Rambodischen.']
,['Was ist braun, knusprig und läuft mit dem Korb durch den Wald?'
,'Brotkäppchen.']
,['Was ist braun, klebrig und läuft in der Wüste umher?'
,'Ein Karamel.']
,['Was ist rot, sitzt in einer Konservendose und spielt Musik?'
,'Ein Radioli.']
,['Was ist grün und radelt durch die Gegend?'
,'Eine Velone.']
,['Was ist orange, tiefergelegt und hat einen Spoiler?'
,'Ein Mantarinchen']
,['Was ist gelb, krumm und schwimmt auf dem Wasser?'
,'Eine Schwanane']
,['Was ist orange und steckt traurig in der Erde?'
,'Ein Trübchen.']
,['Was ist orange, sauer und kann keine Minute ruhig sitzen?'
,'Eine Zappelsine.']
,['Was ist haarig und wird in der Pfanne frittiert?'
,'Bartkartoffeln.']
,['Was ist gesund und kräftig und spielt den Beleidigten?'
,'Ein Schmollkornbrot.']
,['Was steht im Schlafzimmer des Metzgers neben dem Bett?'
,'Ein Schlachttischlämpchen.']
,['Was ist grün, sauer und versteckt sich vor der Polizei?'
,'Ein Essig-Schurke.']
,['Was ist orange, rund und versteckt sich vor der Polizei?'
,'Ein Vandalinchen.']
,['Was ist grün und schaut durchs Schlüsselloch?'
,'Ein Spionat']
,['Was ist groß, grau und telefoniert aus Afrika?'
,'Ein Telefant.']
,['Was ist gelb und flattert im Wind?'
,'Eine Fahnane.']
,['Was ist grün und klopft an die Tür?'
,'Ein Klopfsalat.']
,['Was ist braun, sehr zäh und fliegt umher?'
,'Eine Ledermaus.']
,['Was macht "Muh" und hilft beim Anziehen?'
,'Ein Kuhlöffel.']
,['Was ist viereckig, hat Noppen und einen Sprachfehler?'
,'Ein Legosteniker.']
,['Was ist gelb und immer bekifft?'
,'Ein Bong-Frites.']
,['Was ist grün, glücklich und hüpft von Grashalm zu Grashalm?'
,'Eine Freuschrecke.']
,['Was ist ist braun, hat einen Beutel und hängt am Baum?'
,'Ein Hänguruh.']
,['Was ist orange-rot und riskiert alles?'
,'Eine Mutorange']
,['Was ist gelb, ölig und und sitzt in der Kirche in der ersten Reihe?'
,'Eine Frommfrites']
,['Was ist grün und irrt durch Istanbul?'
,'Ein Gürke']
,['Was ist hellbraun und hangelt sich von Tortenstück zu Tortenstück?'
,'Ein Tarzipan.']
,['Was ist braun und klebt an der Wand?'
,'Ein Klebkuchen']
,['Was ist rot und läuft die Straße auf und ab?'
,'Eine Hagenutte.']
,['Was ist weiss und läuft die Straße auf und ab?'
,'Schneeflittchen.']
,['Was ist grün und läuft die Straße auf und ab?'
,'Eine Frosch-tituierte.']
,['Was ist braun und trägt Strapse?'
,'Ein Haselnüttchen.']
,['Was ist gelb und steht frankiert und abgestempelt am Strassenrand?'
,'Eine Postituierte.']
,['Was leuchtet und geht fremd?'
,'Ein Schlampion.']
,['Was ist gelb und rutscht den Hang hinunter?'
,'Ein Cremeschlitten.']
,['Was ist weiss und tanzt ums Feuer?'
,'Rumpelpilzchen.']
,['Was ist weiss und liegt schnarchend auf der Wiese?'
,'Ein Schlaf.']
,['Was ist gelb, saftig und sitzt bei jedem Fussballspiel vor dem Fernseher?'
,'Eine Fananas.']
,['Was ist rosa und schwimmt im Wasser?'
,'Eine Meerjungsau.']
,['Was ist durchsichtig, stinkt und es ist ihm alles egal?'
,'Ein Schnurz.']
,['Was ist unordentlich und gibt Licht?'
,'Eine Schlampe.']
,['Was ist blöd, süß und bunt?'
,'Ein Dummibärchen.']
,['Was trägt einen Frack und hilft im Haushalt?'
,'Ein Diener Schnitzel.']
,['Was ist silbrig, sticht und hat Spass daran?'
,'Eine Sadistel.']
,['Was ist gelb und kann schießen?'
,'Eine Banone']
,['Was kommt nach Elch?'
,'Zwölch']
,['Was liegt am Strand und spricht undeutlich?'
,'Eine Nuschel']
,['Was hüpft über die Wiese und raucht?'
,'Ein Kaminchen']
,['Was ist knusprig und liegt unterm Baum?'
,'Schattenplätzle']
,['Kleines Schwein das nach Hilfe schreit?'
,'Ein Notrufsäule']
,['Was liegt am Strand und hat Schnupfen?'
,'Eine Niesmuschel']
,['Was ist ein Cowboy ohne Pferd?'
,'Ein Sattelschlepper']
,['Was ist grün und trägt Kopftuch?'
,'Eine Gürkin']
,['Was ist rot und sitzt unterm Tisch?'
,'Ne Paprikantin']
,['Was ist schwarz-weiß und kommt nicht vom Fleck?'
,'Ein Klebra']
,['Was ist rosa, quiekt und wird zum Hausbau verwendet?'
,'Ein Ziegelschwein']
,['Wer ist bei jeder Wanderung betrunken?'
,'Der Schlucksack']
,['Was ist rot und wiehert?'
,'Die Pferdbeere']
,['Was ist weiß, blau, grün und steht auf der Wiese?'
,'Eine Schlumpfdotterblume']
,['Was kaut und hat immer Verspätung?'
,'Die Essbahn']
,['Was fährt unter der Erde und macht Muh?'
,'Die Kuhbahn']
,['Was wühlt den Himmel auf?'
,'Ein Pflugzeug']
,['Welche Frucht wächst im Gerichtssaal?'
,'Advokado']
,['Wie nennt man einen “scharfen” Mann mit Kilt?'
,'Chilischotte']
,['Was lebt im Meer und kann gut rechnen?'
,'Der Octoplus']
,['Was ist tiefergelegt und schwimmt unter wasser?'
,'Der Tunefisch']
,['Was ist unter der Erde und stinkt?'
,'Eine Furzel']
,['Von was wird man nachts beobachtet?'
,'Vom Spannbettlaken']
,['Wo wohnen die meisten Katzen?'
,'Im Miezhaus']
,['Warum ging der Luftballon kaputt?'
,'Aus Platzgründen']
,['Wie nennt man einen ausgehungerten Frosch?'
,'Magerquak']
,['Was macht ein Dieb im Zirkus?'
,'Clown']
,['Was macht ein Clown im Büro?'
,'Faxen']
,['Wie nennt man eine Zauberin in der Wüste?'
,'Sand Witch']
,['Wo betrinkt sich eine Mücke?'
,'In Sekt']
,['Warum können Seeräuber keine Kreise berechnen?'
,'Weil sie pi raten']
,['Was sitzt in der Savanne und wäscht sich?'
,'Die Hygiäne']
,['Was sitzt im Dschungel und spielt unfair?'
,'Mogli']
,['Wie nennt man den Paarungsruf von Leutstofflampen?'
,'Neonröhren']]
class JokeObserver(PrivMsgObserverPrototype):
@staticmethod
def cmd():
return [".joke"]
@staticmethod
def help():
return ".joke erzählt einen Flachwitz"
def update_on_priv_msg(self, data: dict, connection: Connection):
if data['message'].find('.joke') == -1:
return
joke = random.choice(jokes)
connection.send_back(joke[0], data)
time.sleep(30)
connection.send_back(joke[1], data)
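# Illustrative IRC exchange (assumed observer wiring, not executed):
#
#   <user> .joke
#   <bot>  Was ist orange und geht über die Berge?
#          ... 30 second pause ...
#   <bot>  Eine Wanderine.
#
# update_on_priv_msg() above sends the question, sleeps 30 seconds, then
# delivers the punchline via connection.send_back().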
| gpl-3.0 | -6,023,490,619,578,700,000 | -6,499,404,054,048,015,000 | 31.779904 | 78 | 0.703547 | false |
jendap/tensorflow | tensorflow/__init__.py | 29 | 1685 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Bring in all of the public TensorFlow interface into this
# module.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-bad-import-order
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
from tensorflow.python.util.lazy_loader import LazyLoader
contrib = LazyLoader('contrib', globals(), 'tensorflow.contrib')
del LazyLoader
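# With the LazyLoader above, tensorflow.contrib is imported only on first
# attribute access; e.g. (illustrative) `tf.contrib.layers` triggers the real
# import at that point rather than at `import tensorflow` time.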
from tensorflow.python.platform import flags # pylint: disable=g-import-not-at-top
from tensorflow.python.platform import app # pylint: disable=g-import-not-at-top
app.flags = flags
del absolute_import
del division
del print_function
# These symbols appear because we import the python package which
# in turn imports from tensorflow.core and tensorflow.python. They
# must come from this module. So python adds these symbols for the
# resolution to succeed.
# pylint: disable=undefined-variable
del python
del core
# pylint: enable=undefined-variable
| apache-2.0 | -7,349,235,000,991,879,000 | 5,385,219,331,095,604,000 | 36.444444 | 83 | 0.743027 | false |
gangadhar-kadam/verve_erp | erpnext/controllers/stock_controller.py | 6 | 11036 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, flt, cstr
from frappe import msgprint, _
import frappe.defaults
from erpnext.controllers.accounts_controller import AccountsController
from erpnext.accounts.general_ledger import make_gl_entries, delete_gl_entries, process_gl_map
class StockController(AccountsController):
def make_gl_entries(self, repost_future_gle=True):
if self.docstatus == 2:
delete_gl_entries(voucher_type=self.doctype, voucher_no=self.name)
if cint(frappe.defaults.get_global_default("auto_accounting_for_stock")):
warehouse_account = get_warehouse_account()
if self.docstatus==1:
gl_entries = self.get_gl_entries(warehouse_account)
make_gl_entries(gl_entries)
if repost_future_gle:
items, warehouses = self.get_items_and_warehouses()
update_gl_entries_after(self.posting_date, self.posting_time, warehouses, items,
warehouse_account)
def get_gl_entries(self, warehouse_account=None, default_expense_account=None,
default_cost_center=None):
if not warehouse_account:
warehouse_account = get_warehouse_account()
sle_map = self.get_stock_ledger_details()
voucher_details = self.get_voucher_details(default_expense_account, default_cost_center, sle_map)
gl_list = []
warehouse_with_no_account = []
for detail in voucher_details:
sle_list = sle_map.get(detail.name)
if sle_list:
for sle in sle_list:
if warehouse_account.get(sle.warehouse):
# from warehouse account
self.check_expense_account(detail)
gl_list.append(self.get_gl_dict({
"account": warehouse_account[sle.warehouse],
"against": detail.expense_account,
"cost_center": detail.cost_center,
"remarks": self.get("remarks") or "Accounting Entry for Stock",
"debit": flt(sle.stock_value_difference, 2)
}))
# to target warehouse / expense account
gl_list.append(self.get_gl_dict({
"account": detail.expense_account,
"against": warehouse_account[sle.warehouse],
"cost_center": detail.cost_center,
"remarks": self.get("remarks") or "Accounting Entry for Stock",
"credit": flt(sle.stock_value_difference, 2)
}))
elif sle.warehouse not in warehouse_with_no_account:
warehouse_with_no_account.append(sle.warehouse)
if warehouse_with_no_account:
msgprint(_("No accounting entries for the following warehouses") + ": \n" +
"\n".join(warehouse_with_no_account))
return process_gl_map(gl_list)
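	# Illustrative sketch (hypothetical account names, not executed): for a
	# stock receipt worth 100 into "Stores - C", get_gl_entries() yields a
	# balanced pair along the lines of
	#
	#   {"account": "Stores - C",  "debit": 100,  "against": "Expense - C", ...}
	#   {"account": "Expense - C", "credit": 100, "against": "Stores - C", ...}
	#
	# i.e. each stock value change is posted once against the warehouse
	# account and once against the expense/difference account.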
def get_voucher_details(self, default_expense_account, default_cost_center, sle_map):
if self.doctype == "Stock Reconciliation":
return [frappe._dict({ "name": voucher_detail_no, "expense_account": default_expense_account,
"cost_center": default_cost_center }) for voucher_detail_no, sle in sle_map.items()]
else:
details = self.get("items")
if default_expense_account or default_cost_center:
for d in details:
if default_expense_account and not d.get("expense_account"):
d.expense_account = default_expense_account
if default_cost_center and not d.get("cost_center"):
d.cost_center = default_cost_center
return details
def get_items_and_warehouses(self):
items, warehouses = [], []
if hasattr(self, "items"):
item_doclist = self.get("items")
elif self.doctype == "Stock Reconciliation":
import json
item_doclist = []
data = json.loads(self.reconciliation_json)
for row in data[data.index(self.head_row)+1:]:
d = frappe._dict(zip(["item_code", "warehouse", "qty", "valuation_rate"], row))
item_doclist.append(d)
if item_doclist:
for d in item_doclist:
if d.item_code and d.item_code not in items:
items.append(d.item_code)
if d.get("warehouse") and d.warehouse not in warehouses:
warehouses.append(d.warehouse)
if self.doctype == "Stock Entry":
if d.get("s_warehouse") and d.s_warehouse not in warehouses:
warehouses.append(d.s_warehouse)
if d.get("t_warehouse") and d.t_warehouse not in warehouses:
warehouses.append(d.t_warehouse)
return items, warehouses
def get_stock_ledger_details(self):
stock_ledger = {}
for sle in frappe.db.sql("""select warehouse, stock_value_difference,
voucher_detail_no, item_code, posting_date, actual_qty
from `tabStock Ledger Entry` where voucher_type=%s and voucher_no=%s""",
(self.doctype, self.name), as_dict=True):
stock_ledger.setdefault(sle.voucher_detail_no, []).append(sle)
return stock_ledger
def make_adjustment_entry(self, expected_gle, voucher_obj):
from erpnext.accounts.utils import get_stock_and_account_difference
account_list = [d.account for d in expected_gle]
acc_diff = get_stock_and_account_difference(account_list, expected_gle[0].posting_date)
cost_center = self.get_company_default("cost_center")
stock_adjustment_account = self.get_company_default("stock_adjustment_account")
gl_entries = []
for account, diff in acc_diff.items():
if diff:
gl_entries.append([
# stock in hand account
voucher_obj.get_gl_dict({
"account": account,
"against": stock_adjustment_account,
"debit": diff,
"remarks": "Adjustment Accounting Entry for Stock",
}),
# account against stock in hand
voucher_obj.get_gl_dict({
"account": stock_adjustment_account,
"against": account,
"credit": diff,
"cost_center": cost_center or None,
"remarks": "Adjustment Accounting Entry for Stock",
}),
])
if gl_entries:
from erpnext.accounts.general_ledger import make_gl_entries
make_gl_entries(gl_entries)
def check_expense_account(self, item):
if not item.get("expense_account"):
frappe.throw(_("Expense or Difference account is mandatory for Item {0} as it impacts overall stock value").format(item.item_code))
else:
is_expense_account = frappe.db.get_value("Account",
item.get("expense_account"), "report_type")=="Profit and Loss"
if self.doctype not in ("Purchase Receipt", "Stock Reconciliation", "Stock Entry") and not is_expense_account:
frappe.throw(_("Expense / Difference account ({0}) must be a 'Profit or Loss' account")
.format(item.get("expense_account")))
if is_expense_account and not item.get("cost_center"):
frappe.throw(_("{0} {1}: Cost Center is mandatory for Item {2}").format(
_(self.doctype), self.name, item.get("item_code")))
def get_sl_entries(self, d, args):
sl_dict = frappe._dict({
"item_code": d.get("item_code", None),
"warehouse": d.get("warehouse", None),
"posting_date": self.posting_date,
"posting_time": self.posting_time,
"voucher_type": self.doctype,
"voucher_no": self.name,
"voucher_detail_no": d.name,
"actual_qty": (self.docstatus==1 and 1 or -1)*flt(d.get("stock_qty")),
"stock_uom": d.get("stock_uom"),
"incoming_rate": 0,
"company": self.company,
"fiscal_year": self.fiscal_year,
"batch_no": cstr(d.get("batch_no")).strip(),
"serial_no": d.get("serial_no"),
"project": d.get("project_name"),
"is_cancelled": self.docstatus==2 and "Yes" or "No"
})
sl_dict.update(args)
return sl_dict
def make_sl_entries(self, sl_entries, is_amended=None, allow_negative_stock=False):
from erpnext.stock.stock_ledger import make_sl_entries
make_sl_entries(sl_entries, is_amended, allow_negative_stock)
def make_gl_entries_on_cancel(self):
if frappe.db.sql("""select name from `tabGL Entry` where voucher_type=%s
and voucher_no=%s""", (self.doctype, self.name)):
self.make_gl_entries()
def get_serialized_items(self):
serialized_items = []
item_codes = list(set([d.item_code for d in self.get("items")]))
if item_codes:
serialized_items = frappe.db.sql_list("""select name from `tabItem`
where has_serial_no='Yes' and name in ({})""".format(", ".join(["%s"]*len(item_codes))),
tuple(item_codes))
return serialized_items
def update_gl_entries_after(posting_date, posting_time, for_warehouses=None, for_items=None,
warehouse_account=None):
def _delete_gl_entries(voucher_type, voucher_no):
frappe.db.sql("""delete from `tabGL Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no))
if not warehouse_account:
warehouse_account = get_warehouse_account()
future_stock_vouchers = get_future_stock_vouchers(posting_date, posting_time, for_warehouses, for_items)
gle = get_voucherwise_gl_entries(future_stock_vouchers, posting_date)
for voucher_type, voucher_no in future_stock_vouchers:
existing_gle = gle.get((voucher_type, voucher_no), [])
voucher_obj = frappe.get_doc(voucher_type, voucher_no)
expected_gle = voucher_obj.get_gl_entries(warehouse_account)
if expected_gle:
if not existing_gle or not compare_existing_and_expected_gle(existing_gle,
expected_gle):
_delete_gl_entries(voucher_type, voucher_no)
voucher_obj.make_gl_entries(repost_future_gle=False)
else:
_delete_gl_entries(voucher_type, voucher_no)
def compare_existing_and_expected_gle(existing_gle, expected_gle):
matched = True
for entry in expected_gle:
for e in existing_gle:
if entry.account==e.account and entry.against_account==e.against_account \
and (not entry.cost_center or not e.cost_center or entry.cost_center==e.cost_center) \
and (entry.debit != e.debit or entry.credit != e.credit):
matched = False
break
return matched
def get_future_stock_vouchers(posting_date, posting_time, for_warehouses=None, for_items=None):
future_stock_vouchers = []
values = []
condition = ""
if for_items:
condition += " and item_code in ({})".format(", ".join(["%s"] * len(for_items)))
values += for_items
if for_warehouses:
condition += " and warehouse in ({})".format(", ".join(["%s"] * len(for_warehouses)))
values += for_warehouses
for d in frappe.db.sql("""select distinct sle.voucher_type, sle.voucher_no
from `tabStock Ledger Entry` sle
where timestamp(sle.posting_date, sle.posting_time) >= timestamp(%s, %s) {condition}
order by timestamp(sle.posting_date, sle.posting_time) asc, name asc""".format(condition=condition),
tuple([posting_date, posting_time] + values), as_dict=True):
future_stock_vouchers.append([d.voucher_type, d.voucher_no])
return future_stock_vouchers
def get_voucherwise_gl_entries(future_stock_vouchers, posting_date):
gl_entries = {}
if future_stock_vouchers:
for d in frappe.db.sql("""select * from `tabGL Entry`
where posting_date >= %s and voucher_no in (%s)""" %
('%s', ', '.join(['%s']*len(future_stock_vouchers))),
tuple([posting_date] + [d[1] for d in future_stock_vouchers]), as_dict=1):
gl_entries.setdefault((d.voucher_type, d.voucher_no), []).append(d)
return gl_entries
def get_warehouse_account():
warehouse_account = dict(frappe.db.sql("""select warehouse, name from tabAccount
where account_type = 'Warehouse' and ifnull(warehouse, '') != ''"""))
return warehouse_account
| agpl-3.0 | -5,614,257,735,374,155,000 | 3,448,003,356,684,375,000 | 37.055172 | 134 | 0.692189 | false |
buuck/root | interpreter/llvm/src/tools/clang/utils/ABITest/Enumeration.py | 110 | 7814 | """Utilities for enumeration of finite and countably infinite sets.
"""
###
# Countable iteration
# Simplifies some calculations
class Aleph0(int):
_singleton = None
def __new__(type):
if type._singleton is None:
type._singleton = int.__new__(type)
return type._singleton
def __repr__(self): return '<aleph0>'
def __str__(self): return 'inf'
def __cmp__(self, b):
return 1
def __sub__(self, b):
raise ValueError,"Cannot subtract aleph0"
__rsub__ = __sub__
def __add__(self, b):
return self
__radd__ = __add__
def __mul__(self, b):
if b == 0: return b
return self
__rmul__ = __mul__
def __floordiv__(self, b):
if b == 0: raise ZeroDivisionError
return self
__rfloordiv__ = __floordiv__
__truediv__ = __floordiv__
    __rtruediv__ = __floordiv__
__div__ = __floordiv__
__rdiv__ = __floordiv__
def __pow__(self, b):
if b == 0: return 1
return self
aleph0 = Aleph0()
def base(line):
return line*(line+1)//2
def pairToN((x,y)):
line,index = x+y,y
return base(line)+index
def getNthPairInfo(N):
# Avoid various singularities
if N==0:
return (0,0)
# Gallop to find bounds for line
line = 1
next = 2
while base(next)<=N:
line = next
next = line << 1
# Binary search for starting line
lo = line
hi = line<<1
while lo + 1 != hi:
#assert base(lo) <= N < base(hi)
mid = (lo + hi)>>1
if base(mid)<=N:
lo = mid
else:
hi = mid
line = lo
return line, N - base(line)
def getNthPair(N):
line,index = getNthPairInfo(N)
return (line - index, index)
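# The pairing walks the anti-diagonals x+y = 0, 1, 2, ... of the quarter
# plane, so (illustrative):
#
#   >>> [getNthPair(n) for n in range(6)]
#   [(0, 0), (1, 0), (0, 1), (2, 0), (1, 1), (0, 2)]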
def getNthPairBounded(N,W=aleph0,H=aleph0,useDivmod=False):
"""getNthPairBounded(N, W, H) -> (x, y)
Return the N-th pair such that 0 <= x < W and 0 <= y < H."""
if W <= 0 or H <= 0:
raise ValueError,"Invalid bounds"
elif N >= W*H:
raise ValueError,"Invalid input (out of bounds)"
# Simple case...
if W is aleph0 and H is aleph0:
return getNthPair(N)
# Otherwise simplify by assuming W < H
if H < W:
x,y = getNthPairBounded(N,H,W,useDivmod=useDivmod)
return y,x
if useDivmod:
return N%W,N//W
else:
# Conceptually we want to slide a diagonal line across a
# rectangle. This gives more interesting results for large
# bounds than using divmod.
# If in lower left, just return as usual
cornerSize = base(W)
if N < cornerSize:
return getNthPair(N)
# Otherwise if in upper right, subtract from corner
if H is not aleph0:
M = W*H - N - 1
if M < cornerSize:
x,y = getNthPair(M)
return (W-1-x,H-1-y)
# Otherwise, compile line and index from number of times we
# wrap.
N = N - cornerSize
index,offset = N%W,N//W
# p = (W-1, 1+offset) + (-1,1)*index
return (W-1-index, 1+offset+index)
def getNthPairBoundedChecked(N,W=aleph0,H=aleph0,useDivmod=False,GNP=getNthPairBounded):
x,y = GNP(N,W,H,useDivmod)
assert 0 <= x < W and 0 <= y < H
return x,y
def getNthNTuple(N, W, H=aleph0, useLeftToRight=False):
"""getNthNTuple(N, W, H) -> (x_0, x_1, ..., x_W)
Return the N-th W-tuple, where for 0 <= x_i < H."""
if useLeftToRight:
elts = [None]*W
for i in range(W):
elts[i],N = getNthPairBounded(N, H)
return tuple(elts)
else:
if W==0:
return ()
elif W==1:
return (N,)
elif W==2:
return getNthPairBounded(N, H, H)
else:
LW,RW = W//2, W - (W//2)
L,R = getNthPairBounded(N, H**LW, H**RW)
return (getNthNTuple(L,LW,H=H,useLeftToRight=useLeftToRight) +
getNthNTuple(R,RW,H=H,useLeftToRight=useLeftToRight))
def getNthNTupleChecked(N, W, H=aleph0, useLeftToRight=False, GNT=getNthNTuple):
t = GNT(N,W,H,useLeftToRight)
assert len(t) == W
for i in t:
assert i < H
return t
def getNthTuple(N, maxSize=aleph0, maxElement=aleph0, useDivmod=False, useLeftToRight=False):
"""getNthTuple(N, maxSize, maxElement) -> x
Return the N-th tuple where len(x) < maxSize and for y in x, 0 <=
y < maxElement."""
# All zero sized tuples are isomorphic, don't ya know.
if N == 0:
return ()
N -= 1
if maxElement is not aleph0:
if maxSize is aleph0:
raise NotImplementedError,'Max element size without max size unhandled'
bounds = [maxElement**i for i in range(1, maxSize+1)]
S,M = getNthPairVariableBounds(N, bounds)
else:
S,M = getNthPairBounded(N, maxSize, useDivmod=useDivmod)
return getNthNTuple(M, S+1, maxElement, useLeftToRight=useLeftToRight)
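# The unbounded enumeration interleaves tuple sizes with element values;
# concretely (illustrative):
#
#   >>> [getNthTuple(n) for n in range(4)]
#   [(), (0,), (0, 0), (1,)]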
def getNthTupleChecked(N, maxSize=aleph0, maxElement=aleph0,
useDivmod=False, useLeftToRight=False, GNT=getNthTuple):
# FIXME: maxsize is inclusive
t = GNT(N,maxSize,maxElement,useDivmod,useLeftToRight)
assert len(t) <= maxSize
for i in t:
assert i < maxElement
return t
def getNthPairVariableBounds(N, bounds):
"""getNthPairVariableBounds(N, bounds) -> (x, y)
Given a finite list of bounds (which may be finite or aleph0),
return the N-th pair such that 0 <= x < len(bounds) and 0 <= y <
bounds[x]."""
if not bounds:
raise ValueError,"Invalid bounds"
if not (0 <= N < sum(bounds)):
raise ValueError,"Invalid input (out of bounds)"
level = 0
active = range(len(bounds))
active.sort(key=lambda i: bounds[i])
prevLevel = 0
for i,index in enumerate(active):
level = bounds[index]
W = len(active) - i
if level is aleph0:
H = aleph0
else:
H = level - prevLevel
levelSize = W*H
if N<levelSize: # Found the level
idelta,delta = getNthPairBounded(N, W, H)
return active[i+idelta],prevLevel+delta
else:
N -= levelSize
prevLevel = level
else:
raise RuntimError,"Unexpected loop completion"
def getNthPairVariableBoundsChecked(N, bounds, GNVP=getNthPairVariableBounds):
x,y = GNVP(N,bounds)
assert 0 <= x < len(bounds) and 0 <= y < bounds[x]
return (x,y)
###
def testPairs():
W = 3
H = 6
a = [[' ' for x in range(10)] for y in range(10)]
b = [[' ' for x in range(10)] for y in range(10)]
for i in range(min(W*H,40)):
x,y = getNthPairBounded(i,W,H)
x2,y2 = getNthPairBounded(i,W,H,useDivmod=True)
print i,(x,y),(x2,y2)
a[y][x] = '%2d'%i
b[y2][x2] = '%2d'%i
print '-- a --'
for ln in a[::-1]:
if ''.join(ln).strip():
print ' '.join(ln)
print '-- b --'
for ln in b[::-1]:
if ''.join(ln).strip():
print ' '.join(ln)
def testPairsVB():
bounds = [2,2,4,aleph0,5,aleph0]
a = [[' ' for x in range(15)] for y in range(15)]
b = [[' ' for x in range(15)] for y in range(15)]
for i in range(min(sum(bounds),40)):
x,y = getNthPairVariableBounds(i, bounds)
print i,(x,y)
a[y][x] = '%2d'%i
print '-- a --'
for ln in a[::-1]:
if ''.join(ln).strip():
print ' '.join(ln)
###
# Toggle to use checked versions of enumeration routines.
if False:
getNthPairVariableBounds = getNthPairVariableBoundsChecked
getNthPairBounded = getNthPairBoundedChecked
getNthNTuple = getNthNTupleChecked
getNthTuple = getNthTupleChecked
if __name__ == '__main__':
testPairs()
testPairsVB()
| lgpl-2.1 | 3,977,085,858,711,639,000 | -7,621,378,598,458,754,000 | 27.311594 | 93 | 0.559381 | false |
guker/GroundHog | groundhog/layers/rconv_layers.py | 19 | 11513 | """
Recursive Convolutional layers.
TODO: write more documentation
"""
__docformat__ = 'restructedtext en'
__authors__ = ("KyungHyun Cho ")
__contact__ = "Kyunghyun Cho <[email protected]>"
import numpy
import copy
import theano
import theano.tensor as TT
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog import utils
from groundhog.utils import sample_weights, \
sample_weights_classic,\
sample_weights_orth, \
init_bias, \
constant_shape, \
sample_zeros
from basic import Layer
class RecursiveConvolutionalLayer(Layer):
"""
(Binary) Recursive Convolutional Layer
"""
def __init__(self, rng,
n_hids=500,
scale=.01,
sparsity = -1,
activation = TT.tanh,
activ_noise=0.,
weight_noise=False,
bias_fn='init_bias',
bias_scale = 0.,
dropout = 1.,
init_fn='sample_weights',
kind_reg = None,
grad_scale = 1.,
profile = 0,
gating = False, # NOT USED
reseting = False, # NOT USED
gater_activation = TT.nnet.sigmoid, # NOT USED
reseter_activation = TT.nnet.sigmoid, # NOT USED
name=None):
"""
:type rng: numpy random generator
:param rng: numpy random generator
:type n_in: int
:param n_in: number of inputs units
:type n_hids: int
:param n_hids: Number of hidden units on each layer of the MLP
:type activation: string/function or list of
:param activation: Activation function for the embedding layers. If
a list it needs to have a value for each layer. If not, the same
activation will be applied to all layers
:type scale: float or list of
:param scale: depending on the initialization function, it can be
the standard deviation of the Gaussian from which the weights
are sampled or the largest singular value. If a single value it
will be used for each layer, otherwise it has to have one value
for each layer
:type sparsity: int or list of
:param sparsity: if a single value, it will be used for each layer,
otherwise it has to be a list with as many values as layers. If
negative, it means the weight matrix is dense. Otherwise it
means this many randomly selected input units are connected to
an output unit
:type weight_noise: bool
:param weight_noise: If true, the model is used with weight noise
(and the right shared variable are constructed, to keep track of the
noise)
:type dropout: float
:param dropout: the probability with which hidden units are dropped
from the hidden layer. If set to 1, dropout is not used
:type init_fn: string or function
:param init_fn: function used to initialize the weights of the
layer. We recommend using either `sample_weights_classic` or
`sample_weights` defined in the utils
:type bias_fn: string or function
:param bias_fn: function used to initialize the biases. We recommend
using `init_bias` defined in the utils
:type bias_scale: float
:param bias_scale: argument passed to `bias_fn`, depicting the scale
of the initial bias
:type grad_scale: float or theano scalar
:param grad_scale: factor with which the gradients with respect to
the parameters of this layer are scaled. It is used for
differentiating between the different parameters of a model.
:type name: string
:param name: name of the layer (used to name parameters). NB: in
this library names are very important because certain parts of the
code relies on name to disambiguate between variables, therefore
each layer should have a unique name.
"""
self.grad_scale = grad_scale
if type(init_fn) is str or type(init_fn) is unicode:
init_fn = eval(init_fn)
if type(bias_fn) is str or type(bias_fn) is unicode:
bias_fn = eval(bias_fn)
if type(activation) is str or type(activation) is unicode:
activation = eval(activation)
self.scale = scale
self.sparsity = sparsity
self.activation = activation
self.n_hids = n_hids
self.bias_scale = bias_scale
self.bias_fn = bias_fn
self.init_fn = init_fn
self.weight_noise = weight_noise
self.activ_noise = activ_noise
self.profile = profile
self.dropout = dropout
assert rng is not None, "random number generator should not be empty!"
super(RecursiveConvolutionalLayer, self).__init__(self.n_hids,
self.n_hids, rng, name)
self.trng = RandomStreams(self.rng.randint(int(1e6)))
self.params = []
self._init_params()
def _init_params(self):
# Left weight matrix
self.W_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="W_%s"%self.name)
self.params = [self.W_hh]
# Right weight matrix
self.U_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="U_%s"%self.name)
self.params += [self.U_hh]
# Bias
self.b_hh = theano.shared(
self.bias_fn(self.n_hids,
self.bias_scale,
self.rng),
name='b_%s' %self.name)
self.params += [self.b_hh]
# gaters
self.GW_hh = theano.shared(
numpy.float32(0.01 * self.rng.randn(self.n_hids, 3)),
name="GW_%s"%self.name)
self.params += [self.GW_hh]
self.GU_hh = theano.shared(
numpy.float32(0.01 * self.rng.randn(self.n_hids, 3)),
name="GU_%s"%self.name)
self.params += [self.GU_hh]
self.Gb_hh = theano.shared(
self.bias_fn(3,
self.bias_scale,
self.rng),
name='Gb_%s' %self.name)
self.params += [self.Gb_hh]
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
self.nW_hh = theano.shared(self.W_hh.get_value()*0, name='noise_'+self.W_hh.name)
self.nU_hh = theano.shared(self.U_hh.get_value()*0, name='noise_'+self.U_hh.name)
self.nb_hh = theano.shared(self.b_hh.get_value()*0, name='noise_'+self.b_hh.name)
self.noise_params = [self.nW_hh,self.nU_hh,self.nb_hh]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
    def step_fprop(self, mask_t, prev_level, return_gates = False,
                   use_noise = True):
        # `use_noise` was read below without ever being defined in this scope
        # (a NameError once weight_noise is enabled); accept it as a keyword
        # argument instead, defaulting to the noisy behaviour.
        if self.weight_noise and use_noise and self.noise_params:
W_hh = self.W_hh + self.nW_hh
U_hh = self.U_hh + self.nU_hh
b_hh = self.b_hh + self.nb_hh
else:
W_hh = self.W_hh
U_hh = self.U_hh
b_hh = self.b_hh
GW_hh = self.GW_hh
GU_hh = self.GU_hh
Gb_hh = self.Gb_hh
if prev_level.ndim == 3:
b_hh = b_hh.dimshuffle('x','x',0)
else:
b_hh = b_hh.dimshuffle('x',0)
lower_level = prev_level
prev_shifted = TT.zeros_like(prev_level)
prev_shifted = TT.set_subtensor(prev_shifted[1:], prev_level[:-1])
lower_shifted = prev_shifted
prev_shifted = TT.dot(prev_shifted, U_hh)
prev_level = TT.dot(prev_level, W_hh)
new_act = self.activation(prev_level + prev_shifted + b_hh)
gater = TT.dot(lower_shifted, GU_hh) + \
TT.dot(lower_level, GW_hh) + Gb_hh
if prev_level.ndim == 3:
gater_shape = gater.shape
gater = gater.reshape((gater_shape[0] * gater_shape[1], 3))
gater = TT.nnet.softmax(gater)
if prev_level.ndim == 3:
gater = gater.reshape((gater_shape[0], gater_shape[1], 3))
if prev_level.ndim == 3:
gater_new = gater[:,:,0].dimshuffle(0,1,'x')
gater_left = gater[:,:,1].dimshuffle(0,1,'x')
gater_right = gater[:,:,2].dimshuffle(0,1,'x')
else:
gater_new = gater[:,0].dimshuffle(0,'x')
gater_left = gater[:,1].dimshuffle(0,'x')
gater_right = gater[:,2].dimshuffle(0,'x')
act = new_act * gater_new + \
lower_shifted * gater_left + \
lower_level * gater_right
if mask_t:
if prev_level.ndim == 3:
mask_t = mask_t.dimshuffle('x',0,'x')
else:
mask_t = mask_t.dimshuffle('x', 0)
new_level = TT.switch(mask_t, act, lower_level)
else:
new_level = act
if return_gates:
return new_level, gater
return new_level
def fprop(self,
state_below,
mask=None,
nsteps=None,
batch_size=None,
use_noise=True,
truncate_gradient=-1,
no_noise_bias = False,
**kwargs
):
if theano.config.floatX=='float32':
floatX = numpy.float32
else:
floatX = numpy.float64
if nsteps is None:
nsteps = state_below.shape[0]
if batch_size and batch_size != 1:
nsteps = nsteps / batch_size
if batch_size is None and state_below.ndim == 3:
batch_size = state_below.shape[1]
if state_below.ndim == 2 and \
(not isinstance(batch_size,int) or batch_size > 1):
state_below = state_below.reshape((nsteps, batch_size, self.n_in))
        if mask is None:  # `==` on a tensor would build a symbolic comparison
mask = TT.alloc(1., nsteps, 1)
rval = []
rval, updates = theano.scan(self.step_fprop,
sequences = [mask[1:]],
outputs_info = [state_below],
name='layer_%s'%self.name,
profile=self.profile,
n_steps = nsteps-1)
seqlens = TT.cast(mask.sum(axis=0), 'int64')-1
roots = rval[-1]
if state_below.ndim == 3:
def _grab_root(seqlen,one_sample,prev_sample):
return one_sample[seqlen]
roots, updates = theano.scan(_grab_root,
sequences = [seqlens, roots.dimshuffle(1,0,2)],
outputs_info = [TT.alloc(0., self.n_hids)],
name='grab_root_%s'%self.name,
profile=self.profile)
roots = roots.dimshuffle('x', 0, 1)
else:
roots = roots[seqlens] # there should be only one, so it's fine.
# Note that roots has only a single timestep
new_h = roots
self.out = roots
self.rval = roots
self.updates =updates
return self.out
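    # Illustrative wiring (assumed shapes, not executed): given `state_below`
    # of shape (n_steps, batch, n_hids) and a 0/1 `mask` of shape
    # (n_steps, batch),
    #
    #   layer = RecursiveConvolutionalLayer(rng, n_hids=500, name='rconv')
    #   root  = layer.fprop(state_below, mask=mask)
    #
    # fprop() merges neighbouring positions level by level until a single
    # vector per sample remains, so `root` holds one "tree root" timestep.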
| bsd-3-clause | -7,880,989,807,757,476,000 | 34,343,096,873,051,452 | 35.090909 | 93 | 0.541822 | false |
dudepare/django | tests/template_tests/filter_tests/test_autoescape.py | 513 | 1342 | from django.test import SimpleTestCase
from ..utils import SafeClass, UnsafeClass, setup
class AutoescapeStringfilterTests(SimpleTestCase):
"""
Filters decorated with stringfilter still respect is_safe.
"""
@setup({'autoescape-stringfilter01': '{{ unsafe|capfirst }}'})
def test_autoescape_stringfilter01(self):
output = self.engine.render_to_string('autoescape-stringfilter01', {'unsafe': UnsafeClass()})
self.assertEqual(output, 'You & me')
@setup({'autoescape-stringfilter02': '{% autoescape off %}{{ unsafe|capfirst }}{% endautoescape %}'})
def test_autoescape_stringfilter02(self):
output = self.engine.render_to_string('autoescape-stringfilter02', {'unsafe': UnsafeClass()})
self.assertEqual(output, 'You & me')
@setup({'autoescape-stringfilter03': '{{ safe|capfirst }}'})
def test_autoescape_stringfilter03(self):
output = self.engine.render_to_string('autoescape-stringfilter03', {'safe': SafeClass()})
self.assertEqual(output, 'You > me')
@setup({'autoescape-stringfilter04': '{% autoescape off %}{{ safe|capfirst }}{% endautoescape %}'})
def test_autoescape_stringfilter04(self):
output = self.engine.render_to_string('autoescape-stringfilter04', {'safe': SafeClass()})
self.assertEqual(output, 'You > me')
| bsd-3-clause | -5,289,175,519,784,739,000 | 4,211,015,430,322,258,000 | 45.275862 | 105 | 0.683308 | false |
yamahata/neutron | neutron/neutron_plugin_base_v2.py | 6 | 13983 | # Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
v2 Neutron Plug-in API specification.
:class:`NeutronPluginBaseV2` provides the definition of minimum set of
methods that needs to be implemented by a v2 Neutron Plug-in.
"""
from abc import ABCMeta, abstractmethod
import six
@six.add_metaclass(ABCMeta)
class NeutronPluginBaseV2(object):
@abstractmethod
def create_subnet(self, context, subnet):
"""Create a subnet.
Create a subnet, which represents a range of IP addresses
that can be allocated to devices
:param context: neutron api request context
:param subnet: dictionary describing the subnet, with keys
as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. All keys will
be populated.
"""
pass
@abstractmethod
def update_subnet(self, context, id, subnet):
"""Update values of a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to update.
:param subnet: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`.
"""
pass
@abstractmethod
def get_subnet(self, context, id, fields=None):
"""Retrieve a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to fetch.
:param fields: a list of strings that are valid keys in a
subnet dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abstractmethod
def get_subnets(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of subnets.
The contents of the list depends on
the identity of the user making the request (as indicated by the
context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a subnet as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`.
                        Values in this dictionary are an iterable containing
values that will be used for an exact match comparison
for that value. Each result returned by this
function will have matched one of the values for each
key in filters.
:param fields: a list of strings that are valid keys in a
subnet dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_subnets_count(self, context, filters=None):
"""Return the number of subnets.
The result depends on the identity of
the user making the request (as indicated by the context) as well as
any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that
will be used for an exact match comparison for that
value. Each result returned by this function will
have matched one of the values for each key in filters.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError
@abstractmethod
def delete_subnet(self, context, id):
"""Delete a subnet.
:param context: neutron api request context
:param id: UUID representing the subnet to delete.
"""
pass
@abstractmethod
def create_network(self, context, network):
"""Create a network.
Create a network, which represents an L2 network segment which
can have a set of subnets and ports associated with it.
:param context: neutron api request context
:param network: dictionary describing the network, with keys
as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. All keys will
be populated.
"""
pass
@abstractmethod
def update_network(self, context, id, network):
"""Update values of a network.
:param context: neutron api request context
:param id: UUID representing the network to update.
:param network: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`.
"""
pass
@abstractmethod
def get_network(self, context, id, fields=None):
"""Retrieve a network.
:param context: neutron api request context
:param id: UUID representing the network to fetch.
:param fields: a list of strings that are valid keys in a
network dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abstractmethod
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of networks.
The contents of the list depends on
the identity of the user making the request (as indicated by the
context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that will
be used for an exact match comparison for that value.
Each result returned by this function will have matched
one of the values for each key in filters.
:param fields: a list of strings that are valid keys in a
network dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_networks_count(self, context, filters=None):
"""Return the number of networks.
The result depends on the identity
of the user making the request (as indicated by the context) as well
as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object
in :file:`neutron/api/v2/attributes.py`. Values in
                        this dictionary are an iterable containing values that
will be used for an exact match comparison for that
value. Each result returned by this function will have
matched one of the values for each key in filters.
        .. note:: this method is optional, as it was not part of the originally
                  defined plugin API.
"""
raise NotImplementedError
@abstractmethod
def delete_network(self, context, id):
"""Delete a network.
:param context: neutron api request context
:param id: UUID representing the network to delete.
"""
pass
@abstractmethod
def create_port(self, context, port):
"""Create a port.
Create a port, which is a connection point of a device (e.g., a VM
NIC) to attach to a L2 neutron network.
:param context: neutron api request context
:param port: dictionary describing the port, with keys as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. All keys will be
populated.
"""
pass
@abstractmethod
def update_port(self, context, id, port):
"""Update values of a port.
:param context: neutron api request context
:param id: UUID representing the port to update.
:param port: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for
'allow_put' as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`.
"""
pass
@abstractmethod
def get_port(self, context, id, fields=None):
"""Retrieve a port.
:param context: neutron api request context
:param id: UUID representing the port to fetch.
:param fields: a list of strings that are valid keys in a port
dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
@abstractmethod
def get_ports(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Retrieve a list of ports.
        The contents of the list depend on the identity of the user making
the request (as indicated by the context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a port as listed in the :obj:`RESOURCE_ATTRIBUTE_MAP`
object in :file:`neutron/api/v2/attributes.py`. Values
                        in this dictionary are an iterable containing values
that will be used for an exact match comparison for
that value. Each result returned by this function will
have matched one of the values for each key in filters.
:param fields: a list of strings that are valid keys in a
port dictionary as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Only these fields
will be returned.
"""
pass
def get_ports_count(self, context, filters=None):
"""Return the number of ports.
The result depends on the identity of the user making the request
(as indicated by the context) as well as any filters.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a network as listed in the
:obj:`RESOURCE_ATTRIBUTE_MAP` object in
:file:`neutron/api/v2/attributes.py`. Values in this
                        dictionary are an iterable containing values that will
be used for an exact match comparison for that value.
Each result returned by this function will have matched
one of the values for each key in filters.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError
@abstractmethod
def delete_port(self, context, id):
"""Delete a port.
:param context: neutron api request context
:param id: UUID representing the port to delete.
"""
pass
def start_rpc_listener(self):
"""Start the rpc listener.
Most plugins start an RPC listener implicitly on initialization. In
order to support multiple process RPC, the plugin needs to expose
control over when this is started.
.. note:: this method is optional, as it was not part of the originally
defined plugin API.
"""
raise NotImplementedError
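

# --- Illustrative sketch (not part of the original plugin API file) ---
# A minimal, hypothetical in-memory implementation of the get_network /
# get_networks contract documented above. The class and helper names below
# are invented for illustration; 'filters' maps keys to iterables of
# acceptable values and 'fields' restricts the returned keys, mirroring the
# semantics described in the docstrings.
def _restrict_fields(resource, fields):
    """Return only the requested keys of a resource dict, or all of them."""
    if fields:
        return dict((key, value) for key, value in resource.items()
                    if key in fields)
    return resource


class InMemoryNetworkStore(object):
    """Toy store demonstrating the filters/fields semantics (hypothetical)."""

    def __init__(self):
        self._networks = {}  # UUID -> plain dict describing the network

    def get_network(self, context, id, fields=None):
        return _restrict_fields(self._networks[id], fields)

    def get_networks(self, context, filters=None, fields=None):
        result = []
        for net in self._networks.values():
            # Each filter key maps to an iterable of values; a network
            # matches when its value is an exact match for one of them.
            if filters and not all(net.get(key) in values
                                   for key, values in filters.items()):
                continue
            result.append(_restrict_fields(net, fields))
        return result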
| apache-2.0 | 3,239,462,693,028,753,000 | -5,657,906,916,306,780,000 | 40.369822 | 79 | 0.5885 | false |
agx/git-buildpackage | tests/component/__init__.py | 1 | 9974 | # vim: set fileencoding=utf-8 :
#
# (C) 2012 Intel Corporation <[email protected]>
# 2013,2017 Guido Günther <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, please see
# <http://www.gnu.org/licenses/>
"""
Module for testing individual command line tools of the git-buildpackage suite
"""
import hashlib
import os
import shutil
import tempfile
import unittest
from unittest import skipUnless
from nose import SkipTest
from nose.tools import eq_, ok_ # pylint: disable=E0611
from .. testutils import GbpLogTester
from gbp.git import GitRepository, GitRepositoryError
__all__ = ['ComponentTestGitRepository', 'ComponentTestBase', 'GbpLogTester', 'skipUnless']
class ComponentTestGitRepository(GitRepository):
"""Git repository class for component tests"""
def submodule_status(self):
"""
Determine submodules and their status
"""
out, err, ret = self._git_inout('submodule', ['status'],
capture_stderr=True)
if ret:
raise GitRepositoryError("Cannot get submodule status: %s" %
err.strip())
submodules = {}
for line in out.decode().splitlines():
module = line.strip()
# Uninitialized
status = module[0]
if status == '-':
sha1, path = module[1:].rsplit(' ', 1)
else:
commitpath = module[1:].rsplit(' ', 1)[0]
sha1, path = commitpath.split(' ', 1)
submodules[path] = (status, sha1)
return submodules
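    # For illustration (hypothetical output): an initialized submodule shows
    # up as {'tests/data': (' ', '<sha1>')} and an uninitialized one as
    # {'tests/data': ('-', '<sha1>')} -- i.e. each path maps to a
    # (status, sha1) tuple, matching the parsing above.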
@classmethod
def check_testdata(cls, data):
"""Check whether the testdata is current"""
try:
repo = cls('.')
except GitRepositoryError:
raise SkipTest("Skipping '%s', since this is not a git checkout."
% __name__)
submodules = repo.submodule_status()
try:
status = submodules[data]
except KeyError:
raise SkipTest("Skipping '%s', testdata directory not a known "
"submodule." % __name__)
if status[0] == '-':
raise SkipTest("Skipping '%s', testdata directory not initialized. "
"Consider doing 'git submodule update'" % __name__)
def ls_tree(self, treeish):
"""List contents (blobs) in a git treeish"""
objs = self.list_tree(treeish, True)
blobs = [obj[3] for obj in objs if obj[1] == 'blob']
return set(blobs)
def get_head_author_subject(self):
out, err, ret = self._git_inout('format-patch', ['-1', '--stdout', '--subject-prefix='],
capture_stderr=True)
if ret:
raise GitRepositoryError("Cannot get head author/subject: %s" %
err.strip())
output = out.decode('utf-8')
for line in output.split('\n'):
line = line.strip()
if not line:
# end of headers
break
if line.startswith('From:'):
author = line.replace('From:', '').strip()
elif line.startswith('Subject:'):
subject = line.replace('Subject:', '').strip()
return author, subject
class ComponentTestBase(unittest.TestCase, GbpLogTester):
"""Base class for testing cmdline tools of git-buildpackage"""
@classmethod
def setUpClass(cls):
"""Test class case setup"""
# Don't let git see that we're (possibly) under a git directory
cls.orig_env = os.environ.copy()
os.environ['GIT_CEILING_DIRECTORIES'] = os.getcwd()
# Create a top-level tmpdir for the test
cls._tmproot = tempfile.mkdtemp(prefix='gbp_%s_' % cls.__name__,
dir='.')
cls._tmproot = os.path.abspath(cls._tmproot)
# Prevent local config files from messing up the tests
os.environ['GBP_CONF_FILES'] = ':'.join(['%(top_dir)s/.gbp.conf',
'%(top_dir)s/debian/gbp.conf',
'%(git_dir)s/gbp.conf'])
@classmethod
def tearDownClass(cls):
"""Test class case teardown"""
# Return original environment
os.environ.clear()
os.environ.update(cls.orig_env)
# Remove top-level tmpdir
if not os.getenv("GBP_TESTS_NOCLEAN"):
shutil.rmtree(cls._tmproot)
def __init__(self, methodName='runTest'):
"""Object initialization"""
self._orig_dir = None
self._tmpdir = None
unittest.TestCase.__init__(self, methodName)
GbpLogTester.__init__(self)
def setUp(self):
"""Test case setup"""
# Change to a temporary directory
self._orig_dir = os.getcwd()
self._tmpdir = tempfile.mkdtemp(prefix='tmp_%s_' % self._testMethodName,
dir=self._tmproot)
os.chdir(self._tmpdir)
self._capture_log(True)
def tearDown(self):
"""Test case teardown"""
# Restore original working dir
os.chdir(self._orig_dir)
if not os.getenv("GBP_TESTS_NOCLEAN"):
shutil.rmtree(self._tmpdir)
self._capture_log(False)
@staticmethod
def check_files(reference, filelist):
"""Compare two file lists"""
extra = set(filelist) - set(reference)
missing = set(reference) - set(filelist)
assert_msg = "Unexpected files: %s, Missing files: %s" % \
(list(extra), list(missing))
assert not extra and not missing, assert_msg
@classmethod
def check_tags(cls, repo, tags):
local_tags = repo.tags
assert_msg = "Tags: expected %s, found %s" % (tags,
local_tags)
eq_(set(local_tags), set(tags), assert_msg)
@classmethod
def _check_repo_state(cls, repo, current_branch, branches, files=None,
dirs=None, tags=None, clean=True):
"""
Check that repository is clean and given branches, tags, files
and dirs exist
"""
branch = repo.branch
eq_(branch, current_branch)
ok_(repo.is_clean())
local_branches = repo.get_local_branches()
assert_msg = "Branches: expected %s, found %s" % (branches,
local_branches)
eq_(set(local_branches), set(branches), assert_msg)
if files is not None or dirs is not None:
# Get files of the working copy recursively
local_f = set()
local_d = set()
for dirpath, dirnames, filenames in os.walk(repo.path):
# Skip git dir(s)
if '.git' in dirnames:
dirnames.remove('.git')
for filename in filenames:
local_f.add(os.path.relpath(os.path.join(dirpath, filename),
repo.path))
for dirname in dirnames:
local_d.add(os.path.relpath(os.path.join(dirpath, dirname),
repo.path) + '/')
if files is not None:
cls.check_files(files, local_f)
if dirs is not None:
cls.check_files(dirs, local_d)
if tags is not None:
cls.check_tags(repo, tags)
if clean:
clean, files = repo.is_clean()
ok_(clean, "Repo has uncommitted files %s" % files)
@classmethod
def rem_refs(cls, repo, refs):
"""Remember the SHA1 of the given refs"""
rem = []
for name in refs:
rem.append((name, repo.rev_parse(name)))
return rem
@classmethod
def check_refs(cls, repo, rem):
"""
        Check that the heads given in (head, sha1) tuples are
still pointing to the given sha1
"""
for (h, s) in rem:
n = repo.rev_parse(h)
ok_(n == s, "Head '%s' points to %s' instead of '%s'" % (h, n, s))
@staticmethod
def hash_file(filename):
h = hashlib.md5()
with open(filename, 'rb') as f:
buf = f.read()
h.update(buf)
return h.hexdigest()
@staticmethod
def check_hook_vars(name, expected):
"""
Check that a hook had the given vars in
        its environment.
        This assumes the hook was set to
printenv > hookname.out
"""
with open('%s.out' % name, encoding='utf-8') as f:
parsed = dict([line[:-1].split('=', 1) for line in f.readlines() if line.startswith("GBP_")])
for var in expected:
if len(var) == 2:
k, v = var
else:
k, v = var, None
ok_(k in parsed, "%s not found in %s" % (k, parsed))
if v is not None:
ok_(v == parsed[k],
"Got %s not expected value %s for %s" % (parsed[k], v, k))
@staticmethod
def add_file(repo, name, content=None):
with open(name, 'w') as f:
            f.write(content or ' ')
repo.add_files(name)
repo.commit_files(name, 'New file %s' % name)
| gpl-2.0 | -6,114,709,728,545,881,000 | -2,344,148,236,249,030,700 | 35.937037 | 105 | 0.538554 | false |
LaoZhongGu/kbengine | kbe/src/lib/python/Lib/ctypes/macholib/dylib.py | 320 | 1828 | """
Generic dylib path manipulation
"""
import re
__all__ = ['dylib_info']
DYLIB_RE = re.compile(r"""(?x)
(?P<location>^.*)(?:^|/)
(?P<name>
(?P<shortname>\w+?)
(?:\.(?P<version>[^._]+))?
(?:_(?P<suffix>[^._]+))?
\.dylib$
)
""")
def dylib_info(filename):
"""
A dylib name can take one of the following four forms:
Location/Name.SomeVersion_Suffix.dylib
Location/Name.SomeVersion.dylib
Location/Name_Suffix.dylib
Location/Name.dylib
returns None if not found or a mapping equivalent to:
dict(
location='Location',
name='Name.SomeVersion_Suffix.dylib',
shortname='Name',
version='SomeVersion',
suffix='Suffix',
)
Note that SomeVersion and Suffix are optional and may be None
if not present.
"""
is_dylib = DYLIB_RE.match(filename)
if not is_dylib:
return None
return is_dylib.groupdict()
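# For illustration: dylib_info('P/Foo.A_debug.dylib') returns
# {'location': 'P', 'name': 'Foo.A_debug.dylib', 'shortname': 'Foo',
#  'version': 'A', 'suffix': 'debug'}; see test_dylib_info() below for the
# full set of expected mappings.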
def test_dylib_info():
def d(location=None, name=None, shortname=None, version=None, suffix=None):
return dict(
location=location,
name=name,
shortname=shortname,
version=version,
suffix=suffix
)
assert dylib_info('completely/invalid') is None
assert dylib_info('completely/invalide_debug') is None
assert dylib_info('P/Foo.dylib') == d('P', 'Foo.dylib', 'Foo')
assert dylib_info('P/Foo_debug.dylib') == d('P', 'Foo_debug.dylib', 'Foo', suffix='debug')
assert dylib_info('P/Foo.A.dylib') == d('P', 'Foo.A.dylib', 'Foo', 'A')
assert dylib_info('P/Foo_debug.A.dylib') == d('P', 'Foo_debug.A.dylib', 'Foo_debug', 'A')
assert dylib_info('P/Foo.A_debug.dylib') == d('P', 'Foo.A_debug.dylib', 'Foo', 'A', 'debug')
if __name__ == '__main__':
test_dylib_info()
| lgpl-3.0 | 6,356,891,639,138,222,000 | -6,270,528,475,239,926,000 | 28.015873 | 96 | 0.573304 | false |
mitodl/ansible | examples/scripts/yaml_to_ini.py | 175 | 7609 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible import errors
from ansible import utils
import os
import yaml
import sys
class InventoryParserYaml(object):
''' Host inventory parser for ansible '''
def __init__(self, filename=C.DEFAULT_HOST_LIST):
sys.stderr.write("WARNING: YAML inventory files are deprecated in 0.6 and will be removed in 0.7, to migrate" +
" download and run https://github.com/ansible/ansible/blob/devel/examples/scripts/yaml_to_ini.py\n")
fh = open(filename)
data = fh.read()
fh.close()
self._hosts = {}
self._parse(data)
def _make_host(self, hostname):
if hostname in self._hosts:
return self._hosts[hostname]
else:
host = Host(hostname)
self._hosts[hostname] = host
return host
# see file 'test/yaml_hosts' for syntax
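    # A hypothetical illustration of that syntax (invented example, mirroring
    # what _parse() below accepts):
    #
    #   - plainhost.example.com
    #   - host: web1.example.com
    #     vars:
    #       http_port: 80
    #     groups:
    #       - webservers
    #   - group: webservers
    #     hosts:
    #       - web2.example.com
    #       - host: web3.example.com
    #         vars:
    #           region: east
    #     vars:
    #       ntp_server: ntp.example.com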
def _parse(self, data):
# FIXME: refactor into subfunctions
all = Group('all')
ungrouped = Group('ungrouped')
all.add_child_group(ungrouped)
self.groups = dict(all=all, ungrouped=ungrouped)
grouped_hosts = []
yaml = utils.parse_yaml(data)
# first add all groups
for item in yaml:
if type(item) == dict and 'group' in item:
group = Group(item['group'])
for subresult in item.get('hosts',[]):
if type(subresult) in [ str, unicode ]:
host = self._make_host(subresult)
group.add_host(host)
grouped_hosts.append(host)
elif type(subresult) == dict:
host = self._make_host(subresult['host'])
vars = subresult.get('vars',{})
if type(vars) == list:
for subitem in vars:
for (k,v) in subitem.items():
host.set_variable(k,v)
elif type(vars) == dict:
for (k,v) in subresult.get('vars',{}).items():
host.set_variable(k,v)
else:
raise errors.AnsibleError("unexpected type for variable")
group.add_host(host)
grouped_hosts.append(host)
vars = item.get('vars',{})
if type(vars) == dict:
for (k,v) in item.get('vars',{}).items():
group.set_variable(k,v)
elif type(vars) == list:
for subitem in vars:
if type(subitem) != dict:
raise errors.AnsibleError("expected a dictionary")
for (k,v) in subitem.items():
group.set_variable(k,v)
self.groups[group.name] = group
all.add_child_group(group)
# add host definitions
for item in yaml:
if type(item) in [ str, unicode ]:
host = self._make_host(item)
if host not in grouped_hosts:
ungrouped.add_host(host)
elif type(item) == dict and 'host' in item:
host = self._make_host(item['host'])
vars = item.get('vars', {})
if type(vars)==list:
varlist, vars = vars, {}
for subitem in varlist:
vars.update(subitem)
for (k,v) in vars.items():
host.set_variable(k,v)
groups = item.get('groups', {})
if type(groups) in [ str, unicode ]:
groups = [ groups ]
if type(groups)==list:
for subitem in groups:
if subitem in self.groups:
group = self.groups[subitem]
else:
group = Group(subitem)
self.groups[group.name] = group
all.add_child_group(group)
group.add_host(host)
grouped_hosts.append(host)
if host not in grouped_hosts:
ungrouped.add_host(host)
# make sure ungrouped.hosts is the complement of grouped_hosts
ungrouped_hosts = [host for host in ungrouped.hosts if host not in grouped_hosts]
if __name__ == "__main__":
if len(sys.argv) != 2:
print "usage: yaml_to_ini.py /path/to/ansible/hosts"
sys.exit(1)
result = ""
original = sys.argv[1]
yamlp = InventoryParserYaml(filename=sys.argv[1])
dirname = os.path.dirname(original)
group_names = [ g.name for g in yamlp.groups.values() ]
for group_name in sorted(group_names):
record = yamlp.groups[group_name]
if group_name == 'all':
continue
hosts = record.hosts
result = result + "[%s]\n" % record.name
for h in hosts:
result = result + "%s\n" % h.name
result = result + "\n"
groupfiledir = os.path.join(dirname, "group_vars")
if not os.path.exists(groupfiledir):
print "* creating: %s" % groupfiledir
os.makedirs(groupfiledir)
groupfile = os.path.join(groupfiledir, group_name)
print "* writing group variables for %s into %s" % (group_name, groupfile)
groupfh = open(groupfile, 'w')
groupfh.write(yaml.dump(record.get_variables()))
groupfh.close()
for (host_name, host_record) in yamlp._hosts.iteritems():
hostfiledir = os.path.join(dirname, "host_vars")
if not os.path.exists(hostfiledir):
print "* creating: %s" % hostfiledir
os.makedirs(hostfiledir)
hostfile = os.path.join(hostfiledir, host_record.name)
print "* writing host variables for %s into %s" % (host_record.name, hostfile)
hostfh = open(hostfile, 'w')
hostfh.write(yaml.dump(host_record.get_variables()))
hostfh.close()
# also need to keep a hash of variables per each host
# and variables per each group
# and write those to disk
newfilepath = os.path.join(dirname, "hosts.new")
fdh = open(newfilepath, 'w')
fdh.write(result)
fdh.close()
print "* COMPLETE: review your new inventory file and replace your original when ready"
print "* new inventory file saved as %s" % newfilepath
print "* edit group specific variables in %s/group_vars/" % dirname
print "* edit host specific variables in %s/host_vars/" % dirname
# now need to write this to disk as (oldname).new
# and inform the user
| gpl-3.0 | -2,810,486,857,112,769,000 | -4,981,837,217,029,216,000 | 35.936893 | 119 | 0.540938 | false |
johnkingsley/aprinter | config_system/utils/nix_utils.py | 1 | 2105 | # Copyright (c) 2015 Ambroz Bizjak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def escape_string_for_nix(data):
return '"{}"'.format(''.join(('\\{}'.format(c) if c in ('"', '\\', '$') else c) for c in data))
def convert_bool_for_nix(value):
return 'true' if value else 'false'
def convert_for_nix(value):
if type(value) is str:
return escape_string_for_nix(value)
if type(value) is bool:
return convert_bool_for_nix(value)
if type(value) is list:
if len(value) == 0:
return '[]'
else:
return '[ {} ]'.format(' '.join(convert_for_nix(e) for e in value))
if type(value) is dict:
if len(value) == 0:
return '{}'
else:
return '{{ {} }}'.format(' '.join('{} = {};'.format(convert_for_nix(k), convert_for_nix(v)) for k, v in sorted(value.iteritems())))
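
# A minimal demonstration of the conversion (hypothetical values):
if __name__ == '__main__':
    print(convert_for_nix({'name': 'example', 'debug': True,
                           'cflags': ['-O2', '-g']}))
    # -> { "cflags" = [ "-O2" "-g" ]; "debug" = true; "name" = "example" }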
| bsd-2-clause | -7,212,576,906,596,338,000 | 4,110,604,414,461,869,600 | 47.953488 | 143 | 0.691686 | false |
PepperPD/edx-pepper-platform | cms/djangoapps/contentstore/features/component.py | 7 | 4905 | #pylint: disable=C0111
#pylint: disable=W0621
from lettuce import world, step
from nose.tools import assert_true # pylint: disable=E0611
DATA_LOCATION = 'i4x://edx/templates'
@step(u'I am editing a new unit')
def add_unit(step):
css_selectors = ['a.new-courseware-section-button', 'input.new-section-name-save', 'a.new-subsection-item',
'input.new-subsection-name-save', 'div.section-item a.expand-collapse-icon', 'a.new-unit-item']
for selector in css_selectors:
world.css_click(selector)
@step(u'I add the following components:')
def add_components(step):
for component in [step_hash['Component'] for step_hash in step.hashes]:
assert component in COMPONENT_DICTIONARY
for css in COMPONENT_DICTIONARY[component]['steps']:
world.css_click(css)
@step(u'I see the following components')
def check_components(step):
for component in [step_hash['Component'] for step_hash in step.hashes]:
assert component in COMPONENT_DICTIONARY
assert_true(COMPONENT_DICTIONARY[component]['found_func'](), "{} couldn't be found".format(component))
@step(u'I delete all components')
def delete_all_components(step):
for _ in range(len(COMPONENT_DICTIONARY)):
world.css_click('a.delete-button')
@step(u'I see no components')
def see_no_components(steps):
assert world.is_css_not_present('li.component')
@step(u'I delete a component')
def delete_one_component(step):
world.css_click('a.delete-button')
@step(u'I edit and save a component')
def edit_and_save_component(step):
world.css_click('.edit-button')
world.css_click('.save-button')
def step_selector_list(data_type, path, index=1):
selector_list = ['a[data-type="{}"]'.format(data_type)]
if index != 1:
selector_list.append('a[id="ui-id-{}"]'.format(index))
if path is not None:
selector_list.append('a[data-location="{}/{}/{}"]'.format(DATA_LOCATION, data_type, path))
return selector_list
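
# For illustration: step_selector_list('problem', 'Dropdown') returns
# ['a[data-type="problem"]',
#  'a[data-location="i4x://edx/templates/problem/Dropdown"]'];
# with index=2 an 'a[id="ui-id-2"]' selector is added between the two.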
def found_text_func(text):
return lambda: world.browser.is_text_present(text)
def found_css_func(css):
return lambda: world.is_css_present(css, wait_time=2)
COMPONENT_DICTIONARY = {
'Discussion': {
'steps': step_selector_list('discussion', None),
'found_func': found_css_func('section.xmodule_DiscussionModule')
},
'Blank HTML': {
'steps': step_selector_list('html', 'Blank_HTML_Page'),
#this one is a blank html so a more refined search is being done
'found_func': lambda: '\n \n' in [x.html for x in world.css_find('section.xmodule_HtmlModule')]
},
'LaTex': {
'steps': step_selector_list('html', 'E-text_Written_in_LaTeX'),
'found_func': found_text_func('EXAMPLE: E-TEXT PAGE')
},
'Blank Problem': {
'steps': step_selector_list('problem', 'Blank_Common_Problem'),
'found_func': found_text_func('BLANK COMMON PROBLEM')
},
'Dropdown': {
'steps': step_selector_list('problem', 'Dropdown'),
'found_func': found_text_func('DROPDOWN')
},
'Multi Choice': {
'steps': step_selector_list('problem', 'Multiple_Choice'),
'found_func': found_text_func('MULTIPLE CHOICE')
},
'Numerical': {
'steps': step_selector_list('problem', 'Numerical_Input'),
'found_func': found_text_func('NUMERICAL INPUT')
},
'Text Input': {
'steps': step_selector_list('problem', 'Text_Input'),
'found_func': found_text_func('TEXT INPUT')
},
'Advanced': {
'steps': step_selector_list('problem', 'Blank_Advanced_Problem', index=2),
'found_func': found_text_func('BLANK ADVANCED PROBLEM')
},
'Circuit': {
'steps': step_selector_list('problem', 'Circuit_Schematic_Builder', index=2),
'found_func': found_text_func('CIRCUIT SCHEMATIC BUILDER')
},
'Custom Python': {
'steps': step_selector_list('problem', 'Custom_Python-Evaluated_Input', index=2),
'found_func': found_text_func('CUSTOM PYTHON-EVALUATED INPUT')
},
'Image Mapped': {
'steps': step_selector_list('problem', 'Image_Mapped_Input', index=2),
'found_func': found_text_func('IMAGE MAPPED INPUT')
},
'Math Input': {
'steps': step_selector_list('problem', 'Math_Expression_Input', index=2),
'found_func': found_text_func('MATH EXPRESSION INPUT')
},
'Problem LaTex': {
'steps': step_selector_list('problem', 'Problem_Written_in_LaTeX', index=2),
'found_func': found_text_func('PROBLEM WRITTEN IN LATEX')
},
'Adaptive Hint': {
'steps': step_selector_list('problem', 'Problem_with_Adaptive_Hint', index=2),
'found_func': found_text_func('PROBLEM WITH ADAPTIVE HINT')
},
'Video': {
'steps': step_selector_list('video', None),
'found_func': found_css_func('section.xmodule_VideoModule')
}
}
| agpl-3.0 | -1,986,470,218,396,612,900 | -3,256,979,404,607,050,000 | 34.80292 | 115 | 0.634047 | false |
bkrukowski/phantomjs | src/breakpad/src/tools/gyp/pylib/gyp/generator/make.py | 137 | 42339 | #!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last beween different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the files readable.
import gyp
import gyp.common
import os.path
# Debugging-related imports -- remove me once we're solid.
import code
import pprint
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'OS': 'linux',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.so',
'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/geni',
'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
'PRODUCT_DIR': '$(builddir)',
'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
'LIB_DIR': '$(obj).$(TOOLSET)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(abspath $<)',
# These appear unused -- ???
'RULE_INPUT_EXT': 'XXXEXT$(suffix $^)',
  'RULE_INPUT_NAME': 'XXXNAME$(notdir $(basename $^))',
'CONFIGURATION_NAME': '$(BUILDTYPE)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
def ensure_directory_exists(path):
dir = os.path.dirname(path)
if dir and not os.path.exists(dir):
os.makedirs(dir)
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= __default_configuration__
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of each target that we want to be generated by default.
all_targets :=
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
# C++ apps need to be linked with g++. Not sure what's appropriate.
LINK ?= $(CXX)
CC.target ?= $(CC)
CXX.target ?= $(CXX)
LINK.target ?= $(LINK)
AR.target ?= $(AR)
RANLIB.target ?= ranlib
CC.host ?= gcc
CXX.host ?= g++
LINK.host ?= g++
AR.host ?= ar
RANLIB.host ?= ranlib
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a temporary dep file first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $@.d
DEPFLAGS = -MMD -MF $(depfile).tmp
# We have to fixup the deps output in a few ways.
# First, the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# Additionally, we want to make missing files not cause us to needlessly
# rebuild. We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1 DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time."""
r"""
define fixup_dep
sed -i -e "s|^$(notdir $@)|$@|" $(depfile).tmp
sed -e "s|^[^:]*: *||" -e "s| *\\\\$$||" -e 's|^ *||' \
-e "/./s|$$|:|" $(depfile).tmp >> $(depfile).tmp
cat $(depfile).tmp >> $(depfile)
rm -f $(depfile).tmp
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(CFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(CXXFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_alink = AR+RANLIB($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) rc $@ $(filter %.o,$^) && $(RANLIB.$(TOOLSET)) $@
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
cmd_copy = ln -f $< $@ || cp -af $< $@
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(LDFLAGS) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
# Shared-object link (for generating .so).
# Set SONAME to the library filename so our binaries don't reference the local,
# absolute paths used on the link command-line.
# TODO: perhaps this can share with the LINK command above?
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(LDFLAGS) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines wth shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string is just a reordering of the other's arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead replace each command with the empty string inside the other, and
# say they're equal if both substitutions produce the empty string.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$@)),\\
$(subst $(cmd_$@),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out $|,$?)
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do C/C++ dependency munging.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@echo ' $($(quiet)cmd_$(1))'
@mkdir -p $(dir $@)
@$(cmd_$(1))
@echo '$(call escape_vars,$(call escape_quotes,cmd_$@ := $(cmd_$(1))))' > $(depfile)
@$(if $(2),$(fixup_dep))
)
endef
# Declare "all" target first so it is the default, even though we don't have the
# deps yet.
.PHONY: all
all:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
ROOT_HEADER_SUFFIX_RULES = ("""\
# Suffix rules, putting all outputs into $(obj).
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.c FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.s FORCE_DO_CMD
@$(call do_cmd,cc)
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.S FORCE_DO_CMD
@$(call do_cmd,cc)
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.cpp FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.cxx FORCE_DO_CMD
@$(call do_cmd,cxx,1)
# Try building from generated source, too.
$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.c FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.cpp FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(obj)/%.c FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/%.o: $(obj)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(obj)/%.cpp FORCE_DO_CMD
@$(call do_cmd,cxx,1)
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_SRCDIR = {
'.c': ("""\
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.c FORCE_DO_CMD
@$(call do_cmd,cc,1)
"""),
'.s': ("""\
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.s FORCE_DO_CMD
@$(call do_cmd,cc)
"""),
'.S': ("""\
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.S FORCE_DO_CMD
@$(call do_cmd,cc)
"""),
'.cpp': ("""\
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.cpp FORCE_DO_CMD
@$(call do_cmd,cxx,1)
"""),
'.cc': ("""\
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
"""),
'.cxx': ("""\
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.cxx FORCE_DO_CMD
@$(call do_cmd,cxx,1)
"""),
}
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_HEADER_SUFFIX_RULES_OBJDIR1 = {
'.c': ("""\
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj).$(TOOLSET)/%.c FORCE_DO_CMD
@$(call do_cmd,cc,1)
"""),
'.cc': ("""\
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj).$(TOOLSET)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
"""),
'.cpp': ("""\
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj).$(TOOLSET)/%.cpp FORCE_DO_CMD
@$(call do_cmd,cxx,1)
"""),
}
SHARED_HEADER_SUFFIX_RULES_OBJDIR2 = {
'.c': ("""\
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj)/%.c FORCE_DO_CMD
@$(call do_cmd,cc,1)
"""),
'.cc': ("""\
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
"""),
'.cpp': ("""\
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj)/%.cpp FORCE_DO_CMD
@$(call do_cmd,cxx,1)
"""),
}
SHARED_HEADER_SUFFIX_RULES = (
SHARED_HEADER_SUFFIX_RULES_COMMENT1 +
''.join(SHARED_HEADER_SUFFIX_RULES_SRCDIR.values()) +
SHARED_HEADER_SUFFIX_RULES_COMMENT2 +
''.join(SHARED_HEADER_SUFFIX_RULES_OBJDIR1.values()) +
''.join(SHARED_HEADER_SUFFIX_RULES_OBJDIR2.values())
)
# This gets added to the very beginning of the Makefile.
SHARED_HEADER_SRCDIR = ("""\
# The source directory tree.
srcdir := %s
""")
SHARED_HEADER_BUILDDIR_NAME = ("""\
# The name of the builddir.
builddir_name ?= %s
""")
SHARED_FOOTER = """\
# Now that we've included the sub-makefiles, we can define the rule depending on
# all_targets.
all: $(all_targets)
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. First, only consider targets that already have been
# built, as unbuilt targets will be built regardless of dependency info:
all_deps := $(wildcard $(sort $(all_deps)))
# Of those, only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
for res in (filename.endswith(e) for e
in ['.c', '.cc', '.cpp', '.cxx', '.s', '.S']):
if res:
return True
return False
def Target(filename):
"""Translate a compilable filename to its .o target."""
return os.path.splitext(filename)[0] + '.o'
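
# For illustration: Compilable('foo/bar.cc') is True and
# Target('foo/bar.cc') == 'foo/bar.o', while headers such as 'foo/bar.h'
# are not considered compilable.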
def QuoteIfNecessary(string):
if '"' in string:
string = '"' + string.replace('"', '\\"') + '"'
return string
srcdir_prefix = ''
def Sourceify(path):
"""Convert a path to its source directory form."""
if '$(' in path:
return path
if os.path.isabs(path):
return path
return srcdir_prefix + path
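
# For illustration: when --generator-output is in use, GenerateOutput()
# below sets srcdir_prefix to '$(srcdir)/', so Sourceify('foo/bar.c')
# yields '$(srcdir)/foo/bar.c'; absolute paths and paths that already
# contain '$(' pass through unchanged.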
# Map from qualified target to path to output.
target_outputs = {}
# Map from qualified target to a list of all linker dependencies,
# transitively expanded.
# Used in building shared-library-based executables.
target_link_deps = {}
class MakefileWriter:
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def Write(self, qualified_target, base_path, output_filename, spec, configs,
part_of_all):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
"""
print 'Generating %s' % output_filename
ensure_directory_exists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
extra_link_deps = []
self.output = self.ComputeOutput(spec)
self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
'shared_library')
if self.type in self._INSTALLABLE_TARGETS:
self.alias = os.path.basename(self.output)
else:
self.alias = self.output
self.WriteLn("TOOLSET := " + self.toolset)
self.WriteLn("TARGET := " + self.target)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs,
part_of_all)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs, part_of_all)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
all_sources = spec.get('sources', []) + extra_sources
if all_sources:
self.WriteSources(configs, deps, all_sources,
extra_outputs, extra_link_deps, part_of_all)
sources = filter(Compilable, all_sources)
if sources:
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
extensions = set([os.path.splitext(s)[1] for s in sources])
for ext in extensions:
if ext in SHARED_HEADER_SUFFIX_RULES_SRCDIR:
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_SRCDIR[ext])
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
for ext in extensions:
if ext in SHARED_HEADER_SUFFIX_RULES_OBJDIR1:
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_OBJDIR1[ext])
for ext in extensions:
if ext in SHARED_HEADER_SUFFIX_RULES_OBJDIR2:
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_OBJDIR2[ext])
self.WriteLn('# End of this set of suffix rules')
self.WriteTarget(spec, configs, deps,
extra_link_deps + link_deps, extra_outputs, part_of_all)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = self.alias
# Update global list of link dependencies.
if self.type == 'static_library':
target_link_deps[qualified_target] = [self.output]
elif self.type == 'shared_library':
# Anyone that uses us transitively depend on all of our link
# dependencies.
target_link_deps[qualified_target] = [self.output] + link_deps
self.fp.close()
def WriteActions(self, actions, extra_sources, extra_outputs, part_of_all):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
part_of_all: flag indicating this target is part of 'all'
"""
for action in actions:
name = self.target + '_' + action['action_name']
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
# Write the actual command.
command = gyp.common.EncodePOSIXShellList(action['action'])
if 'message' in action:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
else:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
# Set LD_LIBRARY_PATH in case the action runs an executable from this
# build which links to shared libs from this build.
if self.path:
cd_action = 'cd %s; ' % Sourceify(self.path)
else:
cd_action = ''
# actions run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn('cmd_%s = export LD_LIBRARY_PATH=$(builddir)/lib.host:'
'$(builddir)/lib.target:$$LD_LIBRARY_PATH; %s%s'
% (name, cd_action, command))
self.WriteLn()
outputs = map(self.Absolutify, outputs)
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the obj
# variable for the action rule with an absolute version so that the output
# goes in the right place.
self.WriteMakeRule(outputs, ['obj := $(abs_obj)'])
self.WriteMakeRule(outputs, ['builddir := $(abs_builddir)'])
self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
part_of_all=part_of_all, command=name)
# Stuff the outputs in a variable so we can refer to them later.
outputs_variable = 'action_%s_outputs' % name
self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs, part_of_all):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
part_of_all: flag indicating this target is part of 'all'
"""
for rule in rules:
name = self.target + '_' + rule['rule_name']
count = 0
self.WriteLn('### Generated for rule %s:' % name)
all_outputs = []
for rule_source in rule['rule_sources']:
dirs = set()
rule_source_basename = os.path.basename(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root)
for out in rule['outputs']]
for out in outputs:
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.append(out)
all_outputs += outputs
inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
rule.get('inputs', [])))
actions = ['$(call do_cmd,%s_%d)' % (name, count)]
if name == 'resources_grit':
# HACK: This is ugly. Grit intentionally doesn't touch the
# timestamp of its output file when the file doesn't change,
# which is fine in hash-based dependency systems like scons
# and forge, but not kosher in the make world. After some
# discussion, hacking around it here seems like the least
# amount of pain.
actions += ['@touch --no-create $@']
self.WriteMakeRule(outputs, ['obj := $(abs_obj)'])
self.WriteMakeRule(outputs, ['builddir := $(abs_builddir)'])
self.WriteMakeRule(outputs, inputs + ['FORCE_DO_CMD'], actions)
if part_of_all:
self.WriteLn('all_targets += %s' % ' '.join(outputs))
self.WriteLn('all_deps += %s' % ' '.join(outputs))
action = [self.ExpandInputRoot(ac, rule_source_root)
for ac in rule['action']]
mkdirs = ''
if len(dirs) > 0:
mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
if self.path:
cd_action = 'cd %s; ' % Sourceify(self.path)
else:
cd_action = ''
self.WriteLn(
"cmd_%(name)s_%(count)d = %(cd_action)s%(mkdirs)s%(action)s" % {
'action': gyp.common.EncodePOSIXShellList(action),
'cd_action': cd_action,
'count': count,
'mkdirs': mkdirs,
'name': name,
})
self.WriteLn(
'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
'count': count,
'name': name,
})
self.WriteLn()
count += 1
outputs_variable = 'rule_%s_outputs' % name
self.WriteList(all_outputs, outputs_variable)
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn('### Finished generating for rule: %s' % name)
self.WriteLn()
self.WriteLn('### Finished generating for all rules')
self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Generated for copy rule.')
variable = self.target + '_copies'
outputs = []
for copy in copies:
for path in copy['files']:
path = Sourceify(self.Absolutify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
filename)))
self.WriteDoCmd([output], [path], 'copy', part_of_all)
outputs.append(output)
self.WriteLn('%s = %s' % (variable, ' '.join(outputs)))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteSources(self, configs, deps, sources,
extra_outputs, extra_link_deps,
part_of_all):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
configs, deps, sources: input from gyp.
extra_outputs: a list of extra outputs this action should be dependent on;
used to serialize action/rules before compilation
extra_link_deps: a list that will be filled in with any outputs of
compilation (to be used in link lines)
part_of_all: flag indicating this target is part of 'all'
"""
# Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D')
self.WriteLn("# Flags passed to both C and C++ files.");
self.WriteList(config.get('cflags'), 'CFLAGS_%s' % configname)
self.WriteLn("# Flags passed to only C (and not C++) files.");
self.WriteList(config.get('cflags_c'), 'CFLAGS_C_%s' % configname)
self.WriteLn("# Flags passed to only C++ (and not C) files.");
self.WriteList(config.get('cflags_cc'), 'CFLAGS_CC_%s' % configname)
includes = config.get('include_dirs')
if includes:
includes = map(Sourceify, map(self.Absolutify, includes))
self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
sources = filter(Compilable, sources)
objs = map(self.Objectify, map(self.Absolutify, map(Target, sources)))
self.WriteList(objs, 'OBJS')
if part_of_all:
self.WriteLn('# Add to the list of dependencies for the default target')
self.WriteLn('all_targets += $(OBJS)')
self.WriteLn()
self.WriteLn('# Add to the list of files we specially track '
'dependencies for.')
self.WriteLn('all_deps += $(OBJS)')
self.WriteLn()
# Make sure our dependencies are built first.
if deps:
self.WriteMakeRule(['$(OBJS)'], deps,
comment = 'Make sure our dependencies are built '
'before any of us.',
order_only = True)
# Make sure the actions and rules run first.
# If they generate any extra headers etc., the per-.o file dep tracking
# will catch the proper rebuilds, so order only is still ok here.
if extra_outputs:
self.WriteMakeRule(['$(OBJS)'], extra_outputs,
comment = 'Make sure our actions/rules run '
'before any of us.',
order_only = True)
if objs:
extra_link_deps.append('$(OBJS)')
self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
self.WriteLn("$(OBJS): CFLAGS := $(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE)) "
"$(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE))")
self.WriteLn("$(OBJS): CXXFLAGS := $(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE)) "
"$(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE))")
self.WriteLn()
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
    E.g., the loadable module 'foobar' in directory 'baz' (with the target
    toolset) will produce '$(builddir)/lib.target/baz/libfoobar.so'.
"""
output = None
target = spec['target_name']
path = spec.get('product_dir', os.path.join('$(obj).' + self.toolset,
self.path))
if self.type == 'static_library':
target = 'lib%s.a' % (target[:3] == 'lib' and [target[3:]] or [target])[0]
elif self.type in ('loadable_module', 'shared_library'):
target = 'lib%s.so' % (target[:3] == 'lib' and [target[3:]] or [target])[0]
path = spec.get('product_dir', os.path.join('$(builddir)', 'lib.' +
self.toolset, self.path))
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type == 'settings':
return None
elif self.type == 'executable':
target = spec.get('product_name', target)
path = spec.get('product_dir', os.path.join('$(builddir)'))
else:
print ("ERROR: What output file should be generated?",
"typ", self.type, "target", target)
return os.path.join(path, target)
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.extend(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteTarget(self, spec, configs, deps, link_deps, extra_outputs,
part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteMakeRule([self.output], extra_outputs,
comment = 'Build our special outputs first.',
order_only = True)
if self.type not in ('settings', 'none'):
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('ldflags'), 'LDFLAGS_%s' % configname)
self.WriteList(spec.get('libraries'), 'LIBS')
self.WriteLn('%s: LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' % self.output)
self.WriteLn('%s: LIBS := $(LIBS)' % self.output)
if self.type == 'executable':
self.WriteDoCmd([self.output], link_deps, 'link', part_of_all)
elif self.type == 'static_library':
self.WriteDoCmd([self.output], link_deps, 'alink', part_of_all)
elif self.type in ('loadable_module', 'shared_library'):
self.WriteDoCmd([self.output], link_deps, 'solink', part_of_all)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output], deps, 'touch', part_of_all)
elif self.type == 'settings':
# Only used for passing flags around.
pass
else:
print "WARNING: no output for", self.type, target
# Add an alias for each target (if there are any outputs).
if self.output and self.output != self.target:
self.WriteMakeRule([self.target], [self.output],
comment='Add target alias')
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if self.type in self._INSTALLABLE_TARGETS:
      if self.type == 'shared_library':
file_desc = 'shared library'
# Install all shared libs into a common directory (per toolset) for
# convenient access with LD_LIBRARY_PATH.
binpath = '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
else:
file_desc = 'executable'
binpath = '$(builddir)/' + self.alias
installable_deps = [self.output]
if binpath != self.output:
self.WriteDoCmd([binpath], [self.output], 'copy',
comment = 'Copy this to the %s output path.' %
file_desc, part_of_all=part_of_all)
installable_deps.append(binpath)
if self.output != self.alias:
self.WriteMakeRule([self.alias], installable_deps,
comment = 'Short alias for building this %s.' %
file_desc, phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [binpath],
comment = 'Add %s to "all" target.' % file_desc,
phony = True)
def WriteList(self, list, variable=None, prefix=''):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
self.fp.write(variable + " := ")
if list:
list = [QuoteIfNecessary(prefix + l) for l in list]
self.fp.write(" \\\n\t".join(list))
self.fp.write("\n\n")
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None):
"""Write a Makefile rule that uses do_cmd.
This makes the outputs dependent on the command line that was run,
as well as support the V= make command line flag.
"""
self.WriteMakeRule(outputs, inputs,
actions = ['$(call do_cmd,%s)' % command],
comment = comment,
force = True)
if part_of_all:
# Add our outputs to the list of dependencies of the default target
self.WriteLn('all_targets += %s' % ' '.join(outputs))
# Add our outputs to the list of targets we read depfiles from.
self.WriteLn('all_deps += %s' % ' '.join(outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
order_only=False, force=False, phony=False):
"""Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
phony: if true, the rule does not actually generate the named output, the
output is just a name to run the rule
"""
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
# TODO(evanm): just make order_only a list of deps instead of these hacks.
if order_only:
order_insert = '| '
else:
order_insert = ''
if force:
force_append = ' FORCE_DO_CMD'
else:
force_append = ''
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
self.WriteLn('%s: %s%s%s' % (outputs[0], order_insert, ' '.join(inputs),
force_append))
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
if len(outputs) > 1:
# If we have more than one output, a rule like
# foo bar: baz
      # means that for *each* output we must run the action, potentially
# in parallel. That is not what we're trying to write -- what
# we want is that we run the action once and it generates all
# the files.
# http://www.gnu.org/software/hello/manual/automake/Multiple-Outputs.html
# discusses this problem and has this solution:
# 1) Write the naive rule that would produce parallel runs of
# the action.
      # 2) Make the outputs serialized on each other, so we won't start
      #    a parallel run until the first run finishes, at which point
# we'll have generated all the outputs and we're done.
self.WriteLn('%s: %s' % (' '.join(outputs[1:]), outputs[0]))
self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def Objectify(self, path):
"""Convert a path to its output directory form."""
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
return path
return '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
def Absolutify(self, path):
"""Convert a subdirectory-relative path into a base-relative path.
Skips over paths that contain variables."""
if '$(' in path:
return path
return os.path.normpath(os.path.join(self.path, path))
def FixupArgPath(self, arg):
if '/' in arg or '.h.' in arg:
return self.Absolutify(arg)
return arg
def ExpandInputRoot(self, template, expansion):
if '%(INPUT_ROOT)s' not in template:
return template
path = template % { 'INPUT_ROOT': expansion }
if not os.path.dirname(path):
# If it's just the file name, turn it into a path so FixupArgPath()
# will know to Absolutify() it.
path = os.path.join('.', path)
return path
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'Makefile' + options.suffix
makefile_path = os.path.join(options.depth, makefile_name)
if options.generator_output:
global srcdir_prefix
makefile_path = os.path.join(options.generator_output, makefile_path)
srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
srcdir_prefix = '$(srcdir)/'
ensure_directory_exists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(SHARED_HEADER_SRCDIR % srcdir)
root_makefile.write(SHARED_HEADER_BUILDDIR_NAME % builddir_name)
root_makefile.write(SHARED_HEADER.replace('__default_configuration__',
default_configuration))
for toolset in toolsets:
root_makefile.write('TOOLSET := %s\n' % toolset)
root_makefile.write(ROOT_HEADER_SUFFIX_RULES)
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = []
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
build_files.add(gyp.common.RelativePath(build_file, options.depth))
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file), options.depth)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets found "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the .mk file in the base_path directory.
output_file = os.path.join(options.depth,
base_path,
target + '.' + toolset + options.suffix + '.mk')
if options.generator_output:
output_file = os.path.join(options.generator_output, output_file)
spec = target_dicts[qualified_target]
configs = spec['configurations']
writer = MakefileWriter()
writer.Write(qualified_target, base_path, output_file, spec, configs,
part_of_all=qualified_target in needed_targets)
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
submakefile_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.append('include ' + submakefile_path + '\n')
# Write out the sorted list of includes.
include_list.sort()
root_makefile.write('\n')
for include in include_list:
root_makefile.write(include)
root_makefile.write('\n')
# Write the target to regenerate the Makefile.
if generator_flags.get('auto_regeneration', True):
build_files_args = [gyp.common.RelativePath(filename, options.depth)
for filename in params['build_files_arg']]
root_makefile.write("%s: %s\n\t%s\n" % (
makefile_name,
' '.join(map(Sourceify, build_files)),
gyp.common.EncodePOSIXShellList(
[gyp.common.FixIfRelativePath(params['gyp_binary'], options.depth),
'-fmake'] +
gyp.RegenerateFlags(options) +
build_files_args)))
root_makefile.write(SHARED_FOOTER)
root_makefile.close()
| bsd-3-clause | 5,577,840,718,304,472,000 | 6,559,626,015,178,639,000 | 36.335979 | 147 | 0.622806 | false |
ahmadshahwan/cohorte-runtime | python/src/lib/python/unidecode/x00a.py | 252 | 4121 | data = (
'[?]', # 0x00
'[?]', # 0x01
'N', # 0x02
'[?]', # 0x03
'[?]', # 0x04
'a', # 0x05
'aa', # 0x06
'i', # 0x07
'ii', # 0x08
'u', # 0x09
'uu', # 0x0a
'[?]', # 0x0b
'[?]', # 0x0c
'[?]', # 0x0d
'[?]', # 0x0e
'ee', # 0x0f
'ai', # 0x10
'[?]', # 0x11
'[?]', # 0x12
'oo', # 0x13
'au', # 0x14
'k', # 0x15
'kh', # 0x16
'g', # 0x17
'gh', # 0x18
'ng', # 0x19
'c', # 0x1a
'ch', # 0x1b
'j', # 0x1c
'jh', # 0x1d
'ny', # 0x1e
'tt', # 0x1f
'tth', # 0x20
'dd', # 0x21
'ddh', # 0x22
'nn', # 0x23
't', # 0x24
'th', # 0x25
'd', # 0x26
'dh', # 0x27
'n', # 0x28
'[?]', # 0x29
'p', # 0x2a
'ph', # 0x2b
'b', # 0x2c
'bb', # 0x2d
'm', # 0x2e
'y', # 0x2f
'r', # 0x30
'[?]', # 0x31
'l', # 0x32
'll', # 0x33
'[?]', # 0x34
'v', # 0x35
'sh', # 0x36
'[?]', # 0x37
's', # 0x38
'h', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'\'', # 0x3c
'[?]', # 0x3d
'aa', # 0x3e
'i', # 0x3f
'ii', # 0x40
'u', # 0x41
'uu', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'ee', # 0x47
'ai', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'oo', # 0x4b
'au', # 0x4c
'', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'khh', # 0x59
'ghh', # 0x5a
'z', # 0x5b
'rr', # 0x5c
'[?]', # 0x5d
'f', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'0', # 0x66
'1', # 0x67
'2', # 0x68
'3', # 0x69
'4', # 0x6a
'5', # 0x6b
'6', # 0x6c
'7', # 0x6d
'8', # 0x6e
'9', # 0x6f
'N', # 0x70
'H', # 0x71
'', # 0x72
'', # 0x73
'G.E.O.', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'N', # 0x81
'N', # 0x82
'H', # 0x83
'[?]', # 0x84
'a', # 0x85
'aa', # 0x86
'i', # 0x87
'ii', # 0x88
'u', # 0x89
'uu', # 0x8a
'R', # 0x8b
'[?]', # 0x8c
'eN', # 0x8d
'[?]', # 0x8e
'e', # 0x8f
'ai', # 0x90
'oN', # 0x91
'[?]', # 0x92
'o', # 0x93
'au', # 0x94
'k', # 0x95
'kh', # 0x96
'g', # 0x97
'gh', # 0x98
'ng', # 0x99
'c', # 0x9a
'ch', # 0x9b
'j', # 0x9c
'jh', # 0x9d
'ny', # 0x9e
'tt', # 0x9f
'tth', # 0xa0
'dd', # 0xa1
'ddh', # 0xa2
'nn', # 0xa3
't', # 0xa4
'th', # 0xa5
'd', # 0xa6
'dh', # 0xa7
'n', # 0xa8
'[?]', # 0xa9
'p', # 0xaa
'ph', # 0xab
'b', # 0xac
'bh', # 0xad
'm', # 0xae
'ya', # 0xaf
'r', # 0xb0
'[?]', # 0xb1
'l', # 0xb2
'll', # 0xb3
'[?]', # 0xb4
'v', # 0xb5
'sh', # 0xb6
'ss', # 0xb7
's', # 0xb8
'h', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'\'', # 0xbc
'\'', # 0xbd
'aa', # 0xbe
'i', # 0xbf
'ii', # 0xc0
'u', # 0xc1
'uu', # 0xc2
'R', # 0xc3
'RR', # 0xc4
'eN', # 0xc5
'[?]', # 0xc6
'e', # 0xc7
'ai', # 0xc8
'oN', # 0xc9
'[?]', # 0xca
'o', # 0xcb
'au', # 0xcc
'', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'AUM', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'RR', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'0', # 0xe6
'1', # 0xe7
'2', # 0xe8
'3', # 0xe9
'4', # 0xea
'5', # 0xeb
'6', # 0xec
'7', # 0xed
'8', # 0xee
'9', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| apache-2.0 | -4,149,074,082,274,026,500 | 2,404,876,429,553,381,000 | 15.035019 | 19 | 0.303082 | false |
yang1fan2/nematus | test/config.py | 1 | 1647 | import numpy
import os
import sys
SRC = "de"
TGT = "en"
DATA_DIR = "data/"
from nematus.nmt import train
if __name__ == '__main__':
validerr = train(saveto='models/model.npz',
reload_=False,
dim_word=256,
dim=256,
n_words=20000,
n_words_src=30000,
decay_c=0.,
clip_c=100.,
lrate=0.001,
sort_by_length=False,
optimizer='adam',
maxlen=200,
batch_size=32,
valid_batch_size=32,
datasets=[DATA_DIR + '/train.en-de.low.filt.' + SRC, DATA_DIR + '/train.en-de.low.filt.' + TGT],
valid_datasets=[DATA_DIR + '/valid.en-de.low.' + SRC, DATA_DIR + '/valid.en-de.low.' + TGT],
dictionaries=[DATA_DIR + '/train.en-de.low.filt.' + SRC + '.json',DATA_DIR + '/train.en-de.low.filt.' + TGT + '.json'],
validFreq=2500,
dispFreq=1000,
saveFreq=3000,
sampleFreq=10000,
use_dropout=False,
dropout_embedding=0., # dropout for input embeddings (0: no dropout)
dropout_hidden=0.5, # dropout for hidden layers (0: no dropout)
dropout_source=0., # dropout source words (0: no dropout)
dropout_target=0., # dropout target words (0: no dropout)
overwrite=True)#,
#external_validation_script='./validate.sh')
print validerr
| bsd-3-clause | 1,291,967,040,862,252,800 | -811,057,960,565,358,200 | 38.214286 | 139 | 0.460231 | false |
flowersteam/SESM | SESM/pykinect.py | 2 | 3387 | import zmq
import numpy
import threading
from collections import namedtuple
Point2D = namedtuple('Point2D', ('x', 'y'))
Point3D = namedtuple('Point3D', ('x', 'y', 'z'))
Quaternion = namedtuple('Quaternion', ('x', 'y', 'z', 'w'))
torso_joints = ('hip_center', 'spine', 'shoulder_center', 'head')
left_arm_joints = ('shoulder_left', 'elbow_left', 'wrist_left', 'hand_left')
right_arm_joints = ('shoulder_right', 'elbow_right', 'wrist_right', 'hand_right')
left_leg_joints = ('hip_left', 'knee_left', 'ankle_left', 'foot_left')
right_leg_joints = ('hip_right', 'knee_right', 'ankle_right', 'foot_right')
skeleton_joints = torso_joints + left_arm_joints + right_arm_joints + left_leg_joints + right_leg_joints
class Skeleton(namedtuple('Skeleton', ('timestamp', 'user_id') + skeleton_joints)):
joints = skeleton_joints
@property
def to_np(self):
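        """Return the skeleton as a (20, 3) numpy array: one (x, y, z)
        row per joint, ordered as in skeleton_joints."""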
l = []
for j in self.joints:
p = getattr(self, j).position
l.append((p.x, p.y, p.z))
return numpy.array(l)
Joint = namedtuple('Joint', ('position', 'orientation', 'pixel_coordinate'))
class KinectSensor(object):
def __init__(self, addr, port):
self._lock = threading.Lock()
self._skeleton = None
context = zmq.Context()
self.socket = context.socket(zmq.REQ)
self.socket.connect('tcp://{}:{}'.format(addr, port))
t = threading.Thread(target=self.get_skeleton)
t.daemon = True
t.start()
@property
def tracked_skeleton(self):
with self._lock:
return self._skeleton
@tracked_skeleton.setter
def tracked_skeleton(self, skeleton):
with self._lock:
self._skeleton = skeleton
def get_skeleton(self):
while True:
self.socket.send('Hello')
md = self.socket.recv_json()
msg = self.socket.recv()
skeleton_array = numpy.frombuffer(buffer(msg), dtype=md['dtype'])
skeleton_array = skeleton_array.reshape(md['shape'])
joints = []
for i in range(len(skeleton_joints)):
x, y, z, w = skeleton_array[i][0:4]
position = Point3D(x / w, y / w, z / w)
pixel_coord = Point2D(*skeleton_array[i][4:6])
orientation = Quaternion(*skeleton_array[i][6:10])
joints.append(Joint(position, orientation, pixel_coord))
self.tracked_skeleton = Skeleton(md['timestamp'], md['user_index'], *joints)
def draw_position(skel, ax):
xy, zy = [], []
if not skel:
return
for j in skeleton_joints:
p = getattr(skel, j).position
xy.append((p.x, p.y))
zy.append((p.z, p.y))
ax.set_xlim(-2, 5)
ax.set_ylim(-1.5, 1.5)
ax.scatter(zip(*xy)[0], zip(*xy)[1], 30, 'b')
ax.scatter(zip(*zy)[0], zip(*zy)[1], 30, 'r')
if __name__ == '__main__':
import time
import matplotlib.pyplot as plt
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
kinect_sensor = KinectSensor('193.50.110.210', 9999)
import skelangle
kinect_angle = skelangle.AngleFromSkel()
try:
while True:
ax.clear()
draw_position(kinect_sensor.tracked_skeleton, ax)
plt.draw()
time.sleep(0.1)
except KeyboardInterrupt:
plt.close('all')
| gpl-3.0 | -4,997,253,977,170,286,000 | -1,470,668,855,239,652,600 | 27.948718 | 104 | 0.570416 | false |
gautam1858/tensorflow | tensorflow/contrib/tensorrt/test/rank_two_test.py | 2 | 3365 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class RankTwoTest(trt_test.TfTrtIntegrationTestBase):
def GetParams(self):
"""Test for rank 2 input in TF-TRT."""
input_names = ["input", "input2"]
# Two paths: first with rank 2 input, second with rank 4 input.
input_dims = [[12, 5], [12, 5, 2, 2]]
output_name = "output"
g = ops.Graph()
with g.as_default():
outputs = []
for i in range(2):
x = array_ops.placeholder(
dtype=dtypes.float32, shape=input_dims[i], name=input_names[i])
c = constant_op.constant(1.0, name="c%d_1" % i)
q = math_ops.add(x, c, name="add%d_1" % i)
q = math_ops.abs(q, name="abs%d_1" % i)
c = constant_op.constant(2.2, name="c%d_2" % i)
q = math_ops.add(q, c, name="add%d_2" % i)
q = math_ops.abs(q, name="abs%d_2" % i)
c = constant_op.constant(3.0, name="c%d_3" % i)
q = math_ops.add(q, c, name="add%d_3" % i)
if i == 0:
axis = constant_op.constant(-1, dtype=dtypes.int32, name="axis")
for j in range(2):
q = array_ops.expand_dims(q, axis, name="expand%d_%d" % (i, j))
q = self.trt_incompatible_op(q)
q = gen_math_ops.reciprocal(q, name="reciprocal%d" % i)
outputs.append(q)
# Combine both paths
q = math_ops.add(outputs[0], outputs[1], name="add")
array_ops.squeeze(q, name=output_name)
return trt_test.TfTrtIntegrationTestParams(
gdef=g.as_graph_def(),
input_names=input_names,
input_dims=[input_dims],
output_names=[output_name],
expected_output_dims=[[input_dims[1]]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": [
"add0_1", "add0_2", "add0_3", "c0_1", "c0_2", "c0_3", "abs0_1",
"abs0_2", "expand0_0", "expand0_1", "axis"
],
"TRTEngineOp_1": [
"add", "add1_1", "add1_2", "add1_3", "c1_1", "c1_2", "c1_3",
"abs1_1", "abs1_2", "reciprocal0", "reciprocal1"
],
}
if __name__ == "__main__":
test.main()
| apache-2.0 | 5,466,509,857,943,919,000 | -1,331,641,466,277,350,000 | 38.588235 | 85 | 0.61159 | false |
olsaki/ansible-modules-extras | network/f5/bigip_monitor_tcp.py | 8 | 17582 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, serge van Ginderachter <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_monitor_tcp
short_description: "Manages F5 BIG-IP LTM tcp monitors"
description:
- "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API"
version_added: "1.4"
author: Serge van Ginderachter
notes:
- "Requires BIG-IP software version >= 11"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
- "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx"
requirements:
- bigsuds
options:
server:
description:
- BIG-IP host
required: true
default: null
user:
description:
- BIG-IP username
required: true
default: null
password:
description:
- BIG-IP password
required: true
default: null
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 2.0
state:
description:
- Monitor state
required: false
default: 'present'
choices: ['present', 'absent']
name:
description:
- Monitor name
required: true
default: null
aliases: ['monitor']
partition:
description:
- Partition for the monitor
required: false
default: 'Common'
type:
description:
- The template type of this monitor template
required: false
default: 'tcp'
    choices: ['tcp', 'tcp_echo', 'tcp_half_open']
parent:
description:
- The parent template of this monitor template
required: false
default: 'tcp'
choices: [ 'tcp', 'tcp_echo', 'tcp_half_open']
parent_partition:
description:
- Partition for the parent monitor
required: false
default: 'Common'
send:
description:
- The send string for the monitor call
    required: false
default: none
receive:
description:
- The receive string for the monitor call
    required: false
default: none
ip:
description:
- IP address part of the ipport definition. The default API setting
is "0.0.0.0".
required: false
default: none
port:
description:
      - Port address part of the ipport definition. The default API
setting is 0.
required: false
default: none
interval:
description:
- The interval specifying how frequently the monitor instance
of this template will run. By default, this interval is used for up and
down states. The default API setting is 5.
required: false
default: none
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second. The default API setting
is 16.
required: false
default: none
time_until_up:
description:
- Specifies the amount of time in seconds after the first successful
response before a node will be marked up. A value of 0 will cause a
node to be marked up immediately after a valid response is received
from the node. The default API setting is 0.
required: false
    default: 0
'''
EXAMPLES = '''
- name: BIGIP F5 | Create TCP Monitor
local_action:
module: bigip_monitor_tcp
state: present
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ item.monitorname }}"
type: tcp
send: "{{ item.send }}"
receive: "{{ item.receive }}"
with_items: f5monitors-tcp
- name: BIGIP F5 | Create TCP half open Monitor
local_action:
module: bigip_monitor_tcp
state: present
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ item.monitorname }}"
    type: tcp_half_open
with_items: f5monitors-halftcp
- name: BIGIP F5 | Remove TCP Monitor
local_action:
module: bigip_monitor_tcp
state: absent
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ monitorname }}"
with_flattened:
- f5monitors-tcp
- f5monitors-halftcp
'''
try:
import bigsuds
except ImportError:
bigsuds_found = False
else:
bigsuds_found = True
TEMPLATE_TYPE = DEFAULT_TEMPLATE_TYPE = 'TTYPE_TCP'
TEMPLATE_TYPE_CHOICES = ['tcp', 'tcp_echo', 'tcp_half_open']
DEFAULT_PARENT = DEFAULT_TEMPLATE_TYPE_CHOICE = DEFAULT_TEMPLATE_TYPE.replace('TTYPE_', '').lower()
# ===========================================
# bigip_monitor module generic methods.
# these should be re-useable for other monitor types
#
def bigip_api(bigip, user, password):
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
return api
def disable_ssl_cert_validation():
# You probably only want to do this for testing and never in production.
# From https://www.python.org/dev/peps/pep-0476/#id29
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
def check_monitor_exists(module, api, monitor, parent):
# hack to determine if monitor exists
result = False
try:
ttype = api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0]
parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0]
if ttype == TEMPLATE_TYPE and parent == parent2:
result = True
else:
            module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent (%s)' % (ttype, parent))
except bigsuds.OperationFailed, e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def create_monitor(api, monitor, template_attributes):
try:
api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes])
except bigsuds.OperationFailed, e:
if "already exists" in str(e):
return False
else:
# genuine exception
raise
return True
def delete_monitor(api, monitor):
try:
api.LocalLB.Monitor.delete_template(template_names=[monitor])
except bigsuds.OperationFailed, e:
# maybe it was deleted since we checked
if "was not found" in str(e):
return False
else:
# genuine exception
raise
return True
def check_string_property(api, monitor, str_property):
try:
return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0]
except bigsuds.OperationFailed, e:
# happens in check mode if not created yet
if "was not found" in str(e):
return True
else:
# genuine exception
raise
return True
def set_string_property(api, monitor, str_property):
api.LocalLB.Monitor.set_template_string_property(template_names=[monitor], values=[str_property])
def check_integer_property(api, monitor, int_property):
try:
return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0]
except bigsuds.OperationFailed, e:
# happens in check mode if not created yet
if "was not found" in str(e):
return True
else:
# genuine exception
raise
return True
def set_integer_property(api, monitor, int_property):
api.LocalLB.Monitor.set_template_int_property(template_names=[monitor], values=[int_property])
def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties):
changed = False
for str_property in template_string_properties:
if str_property['value'] is not None and not check_string_property(api, monitor, str_property):
if not module.check_mode:
set_string_property(api, monitor, str_property)
changed = True
for int_property in template_integer_properties:
if int_property['value'] is not None and not check_integer_property(api, monitor, int_property):
if not module.check_mode:
set_integer_property(api, monitor, int_property)
changed = True
return changed
def get_ipport(api, monitor):
return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0]
def set_ipport(api, monitor, ipport):
try:
api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport])
return True, ""
except bigsuds.OperationFailed, e:
if "Cannot modify the address type of monitor" in str(e):
return False, "Cannot modify the address type of monitor if already assigned to a pool."
else:
# genuine exception
raise
# ===========================================
# main loop
#
# writing a module for other monitor types should
# only need an updated main() (and monitor specific functions)
def main():
# begin monitor specific stuff
module = AnsibleModule(
argument_spec = dict(
server = dict(required=True),
user = dict(required=True),
password = dict(required=True),
validate_certs = dict(default='yes', type='bool'),
partition = dict(default='Common'),
state = dict(default='present', choices=['present', 'absent']),
name = dict(required=True),
type = dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES),
parent = dict(default=DEFAULT_PARENT),
parent_partition = dict(default='Common'),
send = dict(required=False),
receive = dict(required=False),
ip = dict(required=False),
port = dict(required=False, type='int'),
interval = dict(required=False, type='int'),
timeout = dict(required=False, type='int'),
time_until_up = dict(required=False, type='int', default=0)
),
supports_check_mode=True
)
server = module.params['server']
user = module.params['user']
password = module.params['password']
validate_certs = module.params['validate_certs']
partition = module.params['partition']
parent_partition = module.params['parent_partition']
state = module.params['state']
name = module.params['name']
type = 'TTYPE_' + module.params['type'].upper()
parent = "/%s/%s" % (parent_partition, module.params['parent'])
monitor = "/%s/%s" % (partition, name)
send = module.params['send']
receive = module.params['receive']
ip = module.params['ip']
port = module.params['port']
interval = module.params['interval']
timeout = module.params['timeout']
time_until_up = module.params['time_until_up']
# tcp monitor has multiple types, so overrule
global TEMPLATE_TYPE
TEMPLATE_TYPE = type
# end monitor specific stuff
if not validate_certs:
disable_ssl_cert_validation()
if not bigsuds_found:
module.fail_json(msg="the python bigsuds module is required")
api = bigip_api(server, user, password)
monitor_exists = check_monitor_exists(module, api, monitor, parent)
# ipport is a special setting
if monitor_exists: # make sure to not update current settings if not asked
cur_ipport = get_ipport(api, monitor)
if ip is None:
ip = cur_ipport['ipport']['address']
if port is None:
port = cur_ipport['ipport']['port']
else: # use API defaults if not defined to create it
if interval is None:
interval = 5
if timeout is None:
timeout = 16
if ip is None:
ip = '0.0.0.0'
if port is None:
port = 0
if send is None:
send = ''
if receive is None:
receive = ''
# define and set address type
if ip == '0.0.0.0' and port == 0:
address_type = 'ATYPE_STAR_ADDRESS_STAR_PORT'
elif ip == '0.0.0.0' and port != 0:
address_type = 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT'
elif ip != '0.0.0.0' and port != 0:
address_type = 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT'
else:
address_type = 'ATYPE_UNSET'
ipport = {'address_type': address_type,
'ipport': {'address': ip,
'port': port}}
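    # e.g. ip='10.1.2.3', port=443 yields:
    #   {'address_type': 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT',
    #    'ipport': {'address': '10.1.2.3', 'port': 443}}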
template_attributes = {'parent_template': parent,
'interval': interval,
'timeout': timeout,
'dest_ipport': ipport,
'is_read_only': False,
'is_directly_usable': True}
# monitor specific stuff
if type == 'TTYPE_TCP':
template_string_properties = [{'type': 'STYPE_SEND',
'value': send},
{'type': 'STYPE_RECEIVE',
'value': receive}]
else:
template_string_properties = []
template_integer_properties = [{'type': 'ITYPE_INTERVAL',
'value': interval},
{'type': 'ITYPE_TIMEOUT',
'value': timeout},
                                   {'type': 'ITYPE_TIME_UNTIL_UP',
                                    'value': time_until_up}]
# main logic, monitor generic
try:
result = {'changed': False} # default
if state == 'absent':
if monitor_exists:
if not module.check_mode:
# possible race condition if same task
# on other node deleted it first
result['changed'] |= delete_monitor(api, monitor)
else:
result['changed'] |= True
else: # state present
## check for monitor itself
if not monitor_exists: # create it
if not module.check_mode:
# again, check changed status here b/c race conditions
# if other task already created it
result['changed'] |= create_monitor(api, monitor, template_attributes)
else:
result['changed'] |= True
## check for monitor parameters
# whether it already existed, or was just created, now update
# the update functions need to check for check mode but
# cannot update settings if it doesn't exist which happens in check mode
if monitor_exists and not module.check_mode:
result['changed'] |= update_monitor_properties(api, module, monitor,
template_string_properties,
template_integer_properties)
# else assume nothing changed
# we just have to update the ipport if monitor already exists and it's different
if monitor_exists and cur_ipport != ipport:
set_ipport(api, monitor, ipport)
result['changed'] |= True
#else: monitor doesn't exist (check mode) or ipport is already ok
except Exception, e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | -8,509,099,160,988,134,000 | -3,827,687,838,972,828,700 | 33.47451 | 158 | 0.576044 | false |
artur-shaik/qutebrowser | tests/integration/test_webserver.py | 1 | 2504 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Test the httpbin webserver used for tests."""
import json
import urllib.request
import urllib.error
import pytest
@pytest.mark.parametrize('path, content, expected', [
('/', '<title>httpbin(1): HTTP Client Testing Service</title>', True),
# https://github.com/Runscope/httpbin/issues/245
('/', 'www.google-analytics.com', False),
('/data/hello.txt', 'Hello World!', True),
])
def test_httpbin(httpbin, qtbot, path, content, expected):
with qtbot.waitSignal(httpbin.new_request, raising=True, timeout=100):
url = 'http://localhost:{}{}'.format(httpbin.port, path)
try:
response = urllib.request.urlopen(url)
except urllib.error.HTTPError as e:
# "Though being an exception (a subclass of URLError), an HTTPError
# can also function as a non-exceptional file-like return value
# (the same thing that urlopen() returns)."
# ...wat
print(e.read().decode('utf-8'))
raise
data = response.read().decode('utf-8')
assert httpbin.get_requests() == [httpbin.ExpectedRequest('GET', path)]
assert (content in data) == expected
@pytest.mark.parametrize('line, verb, path, equal', [
({'verb': 'GET', 'path': '/', 'status': 200}, 'GET', '/', True),
({'verb': 'GET', 'path': '/foo/', 'status': 200}, 'GET', '/foo', True),
({'verb': 'GET', 'path': '/', 'status': 200}, 'GET', '/foo', False),
({'verb': 'POST', 'path': '/', 'status': 200}, 'GET', '/', False),
])
def test_expected_request(httpbin, line, verb, path, equal):
expected = httpbin.ExpectedRequest(verb, path)
request = httpbin.Request(json.dumps(line))
assert (expected == request) == equal
| gpl-3.0 | -4,741,970,713,487,407,000 | -5,476,443,823,151,525,000 | 38.125 | 79 | 0.65655 | false |
jbarkschat/Yu-gi-oh_pack_generator | Yu-gi-oh_pack_generator_alpha.py | 1 | 13809 | import urllib.request
from bs4 import BeautifulSoup
from PIL import Image
from tkinter import Tk, Label, Button
import requests
import pprint
import secrets
import os
class RarityUnknown(Exception):
"""Raised in this program when an unhandled rarity is encountered"""
pp = pprint.PrettyPrinter(indent=4)
#rng.randrange(n, m)
rng = secrets.SystemRandom()
def stitch_packs(file1, file2, file3, file4, file5, file6, file7, file8, file9):
"""Merge two images into one, displayed side by side
:param file1: path to first image file
:param file2: path to second image file
:return: the merged Image object
"""
    filenames = [file1, file2, file3, file4, file5,
                 file6, file7, file8, file9]
    # Normalize every card scan to the same size, then paste them into a
    # 3x3 grid (left to right, top to bottom).
    images = [Image.open(f).resize((476, 695)) for f in filenames]
    (tile_width, tile_height) = images[0].size
    result = Image.new('RGB', (tile_width * 3, tile_height * 3))
    for index, image in enumerate(images):
        (row, col) = divmod(index, 3)
        result.paste(im=image, box=(col * tile_width, row * tile_height))
if not os.path.exists("results"):
os.makedirs("results")
result.save("results/composite_image.png")
return "composite_image.png"
def get_card_image(generated_pack):
for card_count, card_name in enumerate(generated_pack, start=0):
url = "http://yugioh.wikia.com/wiki/" + card_name
code = requests.get(url)
text = code.text
soup = BeautifulSoup(text, "lxml")
wiki_images = soup.findAll('img')
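        # Index 21 is where the card art <img> sits in the current wiki
        # page markup; position-dependent, so a layout change breaks it.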
image_url_lowres = wiki_images[21].attrs.get("src")
image_url = image_url_lowres.split(sep="/scale")[0]
pp.pprint(image_url)
urllib.request.urlretrieve(image_url, "file_" + str(card_count) + ".png")
return 0
def get_names(url):
#Getting webpage
code = requests.get(url)
#I asssume pulling only text out
text = code.text
#turning the text into a soup object
soup = BeautifulSoup(text, "lxml")
#Finding all strong tags and putting them in a bs4 resultset, which behaves like a list
all_strong_tags = soup.findAll('strong')
#seperating each instance of a strong tag into an entry in a list
strong_tag_list = []
for t in all_strong_tags:
strong_tag_list.append(t)
#Remove 2 junk entries used for other things
all_strong_tags.pop(0)
all_strong_tags.pop(0)
card_name_list = []
card_rarity_list = []
#Iterating through all strong tags, to find and link rarities to names
for tag in all_strong_tags:
#moving up one level from the strong tag
current = tag.parent
#if true, than this element has a rarity above common
if bool(current.find("img")):
card_rarity_list.append(current.find("img").get('alt', ''))
card_name_list.append(tag.text)
#put current strong_tag name into dictionary with the rare tag name
#if false than this element has a common rarity
elif not bool(current.find("img")):
card_rarity_list.append("Common")
card_name_list.append(tag.text)
#put current strong_tag name into dictionary with some kind of indicator of no tag
"""
Old code, functionality absorbed into above for statement
Remove all tag info, and leave just plaintext strings in the card_names list
card_names = []
for number in strong_tag_list:
card_names.append(number.text)
"""
#combining list of names with list of rarities
card_names_and_rarities = [card_name_list, card_rarity_list]
return card_names_and_rarities
def get_sets():
#boilerplate code to get the database of yu-gi-oh sets of packs from konami's site
code = requests.get("https://www.db.yugioh-card.com/yugiohdb/card_list.action?request_locale=en")
text = code.text
soup = BeautifulSoup(text, "lxml")
#The way i'm finding the sets gives me one junk entry for every set entry, so I find the number of entries and divide by two to get the true number
number_of_sets = int((len(soup.findAll('th'))) / 2)
#List of sets of packs
all_sets_list = []
"""
Data structure:
all_sets_list [0] [0] [0]
set select 0 = select Nothing
set select 1 = packs specific pack
set select 2 = ids specific id
"""
#iterate through the sets and put them in the list
for element in range(number_of_sets):
set_data = []
set_name = []
pack_name = []
url_of_packs = []
#(element * 2) + 1 specifies which element to append, the magic numbers select only the odd numbers which contain actual data
set_name.append(soup.findAll('th')[(element * 2) + 1].text)
#find number of strong tags, I need a number because I am manipulating two different things on each loop
current_set = soup.findAll('th')[(element * 2) + 1].parent.parent.parent.parent
num_of_packs_in_set = len(current_set.findAll("strong"))
for pack_index in range(num_of_packs_in_set):
#Each pack gets appended to the packs list
pack_name.append(current_set.findAll("strong")[pack_index].text)
#And then the specific id of the pack gets put into the id_of_packs list
pack_link = current_set.find_all("input", class_="link_value")[pack_index].get("value")
pack_id = pack_link.partition("pid=")[2].partition("&rp")[0]
pack_id = "https://www.db.yugioh-card.com/yugiohdb/card_search.action?ope=1&sess=1&pid=" + pack_id + "&rp=99999"
url_of_packs.append(pack_id)
set_data.append(set_name)
set_data.append(pack_name)
set_data.append(url_of_packs)
all_sets_list.append(set_data)
return all_sets_list
def generate_pack(card_list):
#finding total number of cards in the given pack
card_num_total = int(len(card_list[0]))
common_list = []
rare_list = []
super_rare_list = []
ultra_rare_list = []
secret_rare_list = []
#going through the pack, and popping element zero of both the name a rarity, and then sorting it into the proper rarity list as just the name
for card_num in range(card_num_total):
temp_rarity = card_list[1].pop(0)
temp_name = card_list[0].pop(0)
if temp_rarity == "Common" or temp_rarity == "Starfoil":
common_list.append(temp_name)
elif temp_rarity == "Rare":
rare_list.append(temp_name)
elif temp_rarity == "Super Rare":
super_rare_list.append(temp_name)
elif temp_rarity == "Ultra Rare":
ultra_rare_list.append(temp_name)
elif temp_rarity == "Secret Rare":
secret_rare_list.append(temp_name)
else:
raise RarityUnknown(temp_rarity, "Unknown rarity encountered in this pack")
generated_pack = []
    # A pack holds 7 commons, 1 rare and 1 foil (super, ultra or secret rare).
    if (len(common_list) >= 7 and len(rare_list) >= 1 and
            (len(super_rare_list) >= 1 or len(ultra_rare_list) >= 1 or
             len(secret_rare_list) >= 1)):
        #The pops ensure that the same card will never be added to any one pack twice. No idea if that is how it actually works
        #rng.randrange(len(l)) can return any valid index, including the last one
        for common_card_nums in range(7):
            generated_pack.append(common_list.pop(rng.randrange(len(common_list))))
        generated_pack.append(rare_list.pop(rng.randrange(len(rare_list))))
        #16% for ultra rare, 8% for secret rare, 76% for super rare
        #1-8 for secret rare, 9-24 for ultra rare, 25-100 for super rare
        foil_chance = rng.randrange(1, 101)
        if 1 <= foil_chance <= 8:
            generated_pack.append(secret_rare_list.pop(rng.randrange(len(secret_rare_list))))
        elif 9 <= foil_chance <= 24:
            generated_pack.append(ultra_rare_list.pop(rng.randrange(len(ultra_rare_list))))
        elif 25 <= foil_chance <= 100:
            generated_pack.append(super_rare_list.pop(rng.randrange(len(super_rare_list))))
    elif len(common_list) >= 9:
        for common_card_nums in range(9):
            generated_pack.append(common_list.pop(rng.randrange(len(common_list))))
return generated_pack
'''
sets = get_sets()
for count, set in enumerate(sets, start = 1):
print(count, ":", set[0][0])
user_set_selection = int(input("Please select a number of a set."))
for pack_count, pack in enumerate(sets[user_set_selection - 1][1], start = 1):
print(pack_count, ":", pack)
user_pack_selection = int(input("Please select a number of a pack."))
set_name = sets[user_set_selection - 1][0][0]
pack_name = sets[user_set_selection - 1][1][user_pack_selection - 1]
pack_url = sets[user_set_selection - 1][2][user_pack_selection - 1]
print("Set Name:", set_name)
print("Pack Name:", pack_name)
print("Url:", pack_url)
card_list = get_names(pack_url)
generated_pack = generate_pack(card_list)
pp.pprint(generated_pack)
get_card_image(generated_pack)
print(stitch_packs("file_0.png", "file_1.png", "file_2.png", "file_3.png", "file_4.png", "file_5.png", "file_6.png", "file_7.png", "file_8.png"))
'''
class MyFirstGUI:
def __init__(self, master):
self.master = master
master.title("Yu-gi-oh pack generator")
def greet():
print("Greetings!")
def func_generate_pack(sets, set_number, pack_number):
card_list = get_names(sets[set_number][2][pack_number])
generated_pack = generate_pack(card_list)
get_card_image(generated_pack)
stitch_packs("file_0.png", "file_1.png", "file_2.png", "file_3.png", "file_4.png", "file_5.png", "file_6.png", "file_7.png", "file_8.png")
def set_button_clicked(sets, count):
#return sets[count][0][0]
return count
def pack_button_clicked(sets, count):
return count
def delete_buttons(button_list):
for element in button_list:
element.destroy()
def pack_selection_gui(sets, count, button_list):
selected_set = set_button_clicked(sets, count)
delete_buttons(button_list)
pack_button_list = []
button = Button(root, text="Back to main", command=lambda: set_selection_gui(sets, pack_button_list))
pack_button_list.append(button)
button.pack()
for pack_count, pack in enumerate(sets[selected_set][1], start=0):
button = Button(root, text=pack, command=lambda y=count: selected_pack_gui(sets, count, y, pack_button_list))
pack_button_list.append(button)
button.pack()
def selected_pack_gui(sets, set_number, pack_number, button_list):
delete_buttons(button_list)
pack_option_button_list = []
button = Button(root, text="Back to main", command=lambda: set_selection_gui(sets, pack_option_button_list))
pack_option_button_list.append(button)
button.pack()
button = Button(root, text="Generate pack", command=lambda: func_generate_pack(sets, set_number, pack_number))
pack_option_button_list.append(button)
button.pack()
def set_selection_gui(sets, button_list):
delete_buttons(button_list)
set_button_list = []
for count, set in enumerate(sets, start=0):
button = Button(root, text=set[0][0], command=lambda x=count: pack_selection_gui(sets, x, set_button_list))
set_button_list.append(button)
button.pack()
sets = get_sets()
root = Tk()
my_gui = MyFirstGUI(root)
set_selection_gui(sets, [])
root.mainloop()
"""
To do:
need to figure out how to do the stitching more dynamically, 9 attributes is already too many; structure packs need something more robust for sure
find edge cases in generator function
star pack battle royal, hobby god cards
Fusion enforcers, no commons
Use konami site and figure out how to pick card packs and retain years/pack in the list of links.
pack selection gui
Then either upload to dropbox or maybe to imgur through their api
"""
#testTEsttestTEST | gpl-3.0 | 1,889,143,656,458,790,000 | -9,131,745,172,867,059,000 | 34.537037 | 151 | 0.628793 | false |
morganherlocker/openaddresses | scripts/es/gml_to_csv.py | 40 | 6149 | import subprocess, sys, os, tempfile, shutil, re
import xml.sax
import unicodecsv as csv
lookup = {}
class CSVBuilder(xml.sax.ContentHandler):
def __init__(self, directory, lookup):
self.lookup = lookup.lookup
self.writers = {}
self.object = {}
self.out_dir = directory
if self.out_dir[-1]!='/':
self.out_dir += '/'
self.collecting = False
self.collect_pos = False
self.collect_num = False
self.srs = None
self.strip_hash = re.compile(r'^#')
def startElement(self, name, attrs):
# overall protection from collecting non-address elements
if name == 'AD:Address':
self.object = {}
self.collecting = True
# lat/lon
if name == 'gml:pos' and self.collecting:
self.collect_pos = True
self.object['pos'] = ''
# number
if name == 'AD:designator' and self.collecting:
self.collect_num = True
self.object['number'] = ''
# street name, postal code, admin boundary
if name == 'AD:component' and ('xlink:href' in attrs):
lookup_key = self.strip_hash.sub('', attrs['xlink:href'])
if self.lookup['thoroughfare'].get(lookup_key) is not None:
self.object['street'] = self.lookup['thoroughfare'].get(lookup_key)
elif self.lookup['admin'].get(lookup_key) is not None:
self.object['admin_district'] = self.lookup['admin'].get(lookup_key)
elif self.lookup['postal'].get(lookup_key) is not None:
self.object['postcode'] = self.lookup['postal'].get(lookup_key)
# detect SRS, create CSV writer if necessary
if name == 'gml:Point':
self.srs = attrs.get('srsName', None)
if self.srs is not None:
self.srs = self.srs.split(':')[-1]
if not self.srs in self.writers:
self.writers[self.srs] = csv.DictWriter(open(self.out_dir + 'es-%s.csv' % self.srs, 'a'), ('lon', 'lat', 'number', 'street', 'postcode', 'admin'))
self.writers[self.srs].writeheader()
def characters(self, content):
if self.collect_pos:
self.object['pos'] += content
if self.collect_num:
self.object['number'] += content
def endElement(self, name):
if name == 'gml:pos':
self.collect_pos = False
if name == 'AD:designator':
self.collect_num = False
if name == 'AD:Address':
self.collecting = False
self.writers[self.srs].writerow({
'lon': self.object.get('pos').split(' ')[0],
'lat': self.object.get('pos').split(' ')[1],
'number': self.object.get('number'),
'street': self.object.get('street'),
'postcode': self.object.get('postcode'),
'admin': self.object.get('admin')
})
class LookupBuilder(xml.sax.ContentHandler):
def __init__(self):
self.lookup = {}
self.collect = False
def startElement(self, name, attrs):
if name == 'AD:ThoroughfareName':
self.lookup['_type'] = 'thoroughfare'
if name == 'AD:AdminUnitName':
self.lookup['_type'] = 'admin'
if name == 'AD:PostalDescriptor':
self.lookup['_type'] = 'postal'
# begin collecting text content?
if ((name == 'GN:text') and (self.lookup['_type'] in ('thoroughfare', 'admin'))) or ((name == 'AD:postCode') and (self.lookup['_type'] == 'postal')):
self.collect = True
# xlink target? if so, prepare to collect data
if name in ('AD:ThoroughfareName', 'AD:AdminUnitName', 'AD:PostalDescriptor'):
self.lookup['_next'] = attrs.get('gml:id')
if not self.lookup['_type'] in self.lookup:
self.lookup[self.lookup['_type']] = {}
self.lookup[self.lookup['_type']][self.lookup['_next']] = ''
def endElement(self, name):
if name in ('GN:text', 'AD:postCode'):
self.collect = False
if name in ('AD:ThoroughfareName', 'AD:AdminUnitName', 'AD:PostalDescriptor'):
self.lookup['_next'] = None
self.lookup['_type'] = None
def characters(self, content):
# if collecting text content, stick in appropriate spot
if self.collect:
self.lookup[self.lookup['_type']][self.lookup['_next']] += content
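# Two-pass parse: LookupBuilder first maps each gml:id of the thoroughfare,
# admin-unit and postal elements to its text; CSVBuilder then streams the
# same GML file again and resolves each address's xlink:href="#id"
# references against those maps while writing the CSV rows.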
def process_zipfile(in_filename, out_dir):
print 'converting %s, placing output into %s' % (in_filename, out_dir)
temp_dir = tempfile.mkdtemp()
    # decompress the source zip into a temporary directory
subprocess.call(['unzip', '-d', temp_dir, in_filename])
filename = '%s/%s' % (temp_dir, in_filename.split('/')[-1].replace('.zip', '.gml'))
# build thoroughfare/postcode lookup
lookup = LookupBuilder()
with open(filename, 'r') as gml:
parser = xml.sax.make_parser()
parser.setContentHandler(lookup)
parser.parse(gml)
# generate complete CSV
csv_builder = CSVBuilder(out_dir, lookup)
with open(filename, 'r') as gml:
parser = xml.sax.make_parser()
parser.setContentHandler(csv_builder)
parser.parse(gml)
shutil.rmtree(temp_dir)
def main():
in_dir = os.path.abspath(sys.argv[1])
out_dir = os.path.abspath(sys.argv[2])
filename_mappings = {}
# load dictionary of better filenames
with open('./spain_catastre/gml_urls.txt', 'r') as gml_urls:
        for url in gml_urls.readlines():
            url = url.strip()
            filename_mappings[url.split('/')[-1]] = url.split('/')[-2]
for filename in os.listdir(in_dir):
if os.path.isfile('%s/%s' % (in_dir, filename)) and filename.split('.')[-1].lower()=='zip':
process_zipfile('%s/%s' % (in_dir, filename), '%s/%s' % (out_dir, filename.replace('.zip', '.csv')))
if __name__ == '__main__':
in_file = sys.argv[1]
out_dir = sys.argv[2]
process_zipfile(in_file, out_dir) | bsd-3-clause | -7,032,130,540,517,394,000 | 2,129,576,276,524,624,600 | 36.5 | 166 | 0.554887 | false |
felmoltor/NVDparser | nvdparser.py | 2 | 15511 | #!/usr/bin/python
# Author: Felipe Molina (@felmoltor)
# Date: 05/2015
# Summary: Downloads nvd.nist.gov XML files containing CVE details,
#          parses them and saves them into a sqlite3 db
# LICENSE: GPLv2
from mechanize import *
from lxml import etree
import os
import datetime
import time
import gzip
import sqlite3
import re
from termcolor import colored
from optparse import OptionParser
DLPAGE="https://nvd.nist.gov/download.cfm"
DBNAME="nvd.vulnerabilities.db"
OUTPUTFILE="%s_output.csv" % (datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
VERSION="0.1"
def printBanner():
print colored("=====================================================","blue","on_white")
print colored("= NVD vulnerability downloader & parser v%s =" % VERSION,"blue","on_white")
print colored("= DOWNLOADER / IMPORTER / EXPORTER =","blue","on_white")
print colored("= Author: Felipe Molina (@felmoltor) =","blue","on_white")
print colored("= Source XML from %s =" % DLPAGE,"blue","on_white")
print colored("=====================================================","blue","on_white")
print
def getoptions():
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="Show CVE being parsed by the script [default: False]")
parser.add_option("-d", "--database",metavar="FILE",dest="database", default=DBNAME, help="Database file where to save the vulnerabilities")
parser.add_option("-s", "--sqlquery",dest="sqlquery", default=None, help="SQL query to export from the database")
parser.add_option("-o", "--output",dest="outfile", default=OUTPUTFILE, help="Output file name")
(options,args) = parser.parse_args()
if options.database is None:
parser.error("You have to specify a sqlite3 database file (-d, --database)")
return options
def initDatabase(dbname):
if (not os.path.isfile(dbname)):
print "Database file does not exists. Initializing it"
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS download_dates (
dldate_id INTEGER PRIMARY KEY AUTOINCREMENT,
download_link TEXT,
feed_year TEXT,
feed_size REAL,
last_download INTEGER)''')
# CPE format (like URL)
# cpe:/ {part} : {vendor} : {product} : {version} : {update} : {edition} : {language}
# Part - Determines the platform type using the following codes: a = application, h = hardware, o = operating system
# Vendor - Defines the vendor name as the "highest organization-specific label of the organization's DNS name", which, in our case, would be "Tenable Security".
# Product - Product name as specified in the CPE database, e.g., itunes, quicktime and firefox
# Version - The version numbers as represented by the product itself.
# Update - The CPE name for the update or service pack, such as "Service Pack 3" in the case of Windows XP.
# Edition - The edition of the software, such as "pro" for "Professional Edition". For hardware, this would also denote the architecture, such as "i386".
# Language - For example, "English" or other language as specified by the software.
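    # Example (CPE 2.2 URI): cpe:/a:microsoft:internet_explorer:8.0.6001:beta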
c.execute('''CREATE TABLE IF NOT EXISTS cpe (
cpe_id INTEGER PRIMARY KEY AUTOINCREMENT,
cpe_text TEXT,
part TEXT,
vendor TEXT,
product TEXT,
version TEXT,
update_date TEXT,
edition TEXT,
language TEXT
)''')
c.execute('''CREATE TABLE IF NOT EXISTS affects_to_cpe (
affects_to_cpe_id INTEGER PRIMARY KEY AUTOINCREMENT,
vuln_id INT,
cpe_id INT,
FOREIGN KEY(vuln_id) REFERENCES vulnerabilities(vuln_id),
FOREIGN KEY(cpe_id) REFERENCES cpe(cpe_id))''')
c.execute('''CREATE TABLE IF NOT EXISTS vulnerabilities (
vuln_id integer PRIMARY KEY AUTOINCREMENT,
cve TEXT UNIQUE,
cvss_score REAL,
cwe TEXT,
summary TEXT,
published_date INT,
modified_date INT,
dldate_id INT,
FOREIGN KEY(dldate_id) REFERENCES download_dates(dldate_id))''')
c.execute('''CREATE INDEX IF NOT EXISTS vulncve_idx ON vulnerabilities(cve)''')
return conn
def closeDatabase(conn):
conn.close()
def storeDownloadDate(conn,dl_link,feed_year,feed_size,last_download):
    cur = conn.cursor()
    res = cur.execute('''INSERT INTO download_dates(download_link,feed_year,feed_size,last_download) VALUES(?,?,?,?)''',(dl_link,feed_year,feed_size,last_download))
    pk=cur.lastrowid
    conn.commit()
    return pk
def searchCPE(conn,cpe_text):
cpe_id = None
cur = conn.cursor()
res = cur.execute('''SELECT cpe_id FROM cpe WHERE cpe_text = ? LIMIT 1''',(cpe_text,))
results = res.fetchall()
if len(results) > 0:
cpe_id = results[0][0]
return cpe_id
def storeDlDate(conn,dllink,feed_year,feed_size):
dlepoch = int(time.time())
cur = conn.cursor()
res = cur.execute('''INSERT INTO download_dates(download_link,feed_year,feed_size,last_download) VALUES(?,?,?,?)''',(dllink,feed_year,feed_size,dlepoch))
pk=cur.lastrowid
conn.commit()
return pk
def storeCPE(conn,cpe_text):
part=vendor=product=version=update=edition=language = '?'
cpesplit=cpe_text.split(":")
# In some cases, the CPE text does not contains all the 7 fields. Check wich is available
if len(cpesplit)>1 and cpesplit[1] is not None:
part=cpesplit[1]
if len(cpesplit)>2 and cpesplit[2] is not None:
vendor=cpesplit[2]
if len(cpesplit)>3 and cpesplit[3] is not None:
product=cpesplit[3]
if len(cpesplit)>4 and cpesplit[4] is not None:
version=cpesplit[4]
if len(cpesplit)>5 and cpesplit[5] is not None:
update=cpesplit[5]
if len(cpesplit)>6 and cpesplit[6] is not None:
edition=cpesplit[6]
if len(cpesplit)>7 and cpesplit[7] is not None:
language=cpesplit[7]
cur = conn.cursor()
res = cur.execute('''INSERT INTO cpe(cpe_text,part,vendor,product,version,update_date,edition,language) VALUES(?,?,?,?,?,?,?,?)''',(cpe_text,part,vendor,product,version,update,edition,language))
pk=cur.lastrowid
conn.commit()
return pk
def storeAffectsToCPE(vulnid,cpeid):
cur = conn.cursor()
res = cur.execute('''INSERT INTO affects_to_cpe(cpe_id,vuln_id) VALUES(?,?)''',(cpeid,vulnid))
pk=cur.lastrowid
conn.commit()
return pk
def storeVuln(cve,cvss_score,cwe,summary,published_date,modified_date,cpetextlist):
cpeid = None
vulnpk = None
cur = conn.cursor()
# datesformat: "2015-05-11T21:59:13.853-04:00"
published_date=published_date.split(".")[0]
modified_date=modified_date.split(".")[0]
pubepoch=int(time.mktime((time.strptime(published_date,"%Y-%m-%dT%H:%M:%S"))))
modepoch=int(time.mktime((time.strptime(modified_date,"%Y-%m-%dT%H:%M:%S"))))
res = cur.execute('''INSERT INTO vulnerabilities(cve,cvss_score,cwe,summary,published_date,modified_date) VALUES(?,?,?,?,?,?)''',(cve,cvss_score,cwe,summary,pubepoch,modepoch))
vulnpk=cur.lastrowid
conn.commit()
# save the cpe list
for cpetext in cpetextlist:
cpeid = searchCPE(conn,cpetext)
if cpeid is None:
cpeid = storeCPE(conn,cpetext)
storeAffectsToCPE(vulnpk,cpeid)
return vulnpk
def hasToBeUpdated(conn,dllink,updatedepoch):
lastdownload = 0
cur = conn.cursor()
res = cur.execute('''SELECT last_download FROM download_dates WHERE download_link = ? LIMIT 1''',(dllink,))
results = res.fetchall()
if len(results) > 0:
lastdownload = results[0][0]
# compare the last time we updated this link with the updated date shown in the web page
return lastdownload < updatedepoch
def isVulnInDatabase(conn,cveid):
cur = conn.cursor()
res = cur.execute('''SELECT vuln_id FROM vulnerabilities WHERE cve = ? LIMIT 1''',(cveid,))
results = res.fetchall()
return len(results) > 0
def wasVulnUpdated(conn,cveid,modified):
modified_date = 0
cur = conn.cursor()
modified=modified.split(".")[0]
modepoch=int(time.mktime((time.strptime(modified,"%Y-%m-%dT%H:%M:%S"))))
res = cur.execute('''SELECT modified_date FROM vulnerabilities WHERE cve = ? LIMIT 1''',(cveid,))
results = res.fetchall()
if len(results) > 0:
modified_date = results[0][0]
return modepoch > modified_date
def updateVuln(conn,cveid,cvss,cwe,summary,published_date,modified_date,cpetextlist):
cpeid = None
vuln_id = None
cur = conn.cursor()
    # date format: "2015-05-11T21:59:13.853-04:00"
published_date=published_date.split(".")[0]
modified_date=modified_date.split(".")[0]
pubepoch=int(time.mktime((time.strptime(published_date,"%Y-%m-%dT%H:%M:%S"))))
modepoch=int(time.mktime((time.strptime(modified_date,"%Y-%m-%dT%H:%M:%S"))))
res = cur.execute('''SELECT vuln_id FROM vulnerabilities WHERE cve = ? LIMIT 1''',(cveid,))
results = res.fetchall()
if len(results) > 0:
vuln_id = results[0][0]
# Delete the previous affected CPEs and insert the new ones
res = cur.execute('''DELETE FROM affects_to_cpe WHERE vuln_id=?''',(vuln_id,))
res = cur.execute('''UPDATE vulnerabilities
SET cve=?,cvss_score=?,cwe=?,summary=?,published_date=?,modified_date=?
WHERE vuln_id=?
''',(cveid,cvss,cwe,summary,pubepoch,modepoch,vuln_id))
vulnpk=cur.lastrowid
conn.commit()
# save the cpe list
for cpetext in cpetextlist:
cpeid = searchCPE(conn,cpetext)
if cpeid is None:
cpeid = storeCPE(conn,cpetext)
storeAffectsToCPE(vuln_id,cpeid)
return vuln_id
##################
###### MAIN ######
##################
printBanner()
options = getoptions()
# Parse the vuln download page
br = Browser()
conn = initDatabase(options.database)
br.open(DLPAGE)
body=br.response().read()
# Locate the XML feed table in the NVD downloads page
html=etree.HTML(body)
feedtable = html.xpath("//table[@class='xml-feed-table']")[0]
nrow=0
for trow in feedtable.xpath("tbody/tr"):
nrow += 1
feed = updated = dllink = size = ""
colnum = 0
if ((nrow % 2) == 1):
for col in trow.xpath("td"):
colnum += 1
if colnum == 1:
feed = col.text
if colnum == 2:
updated = col.text
updatedepoch=int(time.mktime((time.strptime(updated,"%m/%d/%Y"))))
if colnum == 3:
dllink = col.xpath("a")[0].get("href")
if colnum == 4:
size = float(col.text)
# Ignore the rest of the columns
if colnum > 4:
break
# Ignore the second line of the table, as the feed name occupies two rows
if feed is not None:
# Check if this file has been updated since the last download we made
if hasToBeUpdated(conn,dllink,updatedepoch):
print colored("File %s is not up to date. Downloading now." % dllink,"red")
print "%s: Updated %s, downloadig %sMB from %s" % (feed,updated,size,dllink)
dlname = dllink.split("/").pop()
# Download the link with the XML
br.retrieve(dllink,dlname)
# Unzip and parse the file to store it in sqlite3
g = gzip.open(dlname,"rb")
gcontent = g.read()
g.close() # Free memory
g = None
print "Now, importing content of the file %s" % dlname
ifxml = etree.XML(gcontent)
gcontent = None # Free memory
for entry in ifxml.getchildren():
# print entry.getchildren()
cwe = summary = cveid = "?"
cvss = 0.0
modified = published = ""
cpetextlist = []
cwee = entry.find("{http://scap.nist.gov/schema/vulnerability/0.4}cwe")
if cwee is not None:
cwe = cwee.values()[0]
cvsseleme =entry.find("{http://scap.nist.gov/schema/vulnerability/0.4}cvss")
if cvsseleme is not None:
cvsselem = cvsseleme.getchildren()[0]
cvss = float(cvsselem.find("{http://scap.nist.gov/schema/cvss-v2/0.2}score").text)
modifiede = entry.find("{http://scap.nist.gov/schema/vulnerability/0.4}last-modified-datetime")
if modifiede is not None:
modified = modifiede.text
publishede = entry.find("{http://scap.nist.gov/schema/vulnerability/0.4}published-datetime")
if publishede is not None:
published = publishede.text
cveide = entry.find("{http://scap.nist.gov/schema/vulnerability/0.4}cve-id")
if cveide is not None:
cveid = cveide.text
summarye = entry.find("{http://scap.nist.gov/schema/vulnerability/0.4}summary")
if summarye is not None:
summary = summarye.text
cpeliste = entry.find("{http://scap.nist.gov/schema/vulnerability/0.4}vulnerable-software-list")
if cpeliste is not None:
for cpee in cpeliste.getchildren():
cpetextlist.append(cpee.text)
if (options.verbose):
print colored("=================","cyan")
print colored(" = %s =" % cveid,"cyan")
print colored("=================","cyan")
print " * cwe: %s" % cwe
print " * cvss: %s" % cvss
print " * modified: %s" % modified
print " * published: %s" % published
print " * summary: %s" % summary
print " * N of cpe: %s" % len(cpetextlist)
if (not isVulnInDatabase(conn,cveid)):
storeVuln(cveid,cvss,cwe,summary,published,modified,cpetextlist)
else:
if (wasVulnUpdated(conn,cveid,modified)):
print colored("Vulnerability %s has been updated. Updating in database" % cveid,"yellow")
updateVuln(conn,cveid,cvss,cwe,summary,published,modified,cpetextlist)
else:
print colored("Vulnerability %s is already in the database" % cveid,"red")
# Save as downloaded
storeDlDate(conn,dllink,feed,size)
else:
print colored("File %s is up to date. Not downloading." % dllink,"green")
closeDatabase(conn)
| gpl-2.0 | -7,465,310,185,516,996,000 | -6,459,287,174,063,774,000 | 41.612637 | 198 | 0.575205 | false |
TRox1972/youtube-dl | youtube_dl/extractor/golem.py | 186 | 2181 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
)
from ..utils import (
determine_ext,
)
class GolemIE(InfoExtractor):
_VALID_URL = r'^https?://video\.golem\.de/.+?/(?P<id>.+?)/'
_TEST = {
'url': 'http://video.golem.de/handy/14095/iphone-6-und-6-plus-test.html',
'md5': 'c1a2c0a3c863319651c7c992c5ee29bf',
'info_dict': {
'id': '14095',
'format_id': 'high',
'ext': 'mp4',
'title': 'iPhone 6 und 6 Plus - Test',
'duration': 300.44,
'filesize': 65309548,
}
}
_PREFIX = 'http://video.golem.de'
def _real_extract(self, url):
video_id = self._match_id(url)
config = self._download_xml(
'https://video.golem.de/xml/{0}.xml'.format(video_id), video_id)
info = {
'id': video_id,
'title': config.findtext('./title', 'golem'),
'duration': self._float(config.findtext('./playtime'), 'duration'),
}
formats = []
for e in config:
url = e.findtext('./url')
if not url:
continue
formats.append({
'format_id': e.tag,
'url': compat_urlparse.urljoin(self._PREFIX, url),
'height': self._int(e.get('height'), 'height'),
'width': self._int(e.get('width'), 'width'),
'filesize': self._int(e.findtext('filesize'), 'filesize'),
'ext': determine_ext(e.findtext('./filename')),
})
self._sort_formats(formats)
info['formats'] = formats
thumbnails = []
for e in config.findall('.//teaser'):
url = e.findtext('./url')
if not url:
continue
thumbnails.append({
'url': compat_urlparse.urljoin(self._PREFIX, url),
'width': self._int(e.get('width'), 'thumbnail width'),
'height': self._int(e.get('height'), 'thumbnail height'),
})
info['thumbnails'] = thumbnails
return info
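# The extractor is reached through youtube-dl's normal front end; the _TEST
# entry above doubles as a usage example:
#   youtube-dl "http://video.golem.de/handy/14095/iphone-6-und-6-plus-test.html"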
| unlicense | 4,281,010,797,577,897,500 | 190,238,791,836,737,900 | 29.71831 | 81 | 0.495644 | false |
wireservice/agate | setup.py | 1 | 1797 | #!/usr/bin/env python
from setuptools import setup
install_requires = [
'six>=1.9.0',
'pytimeparse>=1.1.5',
'parsedatetime>=2.1',
'Babel>=2.0',
'isodate>=0.5.4',
'python-slugify>=1.2.1',
'leather>=0.3.2',
'PyICU>=2.4.2',
]
setup(
name='agate',
version='1.6.2',
description='A data analysis library that is optimized for humans instead of machines.',
long_description=open('README.rst').read(),
author='Christopher Groskopf',
author_email='[email protected]',
url='http://agate.readthedocs.org/',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: IPython',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=[
'agate',
'agate.aggregations',
'agate.computations',
'agate.data_types',
'agate.table',
'agate.tableset'
],
install_requires=install_requires
)
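# Standard setuptools flow applies here (no project-specific build steps are
# assumed): from a source checkout, `pip install .` installs agate together
# with the install_requires pins above.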
| mit | -723,663,106,066,494,700 | 1,518,210,823,147,574,500 | 31.672727 | 92 | 0.592098 | false |
SnappleCap/oh-mainline | vendor/packages/twisted/twisted/spread/pb.py | 18 | 47191 | # -*- test-case-name: twisted.test.test_pb -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Perspective Broker
\"This isn\'t a professional opinion, but it's probably got enough
internet to kill you.\" --glyph
Introduction
============
This is a broker for proxies for and copies of objects. It provides a
translucent interface layer to those proxies.
The protocol is not opaque, because it provides objects which represent the
remote proxies and require no context (server references, IDs) to operate on.
It is not transparent because it does I{not} attempt to make remote objects
behave identically, or even similiarly, to local objects. Method calls are
invoked asynchronously, and specific rules are applied when serializing
arguments.
To get started, begin with L{PBClientFactory} and L{PBServerFactory}.
@author: Glyph Lefkowitz
"""
import random
import new
import types
from zope.interface import implements, Interface
# Twisted Imports
from twisted.python import log, failure, reflect
from twisted.python.versions import Version
from twisted.python.deprecate import deprecated
from twisted.python.hashlib import md5
from twisted.internet import defer, protocol
from twisted.cred.portal import Portal
from twisted.cred.credentials import IAnonymous, ICredentials
from twisted.cred.credentials import IUsernameHashedPassword, Anonymous
from twisted.persisted import styles
from twisted.python.components import registerAdapter
from twisted.spread.interfaces import IJellyable, IUnjellyable
from twisted.spread.jelly import jelly, unjelly, globalSecurity
from twisted.spread import banana
from twisted.spread.flavors import Serializable
from twisted.spread.flavors import Referenceable, NoSuchMethod
from twisted.spread.flavors import Root, IPBRoot
from twisted.spread.flavors import ViewPoint
from twisted.spread.flavors import Viewable
from twisted.spread.flavors import Copyable
from twisted.spread.flavors import Jellyable
from twisted.spread.flavors import Cacheable
from twisted.spread.flavors import RemoteCopy
from twisted.spread.flavors import RemoteCache
from twisted.spread.flavors import RemoteCacheObserver
from twisted.spread.flavors import copyTags
from twisted.spread.flavors import setUnjellyableForClass
from twisted.spread.flavors import setUnjellyableFactoryForClass
from twisted.spread.flavors import setUnjellyableForClassTree
# These three are backwards compatibility aliases for the previous three.
# Ultimately they should be deprecated. -exarkun
from twisted.spread.flavors import setCopierForClass
from twisted.spread.flavors import setFactoryForClass
from twisted.spread.flavors import setCopierForClassTree
MAX_BROKER_REFS = 1024
portno = 8787
class ProtocolError(Exception):
"""
This error is raised when an invalid protocol statement is received.
"""
class DeadReferenceError(ProtocolError):
"""
This error is raised when a method is called on a dead reference (one whose
broker has been disconnected).
"""
class Error(Exception):
"""
This error can be raised to generate known error conditions.
When a PB callable method (perspective_, remote_, view_) raises
this error, it indicates that a traceback should not be printed,
but instead, the string representation of the exception should be
sent.
"""
class RemoteMethod:
"""This is a translucent reference to a remote message.
"""
def __init__(self, obj, name):
"""Initialize with a L{RemoteReference} and the name of this message.
"""
self.obj = obj
self.name = name
def __cmp__(self, other):
return cmp((self.obj, self.name), other)
def __hash__(self):
return hash((self.obj, self.name))
def __call__(self, *args, **kw):
"""Asynchronously invoke a remote method.
"""
return self.obj.broker._sendMessage('',self.obj.perspective, self.obj.luid, self.name, args, kw)
def noOperation(*args, **kw):
"""
Do nothing.
Neque porro quisquam est qui dolorem ipsum quia dolor sit amet,
consectetur, adipisci velit...
"""
noOperation = deprecated(Version("twisted", 8, 2, 0))(noOperation)
class PBConnectionLost(Exception):
pass
def printTraceback(tb):
"""
Print a traceback (string) to the standard log.
"""
log.msg('Perspective Broker Traceback:' )
log.msg(tb)
printTraceback = deprecated(Version("twisted", 8, 2, 0))(printTraceback)
class IPerspective(Interface):
"""
per*spec*tive, n. : The relationship of aspects of a subject to each
other and to a whole: 'a perspective of history'; 'a need to view
the problem in the proper perspective'.
This is a Perspective Broker-specific wrapper for an avatar. That
is to say, a PB-published view on to the business logic for the
system's concept of a 'user'.
The concept of attached/detached is no longer implemented by the
framework. The realm is expected to implement such semantics if
needed.
"""
def perspectiveMessageReceived(broker, message, args, kwargs):
"""
This method is called when a network message is received.
@arg broker: The Perspective Broker.
@type message: str
@arg message: The name of the method called by the other end.
@type args: list in jelly format
@arg args: The arguments that were passed by the other end. It
is recommend that you use the `unserialize' method of the
broker to decode this.
@type kwargs: dict in jelly format
@arg kwargs: The keyword arguments that were passed by the
other end. It is recommended that you use the
`unserialize' method of the broker to decode this.
@rtype: A jelly list.
@return: It is recommended that you use the `serialize' method
of the broker on whatever object you need to return to
generate the return value.
"""
class Avatar:
"""
A default IPerspective implementor.
This class is intended to be subclassed, and a realm should return
an instance of such a subclass when IPerspective is requested of
it.
A peer requesting a perspective will receive only a
L{RemoteReference} to a pb.Avatar. When a method is called on
that L{RemoteReference}, it will translate to a method on the
remote perspective named 'perspective_methodname'. (For more
information on invoking methods on other objects, see
L{flavors.ViewPoint}.)
"""
implements(IPerspective)
def perspectiveMessageReceived(self, broker, message, args, kw):
"""
This method is called when a network message is received.
This will call::
self.perspective_%(message)s(*broker.unserialize(args),
**broker.unserialize(kw))
to handle the method; subclasses of Avatar are expected to
implement methods using this naming convention.
"""
args = broker.unserialize(args, self)
kw = broker.unserialize(kw, self)
method = getattr(self, "perspective_%s" % message)
try:
state = method(*args, **kw)
except TypeError:
log.msg("%s didn't accept %s and %s" % (method, args, kw))
raise
return broker.serialize(state, self, method, args, kw)
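# Minimal subclass sketch (names are illustrative): the convention above
# means a client-side callRemote("echo", ...) dispatches to perspective_echo.
#
#   class Echoer(Avatar):
#       def perspective_echo(self, text):
#           return text # serialized back as the caller's Deferred result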
class AsReferenceable(Referenceable):
"""
A reference directed towards another object.
"""
def __init__(self, object, messageType="remote"):
self.remoteMessageReceived = getattr(
object, messageType + "MessageReceived")
class RemoteReference(Serializable, styles.Ephemeral):
"""
A translucent reference to a remote object.
I may be a reference to a L{flavors.ViewPoint}, a
L{flavors.Referenceable}, or an L{IPerspective} implementor (e.g.,
pb.Avatar). From the client's perspective, it is not possible to
tell which except by convention.
I am a \"translucent\" reference because although no additional
bookkeeping overhead is given to the application programmer for
manipulating a reference, return values are asynchronous.
See also L{twisted.internet.defer}.
@ivar broker: The broker I am obtained through.
@type broker: L{Broker}
"""
implements(IUnjellyable)
def __init__(self, perspective, broker, luid, doRefCount):
"""(internal) Initialize me with a broker and a locally-unique ID.
The ID is unique only to the particular Perspective Broker
instance.
"""
self.luid = luid
self.broker = broker
self.doRefCount = doRefCount
self.perspective = perspective
self.disconnectCallbacks = []
def notifyOnDisconnect(self, callback):
"""Register a callback to be called if our broker gets disconnected.
This callback will be called with one argument, this instance.
"""
assert callable(callback)
self.disconnectCallbacks.append(callback)
if len(self.disconnectCallbacks) == 1:
self.broker.notifyOnDisconnect(self._disconnected)
def dontNotifyOnDisconnect(self, callback):
"""Remove a callback that was registered with notifyOnDisconnect."""
self.disconnectCallbacks.remove(callback)
if not self.disconnectCallbacks:
self.broker.dontNotifyOnDisconnect(self._disconnected)
def _disconnected(self):
"""Called if we are disconnected and have callbacks registered."""
for callback in self.disconnectCallbacks:
callback(self)
self.disconnectCallbacks = None
def jellyFor(self, jellier):
"""If I am being sent back to where I came from, serialize as a local backreference.
"""
if jellier.invoker:
assert self.broker == jellier.invoker, "Can't send references to brokers other than their own."
return "local", self.luid
else:
return "unpersistable", "References cannot be serialized"
def unjellyFor(self, unjellier, unjellyList):
self.__init__(unjellier.invoker.unserializingPerspective, unjellier.invoker, unjellyList[1], 1)
return self
def callRemote(self, _name, *args, **kw):
"""Asynchronously invoke a remote method.
@type _name: C{string}
@param _name: the name of the remote method to invoke
@param args: arguments to serialize for the remote function
@param kw: keyword arguments to serialize for the remote function.
@rtype: L{twisted.internet.defer.Deferred}
@returns: a Deferred which will be fired when the result of
this remote call is received.
"""
# note that we use '_name' instead of 'name' so the user can call
# remote methods with 'name' as a keyword parameter, like this:
# ref.callRemote("getPeopleNamed", count=12, name="Bob")
return self.broker._sendMessage('',self.perspective, self.luid,
_name, args, kw)
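    # Illustrative call site (client code; the method name is a placeholder):
    #
    #   d = ref.callRemote("getUser", "alice")
    #   d.addCallback(lambda user: ...) # fires when the answer arrives
    #   d.addErrback(log.err) # remote exceptions arrive as failures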
def remoteMethod(self, key):
"""Get a L{RemoteMethod} for this key.
"""
return RemoteMethod(self, key)
def __cmp__(self,other):
"""Compare me [to another L{RemoteReference}].
"""
if isinstance(other, RemoteReference):
if other.broker == self.broker:
return cmp(self.luid, other.luid)
return cmp(self.broker, other)
def __hash__(self):
"""Hash me.
"""
return self.luid
def __del__(self):
"""Do distributed reference counting on finalization.
"""
if self.doRefCount:
self.broker.sendDecRef(self.luid)
setUnjellyableForClass("remote", RemoteReference)
class Local:
"""(internal) A reference to a local object.
"""
def __init__(self, object, perspective=None):
"""Initialize.
"""
self.object = object
self.perspective = perspective
self.refcount = 1
def __repr__(self):
return "<pb.Local %r ref:%s>" % (self.object, self.refcount)
def incref(self):
"""Increment and return my reference count.
"""
self.refcount = self.refcount + 1
return self.refcount
def decref(self):
"""Decrement and return my reference count.
"""
self.refcount = self.refcount - 1
return self.refcount
##
# Failure
##
class CopyableFailure(failure.Failure, Copyable):
"""
A L{flavors.RemoteCopy} and L{flavors.Copyable} version of
L{twisted.python.failure.Failure} for serialization.
"""
unsafeTracebacks = 0
def getStateToCopy(self):
"""
Collect state related to the exception which occurred, discarding
state which cannot reasonably be serialized.
"""
state = self.__dict__.copy()
state['tb'] = None
state['frames'] = []
state['stack'] = []
if isinstance(self.value, failure.Failure):
state['value'] = failure2Copyable(self.value, self.unsafeTracebacks)
else:
state['value'] = str(self.value) # Exception instance
if isinstance(self.type, str):
state['type'] = self.type
else:
state['type'] = reflect.qual(self.type) # Exception class
if self.unsafeTracebacks:
state['traceback'] = self.getTraceback()
else:
state['traceback'] = 'Traceback unavailable\n'
return state
class CopiedFailure(RemoteCopy, failure.Failure):
def printTraceback(self, file=None, elideFrameworkCode=0, detail='default'):
if file is None:
file = log.logfile
file.write("Traceback from remote host -- ")
file.write(self.traceback)
printBriefTraceback = printTraceback
printDetailedTraceback = printTraceback
setUnjellyableForClass(CopyableFailure, CopiedFailure)
def failure2Copyable(fail, unsafeTracebacks=0):
f = new.instance(CopyableFailure, fail.__dict__)
f.unsafeTracebacks = unsafeTracebacks
return f
class Broker(banana.Banana):
"""I am a broker for objects.
"""
version = 6
username = None
factory = None
def __init__(self, isClient=1, security=globalSecurity):
banana.Banana.__init__(self, isClient)
self.disconnected = 0
self.disconnects = []
self.failures = []
self.connects = []
self.localObjects = {}
self.security = security
self.pageProducers = []
self.currentRequestID = 0
self.currentLocalID = 0
self.unserializingPerspective = None
# Some terms:
# PUID: process unique ID; return value of id() function. type "int".
# LUID: locally unique ID; an ID unique to an object mapped over this
# connection. type "int"
# GUID: (not used yet) globally unique ID; an ID for an object which
# may be on a redirected or meta server. Type as yet undecided.
# Dictionary mapping LUIDs to local objects.
# set above to allow root object to be assigned before connection is made
# self.localObjects = {}
# Dictionary mapping PUIDs to LUIDs.
self.luids = {}
# Dictionary mapping LUIDs to local (remotely cached) objects. Remotely
# cached means that they're objects which originate here, and were
# copied remotely.
self.remotelyCachedObjects = {}
# Dictionary mapping PUIDs to (cached) LUIDs
self.remotelyCachedLUIDs = {}
# Dictionary mapping (remote) LUIDs to (locally cached) objects.
self.locallyCachedObjects = {}
self.waitingForAnswers = {}
# Mapping from LUIDs to weakref objects with callbacks for performing
# any local cleanup which may be necessary for the corresponding
# object once it no longer exists.
self._localCleanup = {}
def resumeProducing(self):
"""Called when the consumer attached to me runs out of buffer.
"""
# Go backwards over the list so we can remove indexes from it as we go
for pageridx in xrange(len(self.pageProducers)-1, -1, -1):
pager = self.pageProducers[pageridx]
pager.sendNextPage()
if not pager.stillPaging():
del self.pageProducers[pageridx]
if not self.pageProducers:
self.transport.unregisterProducer()
# Streaming producer methods; not necessary to implement.
def pauseProducing(self):
pass
def stopProducing(self):
pass
def registerPageProducer(self, pager):
self.pageProducers.append(pager)
if len(self.pageProducers) == 1:
self.transport.registerProducer(self, 0)
def expressionReceived(self, sexp):
"""Evaluate an expression as it's received.
"""
if isinstance(sexp, types.ListType):
command = sexp[0]
methodName = "proto_%s" % command
method = getattr(self, methodName, None)
if method:
method(*sexp[1:])
else:
self.sendCall("didNotUnderstand", command)
else:
raise ProtocolError("Non-list expression received.")
def proto_version(self, vnum):
"""Protocol message: (version version-number)
Check to make sure that both ends of the protocol are speaking
the same version dialect.
"""
if vnum != self.version:
raise ProtocolError("Version Incompatibility: %s %s" % (self.version, vnum))
def sendCall(self, *exp):
"""Utility method to send an expression to the other side of the connection.
"""
self.sendEncoded(exp)
def proto_didNotUnderstand(self, command):
"""Respond to stock 'C{didNotUnderstand}' message.
Log the command that was not understood and continue. (Note:
this will probably be changed to close the connection or raise
an exception in the future.)
"""
log.msg("Didn't understand command: %r" % command)
def connectionReady(self):
"""Initialize. Called after Banana negotiation is done.
"""
self.sendCall("version", self.version)
for notifier in self.connects:
try:
notifier()
except:
log.deferr()
self.connects = None
if self.factory: # in tests we won't have factory
self.factory.clientConnectionMade(self)
def connectionFailed(self):
# XXX should never get called anymore? check!
for notifier in self.failures:
try:
notifier()
except:
log.deferr()
self.failures = None
waitingForAnswers = None
def connectionLost(self, reason):
"""The connection was lost.
"""
self.disconnected = 1
# nuke potential circular references.
self.luids = None
if self.waitingForAnswers:
for d in self.waitingForAnswers.values():
try:
d.errback(failure.Failure(PBConnectionLost(reason)))
except:
log.deferr()
# Assure all Cacheable.stoppedObserving are called
for lobj in self.remotelyCachedObjects.values():
cacheable = lobj.object
perspective = lobj.perspective
try:
cacheable.stoppedObserving(perspective, RemoteCacheObserver(self, cacheable, perspective))
except:
log.deferr()
# Loop on a copy to prevent notifiers to mixup
# the list by calling dontNotifyOnDisconnect
for notifier in self.disconnects[:]:
try:
notifier()
except:
log.deferr()
self.disconnects = None
self.waitingForAnswers = None
self.localSecurity = None
self.remoteSecurity = None
self.remotelyCachedObjects = None
self.remotelyCachedLUIDs = None
self.locallyCachedObjects = None
self.localObjects = None
def notifyOnDisconnect(self, notifier):
"""Call the given callback when the Broker disconnects."""
assert callable(notifier)
self.disconnects.append(notifier)
def notifyOnFail(self, notifier):
"""Call the given callback if the Broker fails to connect."""
assert callable(notifier)
self.failures.append(notifier)
def notifyOnConnect(self, notifier):
"""Call the given callback when the Broker connects."""
assert callable(notifier)
if self.connects is None:
try:
notifier()
except:
log.err()
else:
self.connects.append(notifier)
def dontNotifyOnDisconnect(self, notifier):
"""Remove a callback from list of disconnect callbacks."""
try:
self.disconnects.remove(notifier)
except ValueError:
pass
def localObjectForID(self, luid):
"""
Get a local object for a locally unique ID.
@return: An object previously stored with L{registerReference} or
C{None} if there is no object which corresponds to the given
identifier.
"""
lob = self.localObjects.get(luid)
if lob is None:
return
return lob.object
maxBrokerRefsViolations = 0
def registerReference(self, object):
"""Get an ID for a local object.
Store a persistent reference to a local object and map its id()
to a generated, session-unique ID and return that ID.
"""
assert object is not None
puid = object.processUniqueID()
luid = self.luids.get(puid)
if luid is None:
if len(self.localObjects) > MAX_BROKER_REFS:
self.maxBrokerRefsViolations = self.maxBrokerRefsViolations + 1
if self.maxBrokerRefsViolations > 3:
self.transport.loseConnection()
raise Error("Maximum PB reference count exceeded. "
"Goodbye.")
raise Error("Maximum PB reference count exceeded.")
luid = self.newLocalID()
self.localObjects[luid] = Local(object)
self.luids[puid] = luid
else:
self.localObjects[luid].incref()
return luid
def setNameForLocal(self, name, object):
"""Store a special (string) ID for this object.
This is how you specify a 'base' set of objects that the remote
protocol can connect to.
"""
assert object is not None
self.localObjects[name] = Local(object)
def remoteForName(self, name):
"""Returns an object from the remote name mapping.
Note that this does not check the validity of the name, only
creates a translucent reference for it.
"""
return RemoteReference(None, self, name, 0)
def cachedRemotelyAs(self, instance, incref=0):
"""Returns an ID that says what this instance is cached as remotely, or C{None} if it's not.
"""
puid = instance.processUniqueID()
luid = self.remotelyCachedLUIDs.get(puid)
if (luid is not None) and (incref):
self.remotelyCachedObjects[luid].incref()
return luid
def remotelyCachedForLUID(self, luid):
"""Returns an instance which is cached remotely, with this LUID.
"""
return self.remotelyCachedObjects[luid].object
def cacheRemotely(self, instance):
"""
XXX"""
puid = instance.processUniqueID()
luid = self.newLocalID()
if len(self.remotelyCachedObjects) > MAX_BROKER_REFS:
self.maxBrokerRefsViolations = self.maxBrokerRefsViolations + 1
if self.maxBrokerRefsViolations > 3:
self.transport.loseConnection()
raise Error("Maximum PB cache count exceeded. "
"Goodbye.")
raise Error("Maximum PB cache count exceeded.")
self.remotelyCachedLUIDs[puid] = luid
# This table may not be necessary -- for now, it's to make sure that no
# monkey business happens with id(instance)
self.remotelyCachedObjects[luid] = Local(instance, self.serializingPerspective)
return luid
def cacheLocally(self, cid, instance):
"""(internal)
Store a non-filled-out cached instance locally.
"""
self.locallyCachedObjects[cid] = instance
def cachedLocallyAs(self, cid):
instance = self.locallyCachedObjects[cid]
return instance
def serialize(self, object, perspective=None, method=None, args=None, kw=None):
"""Jelly an object according to the remote security rules for this broker.
"""
if isinstance(object, defer.Deferred):
object.addCallbacks(self.serialize, lambda x: x,
callbackKeywords={
'perspective': perspective,
'method': method,
'args': args,
'kw': kw
})
return object
# XXX This call is NOT REENTRANT and testing for reentrancy is just
# crazy, so it likely won't be. Don't ever write methods that call the
# broker's serialize() method recursively (e.g. sending a method call
# from within a getState (this causes concurrency problems anyway so
# you really, really shouldn't do it))
# self.jellier = _NetJellier(self)
self.serializingPerspective = perspective
self.jellyMethod = method
self.jellyArgs = args
self.jellyKw = kw
try:
return jelly(object, self.security, None, self)
finally:
self.serializingPerspective = None
self.jellyMethod = None
self.jellyArgs = None
self.jellyKw = None
def unserialize(self, sexp, perspective = None):
"""Unjelly an sexp according to the local security rules for this broker.
"""
self.unserializingPerspective = perspective
try:
return unjelly(sexp, self.security, None, self)
finally:
self.unserializingPerspective = None
def newLocalID(self):
"""Generate a new LUID.
"""
self.currentLocalID = self.currentLocalID + 1
return self.currentLocalID
def newRequestID(self):
"""Generate a new request ID.
"""
self.currentRequestID = self.currentRequestID + 1
return self.currentRequestID
def _sendMessage(self, prefix, perspective, objectID, message, args, kw):
pbc = None
pbe = None
answerRequired = 1
if kw.has_key('pbcallback'):
pbc = kw['pbcallback']
del kw['pbcallback']
if kw.has_key('pberrback'):
pbe = kw['pberrback']
del kw['pberrback']
if kw.has_key('pbanswer'):
assert (not pbe) and (not pbc), "You can't specify a no-answer requirement."
answerRequired = kw['pbanswer']
del kw['pbanswer']
if self.disconnected:
raise DeadReferenceError("Calling Stale Broker")
try:
netArgs = self.serialize(args, perspective=perspective, method=message)
netKw = self.serialize(kw, perspective=perspective, method=message)
except:
return defer.fail(failure.Failure())
requestID = self.newRequestID()
if answerRequired:
rval = defer.Deferred()
self.waitingForAnswers[requestID] = rval
if pbc or pbe:
log.msg('warning! using deprecated "pbcallback"')
rval.addCallbacks(pbc, pbe)
else:
rval = None
self.sendCall(prefix+"message", requestID, objectID, message, answerRequired, netArgs, netKw)
return rval
def proto_message(self, requestID, objectID, message, answerRequired, netArgs, netKw):
self._recvMessage(self.localObjectForID, requestID, objectID, message, answerRequired, netArgs, netKw)
def proto_cachemessage(self, requestID, objectID, message, answerRequired, netArgs, netKw):
self._recvMessage(self.cachedLocallyAs, requestID, objectID, message, answerRequired, netArgs, netKw)
def _recvMessage(self, findObjMethod, requestID, objectID, message, answerRequired, netArgs, netKw):
"""Received a message-send.
Look up message based on object, unserialize the arguments, and
invoke it with args, and send an 'answer' or 'error' response.
"""
try:
object = findObjMethod(objectID)
if object is None:
raise Error("Invalid Object ID")
netResult = object.remoteMessageReceived(self, message, netArgs, netKw)
except Error, e:
if answerRequired:
# If the error is Jellyable or explicitly allowed via our
# security options, send it back and let the code on the
# other end deal with unjellying. If it isn't Jellyable,
# wrap it in a CopyableFailure, which ensures it can be
# unjellied on the other end. We have to do this because
# all errors must be sent back.
if isinstance(e, Jellyable) or self.security.isClassAllowed(e.__class__):
self._sendError(e, requestID)
else:
self._sendError(CopyableFailure(e), requestID)
except:
if answerRequired:
log.msg("Peer will receive following PB traceback:", isError=True)
f = CopyableFailure()
self._sendError(f, requestID)
log.err()
else:
if answerRequired:
if isinstance(netResult, defer.Deferred):
args = (requestID,)
netResult.addCallbacks(self._sendAnswer, self._sendFailureOrError,
callbackArgs=args, errbackArgs=args)
# XXX Should this be done somewhere else?
else:
self._sendAnswer(netResult, requestID)
##
# success
##
def _sendAnswer(self, netResult, requestID):
"""(internal) Send an answer to a previously sent message.
"""
self.sendCall("answer", requestID, netResult)
def proto_answer(self, requestID, netResult):
"""(internal) Got an answer to a previously sent message.
Look up the appropriate callback and call it.
"""
d = self.waitingForAnswers[requestID]
del self.waitingForAnswers[requestID]
d.callback(self.unserialize(netResult))
##
# failure
##
def _sendFailureOrError(self, fail, requestID):
"""
Call L{_sendError} or L{_sendFailure}, depending on whether C{fail}
represents an L{Error} subclass or not.
"""
if fail.check(Error) is None:
self._sendFailure(fail, requestID)
else:
self._sendError(fail, requestID)
def _sendFailure(self, fail, requestID):
"""Log error and then send it."""
log.msg("Peer will receive following PB traceback:")
log.err(fail)
self._sendError(fail, requestID)
def _sendError(self, fail, requestID):
"""(internal) Send an error for a previously sent message.
"""
if isinstance(fail, failure.Failure):
# If the failures value is jellyable or allowed through security,
# send the value
if (isinstance(fail.value, Jellyable) or
self.security.isClassAllowed(fail.value.__class__)):
fail = fail.value
elif not isinstance(fail, CopyableFailure):
fail = failure2Copyable(fail, self.factory.unsafeTracebacks)
if isinstance(fail, CopyableFailure):
fail.unsafeTracebacks = self.factory.unsafeTracebacks
self.sendCall("error", requestID, self.serialize(fail))
def proto_error(self, requestID, fail):
"""(internal) Deal with an error.
"""
d = self.waitingForAnswers[requestID]
del self.waitingForAnswers[requestID]
d.errback(self.unserialize(fail))
##
# refcounts
##
def sendDecRef(self, objectID):
"""(internal) Send a DECREF directive.
"""
self.sendCall("decref", objectID)
def proto_decref(self, objectID):
"""(internal) Decrement the reference count of an object.
If the reference count is zero, it will free the reference to this
object.
"""
refs = self.localObjects[objectID].decref()
if refs == 0:
puid = self.localObjects[objectID].object.processUniqueID()
del self.luids[puid]
del self.localObjects[objectID]
self._localCleanup.pop(puid, lambda: None)()
##
# caching
##
def decCacheRef(self, objectID):
"""(internal) Send a DECACHE directive.
"""
self.sendCall("decache", objectID)
def proto_decache(self, objectID):
"""(internal) Decrement the reference count of a cached object.
If the reference count is zero, free the reference, then send an
'uncached' directive.
"""
refs = self.remotelyCachedObjects[objectID].decref()
# log.msg('decaching: %s #refs: %s' % (objectID, refs))
if refs == 0:
lobj = self.remotelyCachedObjects[objectID]
cacheable = lobj.object
perspective = lobj.perspective
# TODO: force_decache needs to be able to force-invalidate a
# cacheable reference.
try:
cacheable.stoppedObserving(perspective, RemoteCacheObserver(self, cacheable, perspective))
except:
log.deferr()
puid = cacheable.processUniqueID()
del self.remotelyCachedLUIDs[puid]
del self.remotelyCachedObjects[objectID]
self.sendCall("uncache", objectID)
def proto_uncache(self, objectID):
"""(internal) Tell the client it is now OK to uncache an object.
"""
# log.msg("uncaching locally %d" % objectID)
obj = self.locallyCachedObjects[objectID]
obj.broker = None
## def reallyDel(obj=obj):
## obj.__really_del__()
## obj.__del__ = reallyDel
del self.locallyCachedObjects[objectID]
def respond(challenge, password):
"""Respond to a challenge.
This is useful for challenge/response authentication.
"""
m = md5()
m.update(password)
hashedPassword = m.digest()
m = md5()
m.update(hashedPassword)
m.update(challenge)
doubleHashedPassword = m.digest()
return doubleHashedPassword
def challenge():
"""I return some random data."""
crap = ''
for x in range(random.randrange(15,25)):
crap = crap + chr(random.randint(65,90))
crap = md5(crap).digest()
return crap
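# Worked round trip of the two helpers above (byte values are whatever
# challenge() happens to produce):
#
#   c = challenge() # server side: a 16-byte MD5 digest
#   proof = respond(c, 'secret') # client: md5(md5(password) + challenge)
#   assert proof == respond(c, 'secret') # server recomputes and compares
#
# so the password itself never crosses the wire.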
class PBClientFactory(protocol.ClientFactory):
"""
Client factory for PB brokers.
As with all client factories, use with reactor.connectTCP/SSL/etc..
    login and getRootObject can be called either before or
    after the connect.
"""
protocol = Broker
unsafeTracebacks = False
def __init__(self, unsafeTracebacks=False, security=globalSecurity):
"""
@param unsafeTracebacks: if set, tracebacks for exceptions will be sent
over the wire.
@type unsafeTracebacks: C{bool}
@param security: security options used by the broker, default to
C{globalSecurity}.
@type security: L{twisted.spread.jelly.SecurityOptions}
"""
self.unsafeTracebacks = unsafeTracebacks
self.security = security
self._reset()
def buildProtocol(self, addr):
"""
Build the broker instance, passing the security options to it.
"""
p = self.protocol(isClient=True, security=self.security)
p.factory = self
return p
def _reset(self):
self.rootObjectRequests = [] # list of deferred
self._broker = None
self._root = None
def _failAll(self, reason):
deferreds = self.rootObjectRequests
self._reset()
for d in deferreds:
d.errback(reason)
def clientConnectionFailed(self, connector, reason):
self._failAll(reason)
def clientConnectionLost(self, connector, reason, reconnecting=0):
"""Reconnecting subclasses should call with reconnecting=1."""
if reconnecting:
# any pending requests will go to next connection attempt
# so we don't fail them.
self._broker = None
self._root = None
else:
self._failAll(reason)
def clientConnectionMade(self, broker):
self._broker = broker
self._root = broker.remoteForName("root")
ds = self.rootObjectRequests
self.rootObjectRequests = []
for d in ds:
d.callback(self._root)
def getRootObject(self):
"""Get root object of remote PB server.
@return: Deferred of the root object.
"""
if self._broker and not self._broker.disconnected:
return defer.succeed(self._root)
d = defer.Deferred()
self.rootObjectRequests.append(d)
return d
def disconnect(self):
"""If the factory is connected, close the connection.
Note that if you set up the factory to reconnect, you will need to
implement extra logic to prevent automatic reconnection after this
is called.
"""
if self._broker:
self._broker.transport.loseConnection()
def _cbSendUsername(self, root, username, password, client):
return root.callRemote("login", username).addCallback(
self._cbResponse, password, client)
def _cbResponse(self, (challenge, challenger), password, client):
return challenger.callRemote("respond", respond(challenge, password), client)
def _cbLoginAnonymous(self, root, client):
"""
Attempt an anonymous login on the given remote root object.
@type root: L{RemoteReference}
@param root: The object on which to attempt the login, most likely
returned by a call to L{PBClientFactory.getRootObject}.
@param client: A jellyable object which will be used as the I{mind}
parameter for the login attempt.
@rtype: L{Deferred}
@return: A L{Deferred} which will be called back with a
L{RemoteReference} to an avatar when anonymous login succeeds, or
which will errback if anonymous login fails.
"""
return root.callRemote("loginAnonymous", client)
def login(self, credentials, client=None):
"""
Login and get perspective from remote PB server.
Currently the following credentials are supported::
L{twisted.cred.credentials.IUsernamePassword}
L{twisted.cred.credentials.IAnonymous}
@rtype: L{Deferred}
@return: A L{Deferred} which will be called back with a
L{RemoteReference} for the avatar logged in to, or which will
errback if login fails.
"""
d = self.getRootObject()
if IAnonymous.providedBy(credentials):
d.addCallback(self._cbLoginAnonymous, client)
else:
d.addCallback(
self._cbSendUsername, credentials.username,
credentials.password, client)
return d
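# Typical client wiring (host, port and credentials are placeholders, and
# twisted.internet.reactor / twisted.cred.credentials are assumed imported):
#
#   factory = PBClientFactory()
#   reactor.connectTCP("pb.example.com", portno, factory)
#   d = factory.login(credentials.UsernamePassword("alice", "secret"))
#   d.addCallback(lambda perspective: perspective.callRemote("hello"))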
class PBServerFactory(protocol.ServerFactory):
"""
Server factory for perspective broker.
Login is done using a Portal object, whose realm is expected to return
avatars implementing IPerspective. The credential checkers in the portal
should accept IUsernameHashedPassword or IUsernameMD5Password.
Alternatively, any object providing or adaptable to L{IPBRoot} can be
used instead of a portal to provide the root object of the PB server.
"""
unsafeTracebacks = False
# object broker factory
protocol = Broker
def __init__(self, root, unsafeTracebacks=False, security=globalSecurity):
"""
@param root: factory providing the root Referenceable used by the broker.
@type root: object providing or adaptable to L{IPBRoot}.
@param unsafeTracebacks: if set, tracebacks for exceptions will be sent
over the wire.
@type unsafeTracebacks: C{bool}
@param security: security options used by the broker, default to
C{globalSecurity}.
@type security: L{twisted.spread.jelly.SecurityOptions}
"""
self.root = IPBRoot(root)
self.unsafeTracebacks = unsafeTracebacks
self.security = security
def buildProtocol(self, addr):
"""
Return a Broker attached to the factory (as the service provider).
"""
proto = self.protocol(isClient=False, security=self.security)
proto.factory = self
proto.setNameForLocal("root", self.root.rootObject(proto))
return proto
def clientConnectionMade(self, protocol):
# XXX does this method make any sense?
pass
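# Matching server-side sketch (realm and checker construction elided; the
# realm's requestAvatar must return IPerspective avatars):
#
#   portal = Portal(realm, [checker])
#   reactor.listenTCP(portno, PBServerFactory(portal))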
class IUsernameMD5Password(ICredentials):
"""
I encapsulate a username and a hashed password.
This credential is used for username/password over PB. CredentialCheckers
which check this kind of credential must store the passwords in plaintext
form or as a MD5 digest.
@type username: C{str} or C{Deferred}
@ivar username: The username associated with these credentials.
"""
def checkPassword(password):
"""
Validate these credentials against the correct password.
@type password: C{str}
@param password: The correct, plaintext password against which to
check.
@rtype: C{bool} or L{Deferred}
@return: C{True} if the credentials represented by this object match the
given password, C{False} if they do not, or a L{Deferred} which will
be called back with one of these values.
"""
def checkMD5Password(password):
"""
Validate these credentials against the correct MD5 digest of the
password.
@type password: C{str}
@param password: The correct MD5 digest of a password against which to
check.
@rtype: C{bool} or L{Deferred}
@return: C{True} if the credentials represented by this object match the
given digest, C{False} if they do not, or a L{Deferred} which will
be called back with one of these values.
"""
class _PortalRoot:
"""Root object, used to login to portal."""
implements(IPBRoot)
def __init__(self, portal):
self.portal = portal
def rootObject(self, broker):
return _PortalWrapper(self.portal, broker)
registerAdapter(_PortalRoot, Portal, IPBRoot)
class _JellyableAvatarMixin:
"""
Helper class for code which deals with avatars which PB must be capable of
sending to a peer.
"""
def _cbLogin(self, (interface, avatar, logout)):
"""
Ensure that the avatar to be returned to the client is jellyable and
set up disconnection notification to call the realm's logout object.
"""
if not IJellyable.providedBy(avatar):
avatar = AsReferenceable(avatar, "perspective")
puid = avatar.processUniqueID()
def dereferenceLogout():
self.broker.dontNotifyOnDisconnect(logout)
logout()
self.broker._localCleanup[puid] = dereferenceLogout
# No special helper function is necessary for notifyOnDisconnect
# because dereference callbacks won't be invoked if the connection is
# randomly dropped. I'm not sure those are ideal semantics, but this
# is the only user of the (private) API at the moment and it works just
# fine as things are. -exarkun
self.broker.notifyOnDisconnect(logout)
return avatar
class _PortalWrapper(Referenceable, _JellyableAvatarMixin):
"""
Root Referenceable object, used to login to portal.
"""
def __init__(self, portal, broker):
self.portal = portal
self.broker = broker
def remote_login(self, username):
"""
Start of username/password login.
"""
c = challenge()
return c, _PortalAuthChallenger(self.portal, self.broker, username, c)
def remote_loginAnonymous(self, mind):
"""
Attempt an anonymous login.
@param mind: An object to use as the mind parameter to the portal login
call (possibly None).
@rtype: L{Deferred}
@return: A Deferred which will be called back with an avatar when login
succeeds or which will be errbacked if login fails somehow.
"""
d = self.portal.login(Anonymous(), mind, IPerspective)
d.addCallback(self._cbLogin)
return d
class _PortalAuthChallenger(Referenceable, _JellyableAvatarMixin):
"""
Called with response to password challenge.
"""
implements(IUsernameHashedPassword, IUsernameMD5Password)
def __init__(self, portal, broker, username, challenge):
self.portal = portal
self.broker = broker
self.username = username
self.challenge = challenge
def remote_respond(self, response, mind):
self.response = response
d = self.portal.login(self, mind, IPerspective)
d.addCallback(self._cbLogin)
return d
# IUsernameHashedPassword:
def checkPassword(self, password):
return self.checkMD5Password(md5(password).digest())
# IUsernameMD5Password
def checkMD5Password(self, md5Password):
md = md5()
md.update(md5Password)
md.update(self.challenge)
correct = md.digest()
return self.response == correct
__all__ = [
# Everything from flavors is exposed publically here.
'IPBRoot', 'Serializable', 'Referenceable', 'NoSuchMethod', 'Root',
'ViewPoint', 'Viewable', 'Copyable', 'Jellyable', 'Cacheable',
'RemoteCopy', 'RemoteCache', 'RemoteCacheObserver', 'copyTags',
'setUnjellyableForClass', 'setUnjellyableFactoryForClass',
'setUnjellyableForClassTree',
'MAX_BROKER_REFS', 'portno',
'ProtocolError', 'DeadReferenceError', 'Error', 'PBConnectionLost',
'RemoteMethod', 'IPerspective', 'Avatar', 'AsReferenceable',
'RemoteReference', 'CopyableFailure', 'CopiedFailure', 'failure2Copyable',
'Broker', 'respond', 'challenge', 'PBClientFactory', 'PBServerFactory',
'IUsernameMD5Password',
]
| agpl-3.0 | 7,459,785,503,295,452,000 | 8,879,763,938,429,603,000 | 33.171615 | 110 | 0.630947 | false |
aviciimaxwell/odoo | addons/mrp_byproduct/__openerp__.py | 259 | 1819 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'MRP Byproducts',
'version': '1.0',
'category': 'Manufacturing',
'description': """
This module allows you to produce several products from one production order.
=============================================================================
You can configure by-products in the bill of material.
Without this module:
--------------------
A + B + C -> D
With this module:
-----------------
A + B + C -> D + E
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/manufacturing',
'depends': ['base', 'mrp'],
'data': [
'security/ir.model.access.csv',
'mrp_byproduct_view.xml'
],
'demo': [],
'test': ['test/mrp_byproduct.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,089,245,004,454,743,000 | -5,719,715,806,289,708,000 | 33.320755 | 78 | 0.560198 | false |
areitz/pants | contrib/go/src/python/pants/contrib/go/tasks/go_test.py | 10 | 2311 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.contrib.go.tasks.go_workspace_task import GoWorkspaceTask
class GoTest(GoWorkspaceTask):
"""Runs `go test` on Go packages.
To run a library's tests, GoTest only requires a Go workspace to be initialized
(see GoWorkspaceTask) with links to necessary source files. It does not require
GoCompile to first compile the library to be tested -- in fact, GoTest will ignore
any binaries in "$GOPATH/pkg/", because Go test files (which live in the package
they are testing) are ignored in normal compilation, so Go test must compile everything
from scratch.
"""
@classmethod
def register_options(cls, register):
super(GoTest, cls).register_options(register)
register('--remote', action='store_true',
help='Enables running tests found in go_remote_libraries.')
register('--build-and-test-flags', default='',
help='Flags to pass in to `go test` tool.')
@classmethod
def supports_passthru_args(cls):
return True
def execute(self):
# Only executes the tests from the package specified by the target roots, so
# we don't run the tests for _all_ dependencies of said package.
targets = filter(self.is_go if self.get_options().remote else self.is_local_src,
self.context.target_roots)
for target in targets:
self.ensure_workspace(target)
self._go_test(target)
def _go_test(self, target):
args = (self.get_options().build_and_test_flags.split()
+ [target.import_path]
+ self.get_passthru_args())
result, go_cmd = self.go_dist.execute_go_cmd('test', gopath=self.get_gopath(target), args=args,
workunit_factory=self.context.new_workunit,
workunit_labels=[WorkUnitLabel.TEST])
if result != 0:
raise TaskError('{} failed with exit code {}'.format(go_cmd, result))
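# Usage sketch (the target address is illustrative; the goal name comes from
# the plugin's registration, outside this file). Anything after `--` on the
# command line is forwarded to `go test` via get_passthru_args():
#   ./pants test contrib/go/examples/src/go/libA -- -run TestFoo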
| apache-2.0 | -4,085,857,194,136,753,000 | -1,614,572,911,853,003,500 | 41.796296 | 99 | 0.671571 | false |
rusty1s/embedded_gcnn | cifar_graph.py | 1 | 3650 | from lib.datasets import Cifar10 as Data
from lib.model import Model as BaseModel, generate_placeholders, train
from lib.segmentation import extract_features_fixed
# from lib.segmentation import slic_fixed
from lib.segmentation import quickshift_fixed
from lib.pipeline import preprocess_pipeline_fixed
from lib.layer import EmbeddedGCNN as Conv, MaxPool, AveragePool, FC
# SLIC_FEATURES = [0, 2, 5, 7, 8, 9, 19, 21, 22]
QUICKSHIFT_FEATURES = [2, 3, 4, 5, 7, 8, 19, 21, 22]
DATA_DIR = 'data/cifar_10'
PREPROCESS_FIRST = None
# PREPROCESS_FIRST = 'data/cifar_10/slic'
# PREPROCESS_FIRST = 'data/cifar_10/quickshift'
LEVELS = 4
CONNECTIVITY = 8
SCALE_INVARIANCE = False
STDDEV = 1
LEARNING_RATE = 0.001
NUM_STEPS_PER_DECAY = 5000
TRAIN_DIR = None
# LOG_DIR = 'data/summaries/cifar_slic_graph'
LOG_DIR = 'data/summaries/cifar_quickshift_graph'
AUGMENT_TRAIN_EXAMPLES = True
DROPOUT = 0.5
BATCH_SIZE = 64
MAX_STEPS = 60000
DISPLAY_STEP = 10
# FORM_FEATURES = SLIC_FEATURES
FORM_FEATURES = QUICKSHIFT_FEATURES
NUM_FEATURES = len(FORM_FEATURES) + 3
data = Data(DATA_DIR)
# segmentation_algorithm = slic_fixed(
# num_segments=200, compactness=5, max_iterations=10, sigma=0)
segmentation_algorithm = quickshift_fixed(
ratio=1, kernel_size=1, max_dist=5, sigma=0)
feature_extraction_algorithm = extract_features_fixed(FORM_FEATURES)
preprocess_algorithm = preprocess_pipeline_fixed(
segmentation_algorithm, feature_extraction_algorithm, LEVELS, CONNECTIVITY,
SCALE_INVARIANCE, STDDEV)
class Model(BaseModel):
def _build(self):
conv_1_1 = Conv(
NUM_FEATURES,
64,
adjs_dist=self.placeholders['adj_dist_1'],
adjs_rad=self.placeholders['adj_rad_1'],
logging=self.logging)
conv_1_2 = Conv(
64,
64,
adjs_dist=self.placeholders['adj_dist_1'],
adjs_rad=self.placeholders['adj_rad_1'],
logging=self.logging)
max_pool_1 = MaxPool(size=2)
conv_2 = Conv(
64,
128,
adjs_dist=self.placeholders['adj_dist_2'],
adjs_rad=self.placeholders['adj_rad_2'],
logging=self.logging)
max_pool_2 = MaxPool(size=2)
conv_3 = Conv(
128,
256,
adjs_dist=self.placeholders['adj_dist_3'],
adjs_rad=self.placeholders['adj_rad_3'],
logging=self.logging)
max_pool_3 = MaxPool(size=2)
conv_4 = Conv(
256,
512,
adjs_dist=self.placeholders['adj_dist_4'],
adjs_rad=self.placeholders['adj_rad_4'],
logging=self.logging)
average_pool = AveragePool()
fc_1 = FC(512, 256, weight_decay=0.004, logging=self.logging)
fc_2 = FC(256, 128, weight_decay=0.004, logging=self.logging)
fc_3 = FC(
128,
data.num_classes,
act=lambda x: x,
bias=False,
dropout=self.placeholders['dropout'],
logging=self.logging)
self.layers = [
conv_1_1, conv_1_2, max_pool_1, conv_2, max_pool_2, conv_3,
max_pool_3, conv_4, average_pool, fc_1, fc_2, fc_3
]
placeholders = generate_placeholders(BATCH_SIZE, LEVELS, NUM_FEATURES,
data.num_classes)
model = Model(
placeholders=placeholders,
learning_rate=LEARNING_RATE,
num_steps_per_decay=NUM_STEPS_PER_DECAY,
train_dir=TRAIN_DIR,
log_dir=LOG_DIR)
train(model, data, preprocess_algorithm, BATCH_SIZE, DROPOUT,
AUGMENT_TRAIN_EXAMPLES, MAX_STEPS, PREPROCESS_FIRST, DISPLAY_STEP)
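# Running this file as a script performs the whole pipeline: CIFAR-10 images
# are segmented (quickshift here; the SLIC variant is left commented out
# above), converted to multi-level graphs, and trained for MAX_STEPS batches:
#   python cifar_graph.py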
| mit | 2,392,344,673,240,428,000 | -262,852,904,102,243,650 | 30.73913 | 79 | 0.62411 | false |
centricular/cerbero | cerbero/commands/runit.py | 9 | 1586 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from cerbero.commands import Command, register_command
from cerbero.utils import _, N_, ArgparseArgument, shell
class Run(Command):
doc = N_('Runs a command in the cerbero shell')
name = 'run'
def __init__(self):
Command.__init__(self,
[ArgparseArgument('cmd', nargs='+',
help=_('command to run')),
ArgparseArgument('-v', '--verbose',
action='store_true',
default=False,
help=_('verbose mode'))
])
def run(self, config, args):
command = ' '.join(args.cmd)
shell.call(command, '.', True, args.verbose)
register_command(Run)
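# The command registered above is reached through cerbero's CLI front end,
# e.g. (the front-end script name depends on the checkout):
#   cerbero run make check
# Everything after 'run' is joined into one command line and executed via
# shell.call in the current directory.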
| lgpl-2.1 | -5,599,004,135,729,330,000 | 1,845,865,533,576,427,300 | 35.883721 | 67 | 0.648172 | false |
khagler/boto | boto/glacier/writer.py | 153 | 9668 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
# Copyright (c) 2012 Robie Basak <[email protected]>
# Tree hash implementation from Aaron Brady [email protected]
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import hashlib
from boto.glacier.utils import chunk_hashes, tree_hash, bytes_to_hex
# This import is provided for backwards compatibility. This function is
# now in boto.glacier.utils, but any existing code can still import
# this directly from this module.
from boto.glacier.utils import compute_hashes_from_fileobj
_ONE_MEGABYTE = 1024 * 1024
class _Partitioner(object):
"""Convert variable-size writes into part-sized writes
Call write(data) with variable sized data as needed to write all data. Call
flush() after all data is written.
This instance will call send_fn(part_data) as needed in part_size pieces,
except for the final part which may be shorter than part_size. Make sure to
call flush() to ensure that a short final part results in a final send_fn
call.
"""
def __init__(self, part_size, send_fn):
self.part_size = part_size
self.send_fn = send_fn
self._buffer = []
self._buffer_size = 0
def write(self, data):
if data == b'':
return
self._buffer.append(data)
self._buffer_size += len(data)
while self._buffer_size > self.part_size:
self._send_part()
def _send_part(self):
data = b''.join(self._buffer)
# Put back any data remaining over the part size into the
# buffer
if len(data) > self.part_size:
self._buffer = [data[self.part_size:]]
self._buffer_size = len(self._buffer[0])
else:
self._buffer = []
self._buffer_size = 0
# The part we will send
part = data[:self.part_size]
self.send_fn(part)
def flush(self):
if self._buffer_size > 0:
self._send_part()
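# Illustrative sketch, not part of boto: how _Partitioner groups
# variable-size writes into fixed-size parts. The names below are
# hypothetical and the example is kept in comments so the module's
# behaviour is unchanged.
#
#   parts = []
#   partitioner = _Partitioner(part_size=4, send_fn=parts.append)
#   partitioner.write(b'abcdef')   # sends b'abcd', keeps b'ef' buffered
#   partitioner.write(b'gh')       # still buffered (4 bytes, not > 4)
#   partitioner.flush()            # sends the short final part b'efgh'
#   assert parts == [b'abcd', b'efgh']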
class _Uploader(object):
"""Upload to a Glacier upload_id.
Call upload_part for each part (in any order) and then close to complete
the upload.
"""
def __init__(self, vault, upload_id, part_size, chunk_size=_ONE_MEGABYTE):
self.vault = vault
self.upload_id = upload_id
self.part_size = part_size
self.chunk_size = chunk_size
self.archive_id = None
self._uploaded_size = 0
self._tree_hashes = []
self.closed = False
def _insert_tree_hash(self, index, raw_tree_hash):
list_length = len(self._tree_hashes)
if index >= list_length:
self._tree_hashes.extend([None] * (list_length - index + 1))
self._tree_hashes[index] = raw_tree_hash
def upload_part(self, part_index, part_data):
"""Upload a part to Glacier.
:param part_index: part number where 0 is the first part
:param part_data: data to upload corresponding to this part
"""
if self.closed:
raise ValueError("I/O operation on closed file")
# Create a request and sign it
part_tree_hash = tree_hash(chunk_hashes(part_data, self.chunk_size))
self._insert_tree_hash(part_index, part_tree_hash)
hex_tree_hash = bytes_to_hex(part_tree_hash)
linear_hash = hashlib.sha256(part_data).hexdigest()
start = self.part_size * part_index
content_range = (start,
(start + len(part_data)) - 1)
response = self.vault.layer1.upload_part(self.vault.name,
self.upload_id,
linear_hash,
hex_tree_hash,
content_range, part_data)
response.read()
self._uploaded_size += len(part_data)
def skip_part(self, part_index, part_tree_hash, part_length):
"""Skip uploading of a part.
The final close call needs to calculate the tree hash and total size
of all uploaded data, so this is the mechanism for resume
functionality to provide it without actually uploading the data again.
:param part_index: part number where 0 is the first part
:param part_tree_hash: binary tree_hash of part being skipped
:param part_length: length of part being skipped
"""
if self.closed:
raise ValueError("I/O operation on closed file")
self._insert_tree_hash(part_index, part_tree_hash)
self._uploaded_size += part_length
def close(self):
if self.closed:
return
if None in self._tree_hashes:
raise RuntimeError("Some parts were not uploaded.")
# Complete the multipart glacier upload
hex_tree_hash = bytes_to_hex(tree_hash(self._tree_hashes))
response = self.vault.layer1.complete_multipart_upload(
self.vault.name, self.upload_id, hex_tree_hash,
self._uploaded_size)
self.archive_id = response['ArchiveId']
self.closed = True
def generate_parts_from_fobj(fobj, part_size):
data = fobj.read(part_size)
while data:
yield data.encode('utf-8')
data = fobj.read(part_size)
def resume_file_upload(vault, upload_id, part_size, fobj, part_hash_map,
chunk_size=_ONE_MEGABYTE):
"""Resume upload of a file already part-uploaded to Glacier.
The resumption of an upload where the part-uploaded section is empty is a
valid degenerate case that this function can handle. In this case,
part_hash_map should be an empty dict.
:param vault: boto.glacier.vault.Vault object.
:param upload_id: existing Glacier upload id of upload being resumed.
:param part_size: part size of existing upload.
:param fobj: file object containing local data to resume. This must read
from the start of the entire upload, not just from the point being
resumed. Use fobj.seek(0) to achieve this if necessary.
:param part_hash_map: {part_index: part_tree_hash, ...} of data already
uploaded. Each supplied part_tree_hash will be verified and the part
re-uploaded if there is a mismatch.
:param chunk_size: chunk size of tree hash calculation. This must be
1 MiB for Amazon.
"""
uploader = _Uploader(vault, upload_id, part_size, chunk_size)
for part_index, part_data in enumerate(
generate_parts_from_fobj(fobj, part_size)):
part_tree_hash = tree_hash(chunk_hashes(part_data, chunk_size))
if (part_index not in part_hash_map or
part_hash_map[part_index] != part_tree_hash):
uploader.upload_part(part_index, part_data)
else:
uploader.skip_part(part_index, part_tree_hash, len(part_data))
uploader.close()
return uploader.archive_id
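# Illustrative sketch, not part of boto: resuming an interrupted upload.
# `vault`, `upload_id` and `part_hash_map` are assumed to come from an
# earlier initiate_multipart_upload call and its bookkeeping; the file
# name is hypothetical and the part size must match the original upload.
#
#   with open('archive.bin', 'rb') as fobj:
#       archive_id = resume_file_upload(
#           vault, upload_id, 4 * _ONE_MEGABYTE, fobj, part_hash_map)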
class Writer(object):
"""
Presents a file-like object for writing to an Amazon Glacier
Archive. The data is written using the multi-part upload API.
"""
def __init__(self, vault, upload_id, part_size, chunk_size=_ONE_MEGABYTE):
self.uploader = _Uploader(vault, upload_id, part_size, chunk_size)
self.partitioner = _Partitioner(part_size, self._upload_part)
self.closed = False
self.next_part_index = 0
def write(self, data):
if self.closed:
raise ValueError("I/O operation on closed file")
self.partitioner.write(data)
def _upload_part(self, part_data):
self.uploader.upload_part(self.next_part_index, part_data)
self.next_part_index += 1
def close(self):
if self.closed:
return
self.partitioner.flush()
self.uploader.close()
self.closed = True
def get_archive_id(self):
self.close()
return self.uploader.archive_id
@property
def current_tree_hash(self):
"""
Returns the current tree hash for the data that's been written
**so far**.
Only once the writing is complete is the final tree hash returned.
"""
return tree_hash(self.uploader._tree_hashes)
@property
def current_uploaded_size(self):
"""
Returns the current uploaded size for the data that's been written
**so far**.
Only once the writing is complete is the final uploaded size returned.
"""
return self.uploader._uploaded_size
@property
def upload_id(self):
return self.uploader.upload_id
@property
def vault(self):
return self.uploader.vault
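# Illustrative sketch, not part of boto: a typical Writer life cycle.
# The `vault` object is assumed to be a boto.glacier.vault.Vault obtained
# elsewhere; obtaining the writer through its create_archive_writer
# method is the usual route.
#
#   writer = vault.create_archive_writer(description='nightly backup')
#   writer.write(b'chunk of archive data')
#   writer.close()
#   archive_id = writer.get_archive_id()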
| mit | -8,544,248,438,077,588,000 | -4,854,787,292,416,223,000 | 35.900763 | 79 | 0.63343 | false |
danielvdende/incubator-airflow | airflow/contrib/sensors/cassandra_table_sensor.py | 8 | 2323 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.cassandra_hook import CassandraHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class CassandraTableSensor(BaseSensorOperator):
"""
Checks for the existence of a table in a Cassandra cluster.
For example, if you want to wait for a table called 't' to be created
in a keyspace 'k', instantiate it as follows:
>>> cassandra_sensor = CassandraTableSensor(table="k.t",
... cassandra_conn_id="cassandra_default",
... task_id="cassandra_sensor")
"""
template_fields = ('table',)
@apply_defaults
def __init__(self, table, cassandra_conn_id, *args, **kwargs):
"""
Create a new CassandraTableSensor
:param table: Target Cassandra table.
Use dot notation to target a specific keyspace.
:type table: string
:param cassandra_conn_id: The connection ID to use
when connecting to Cassandra cluster
:type cassandra_conn_id: string
"""
super(CassandraTableSensor, self).__init__(*args, **kwargs)
self.cassandra_conn_id = cassandra_conn_id
self.table = table
def poke(self, context):
self.log.info('Sensor check existence of table: %s', self.table)
hook = CassandraHook(self.cassandra_conn_id)
return hook.table_exists(self.table)
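# Illustrative sketch, not part of Airflow: wiring the sensor into a DAG.
# The `dag` object and the poke interval below are hypothetical.
#
#   wait_for_table = CassandraTableSensor(
#       task_id='wait_for_table',
#       table='k.t',
#       cassandra_conn_id='cassandra_default',
#       poke_interval=60,
#       dag=dag)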
| apache-2.0 | 1,933,232,793,755,558,100 | -5,587,831,125,545,944,000 | 40.482143 | 86 | 0.669393 | false |
rossburton/yocto-autobuilder | lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/manhole/test/test_explorer.py | 42 | 2513 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.manhole.explorer}.
"""
from twisted.trial import unittest
from twisted.manhole.explorer import (
CRUFT_WatchyThingie,
ExplorerImmutable,
Pool,
_WatchMonkey,
)
class Foo:
"""
Test helper.
"""
class PoolTestCase(unittest.TestCase):
"""
Tests for the Pool class.
"""
def test_instanceBuilding(self):
"""
If the object is not in the pool a new instance is created and
returned.
"""
p = Pool()
e = p.getExplorer(123, 'id')
self.assertIsInstance(e, ExplorerImmutable)
self.assertEqual(e.value, 123)
self.assertEqual(e.identifier, 'id')
class CRUFTWatchyThingieTestCase(unittest.TestCase):
"""
Tests for the CRUFT_WatchyThingie class.
"""
def test_watchObjectConstructedClass(self):
"""
L{CRUFT_WatchyThingie.watchObject} changes the class of its
first argument to a custom watching class.
"""
foo = Foo()
cwt = CRUFT_WatchyThingie()
cwt.watchObject(foo, 'id', 'cback')
# check new constructed class
newClassName = foo.__class__.__name__
self.assertEqual(newClassName, "WatchingFoo%X" % (id(foo),))
def test_watchObjectConstructedInstanceMethod(self):
"""
L{CRUFT_WatchyThingie.watchingfoo} adds a C{_watchEmitChanged}
attribute which refers to a bound method on the instance
passed to it.
"""
foo = Foo()
cwt = CRUFT_WatchyThingie()
cwt.watchObject(foo, 'id', 'cback')
# check new constructed instance method
self.assertIdentical(foo._watchEmitChanged.im_self, foo)
class WatchMonkeyTestCase(unittest.TestCase):
"""
Tests for the _WatchMonkey class.
"""
def test_install(self):
"""
When _WatchMonkey is installed on a method, calling that
method calls the _WatchMonkey.
"""
class Foo:
"""
Helper.
"""
def someMethod(self):
"""
Just a method.
"""
foo = Foo()
wm = _WatchMonkey(foo)
wm.install('someMethod')
# patch wm's method to check that the method was exchanged
called = []
wm.__call__ = lambda s: called.append(True)
# call and check
foo.someMethod()
self.assertTrue(called)
| gpl-2.0 | -7,852,348,306,487,913,000 | 2,376,157,176,408,424,400 | 23.637255 | 70 | 0.582969 | false |
thuydang/ocrfeeder | src/ocrfeeder/feeder/imageManipulation.py | 1 | 8701 | # -*- coding: utf-8 -*-
###########################################################################
# OCRFeeder - The complete OCR suite
# Copyright (C) 2009 Joaquim Rocha
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
from ocrfeeder.util.lib import debug
import gettext
from PIL import Image, ImageDraw
import os.path
from ocrfeeder.util import graphics
import sys
_ = gettext.gettext
class ImageProcessor:
def __init__(self, path_to_image,
window_size = None, contrast_tolerance = 120):
self.window_size = window_size
self.contrast_tolerance = contrast_tolerance
error_message = _("A problem occurred while trying to open the image:\n %s\n"
"Ensure the image exists or try converting it to another format.") % path_to_image
if os.path.isfile(path_to_image):
try:
self.original_image = Image.open(path_to_image)
self.black_n_white_image = self.original_image.convert('L')
if not self.window_size:
self.window_size = self.original_image.size[1] / 60.
debug('Window Size: ', self.window_size)
except:
debug(sys.exc_info())
raise ImageManipulationError(error_message)
else:
debug(sys.exc_info())
raise ImageManipulationError(error_message)
self.bg_color = 255
def __windowContrast(self, bgcolor, x, y):
image = self.black_n_white_image
width, height = image.size
image_upper_left_corner_x = x * self.window_size
image_upper_left_corner_y = y * self.window_size
i, j = 1, 1
while j < self.window_size + 1:
if not image_upper_left_corner_y + j < height:
break
while i < self.window_size + 1:
if not image_upper_left_corner_x + i < width:
break
pixel_point = (image_upper_left_corner_x + i,
image_upper_left_corner_y + j)
if graphics.colorsContrast(image.getpixel(pixel_point),
bgcolor,
self.contrast_tolerance):
return 1
i += 3
i = 1
j += 3
return 0
def imageToBinary(self):
image = self.black_n_white_image
binary_info = ['']
width, height = image.size
i, j = 0, 0
while j < height / self.window_size:
while i < width / self.window_size:
binary_info[-1] += str(self.__windowContrast(self.bg_color, i, j))
i += 1
i = 0
binary_info += ['']
j += 1
return binary_info
def divideImageClipInColumns(self, clip_dimensions, column_min_width):
if column_min_width == 0:
return [clip_dimensions]
if column_min_width is None:
column_min_width = int(self.window_size / 2)
clip = self.black_n_white_image.crop(clip_dimensions)
width, height = clip.size
content_column_bounds = self.__getImageContentColumnsBounds(clip,
column_min_width)
x0, y0, x1, y1 = clip_dimensions
column_bounds = []
for i in range(0, len(content_column_bounds), 2):
column_bounds.append((x0 + content_column_bounds[i], y0,
x0 + content_column_bounds[i + 1], y1))
return column_bounds
def __getImageContentColumnsBounds(self, image, column_min_width):
width, height = image.size
column_bounds = []
i = 0
while i < width:
next_step = min(i + column_min_width, width)
slice_bounds = (i, 0, next_step, height)
slice_clip = image.crop(slice_bounds)
has_contrast = self.__imageHasContrast(slice_clip)
if has_contrast:
if not column_bounds:
column_bounds.extend([i, next_step])
elif column_bounds[-1] == i:
column_bounds[-1] = next_step
else:
column_bounds.extend([i, next_step])
i = next_step
return column_bounds
def __imageHasContrast(self, image):
colors = image.getcolors()
has_contrast = True
for color_count in colors:
color = color_count[1]
has_contrast = graphics.colorsContrast(color,
self.bg_color,
self.contrast_tolerance)
if has_contrast:
break
return has_contrast
def adjustImageClipMargins(self, clip_dimensions, margins_min_width):
if margins_min_width == 0:
return clip_dimensions
if margins_min_width is None:
margins_min_width = int(self.window_size / 2)
x0, y0, x1, y1 = clip_dimensions
clip = self.black_n_white_image.crop(clip_dimensions)
left, top, right, bottom = self.__getImageMargins(clip,
margins_min_width)
x0, y0, x1, y1 = x0 + left, y0 + top, x1 - right, y1 - bottom
# Prevent having contents outside of the image's limits
width, height = self.black_n_white_image.size
x1 = min(x1, width)
y1 = min(y1, height)
return x0, y0, x1, y1
def __getImageMargins(self, image, margins_min_width):
width, height = image.size
margins = [0, 0, 0, 0]
# Left margin
i = 0
while i < width - margins_min_width:
clip = image.crop((i, 0, i + margins_min_width, height))
if self.__imageHasContrast(clip):
margins[0] = i
break
i += margins_min_width
# Right margin
i = width
while i > margins_min_width:
clip = image.crop((i - margins_min_width, 0, i, height))
if self.__imageHasContrast(clip):
margins[2] = width - i
break
i -= margins_min_width
# Top margin
i = 0
while i < height - margins_min_width:
clip = image.crop((0, i, width, i + margins_min_width))
if self.__imageHasContrast(clip):
margins[1] = i
break
i += margins_min_width
# Bottom margin
i = height
while i > margins_min_width:
clip = image.crop((0, i - margins_min_width, width, i))
if self.__imageHasContrast(clip):
margins[3] = height - i
break
i -= margins_min_width
return margins
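# Illustrative sketch, not part of OCRFeeder: trimming a clip's margins
# and then splitting it into text columns. The file name and clip bounds
# are hypothetical; passing None lets both methods derive their widths
# from the window size.
#
#   processor = ImageProcessor('page.png')
#   clip = processor.adjustImageClipMargins((0, 0, 800, 600), None)
#   columns = processor.divideImageClipInColumns(clip, None)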
class ContentAnalyser:
def __init__(self, image):
self.image = image
def getHeight(self):
width, height = self.image.size
image_draw = ImageDraw.Draw(self.image)
i = 0
while i+3 < height:
current_line_image = self.image.crop((0, i, width, i + 3))
if len(current_line_image.getcolors()) < 10:
image_draw.rectangle((0, i, width, i + 3), fill = (255, 255, 255))
i += 3
def __getBlankSpaceFromTopToBottom(self, image):
width, height = image.size
i = 0
while i + 2 < height:
current_line_image = image.crop((0, i, width, i + 1))
if len(current_line_image.getcolors()) > 1:
break
i += 2
return i
class ImageManipulationError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
class InsuficientPointsForPolygon(Exception):
def __init__(self):
pass
def __str__(self):
return 'Insufficient number of points for polygon. Must be at least three points.'
| gpl-3.0 | -6,195,561,014,522,912,000 | 436,722,718,467,086,340 | 34.806584 | 108 | 0.526951 | false |
neumerance/deploy | .venv/lib/python2.7/site-packages/django/contrib/gis/utils/srs.py | 210 | 3235 | from django.contrib.gis.gdal import SpatialReference
def add_srs_entry(srs, auth_name='EPSG', auth_srid=None, ref_sys_name=None,
database=None):
"""
This function takes a GDAL SpatialReference system and adds its information
to the `spatial_ref_sys` table of the spatial backend. Doing this enables
database-level spatial transformations for the backend. Thus, this utility
is useful for adding spatial reference systems not included by default with
the backend -- for example, the so-called "Google Maps Mercator Projection"
is excluded in PostGIS 1.3 and below, and the following adds it to the
`spatial_ref_sys` table:
>>> from django.contrib.gis.utils import add_srs_entry
>>> add_srs_entry(900913)
Keyword Arguments:
auth_name:
This keyword may be customized with the value of the `auth_name` field.
Defaults to 'EPSG'.
auth_srid:
This keyword may be customized with the value of the `auth_srid` field.
Defaults to the SRID determined by GDAL.
ref_sys_name:
For SpatiaLite users only, sets the value of the `ref_sys_name` field.
Defaults to the name determined by GDAL.
database:
The name of the database connection to use; the default is the value
of `django.db.DEFAULT_DB_ALIAS` (at the time of this writing, it's value
is 'default').
"""
from django.db import connections, DEFAULT_DB_ALIAS
if not database:
database = DEFAULT_DB_ALIAS
connection = connections[database]
if not hasattr(connection.ops, 'spatial_version'):
raise Exception('The `add_srs_entry` utility only works '
'with spatial backends.')
if connection.ops.oracle or connection.ops.mysql:
raise Exception('This utility does not support the '
'Oracle or MySQL spatial backends.')
SpatialRefSys = connection.ops.spatial_ref_sys()
# If argument is not a `SpatialReference` instance, use it as parameter
# to construct a `SpatialReference` instance.
if not isinstance(srs, SpatialReference):
srs = SpatialReference(srs)
if srs.srid is None:
raise Exception('Spatial reference requires an SRID to be '
'compatible with the spatial backend.')
# Initializing the keyword arguments dictionary for both PostGIS
# and SpatiaLite.
kwargs = {'srid' : srs.srid,
'auth_name' : auth_name,
'auth_srid' : auth_srid or srs.srid,
'proj4text' : srs.proj4,
}
# Backend-specific fields for the SpatialRefSys model.
if connection.ops.postgis:
kwargs['srtext'] = srs.wkt
if connection.ops.spatialite:
kwargs['ref_sys_name'] = ref_sys_name or srs.name
# Creating the spatial_ref_sys model.
try:
# Try getting via SRID only, because using all kwargs may
# differ from exact wkt/proj in database.
sr = SpatialRefSys.objects.using(database).get(srid=srs.srid)
except SpatialRefSys.DoesNotExist:
sr = SpatialRefSys.objects.using(database).create(**kwargs)
# Alias is for backwards-compatibility purposes.
add_postgis_srs = add_srs_entry
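# Illustrative sketch, assumptions noted: adding an entry for a spatial
# reference to a non-default database alias named 'geodata'. The alias
# is hypothetical.
#
#   from django.contrib.gis.gdal import SpatialReference
#   add_srs_entry(SpatialReference('WGS84'), database='geodata')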
| apache-2.0 | -1,009,726,651,045,979,900 | 3,110,178,078,566,902,000 | 39.4375 | 79 | 0.66677 | false |
idea4bsd/idea4bsd | plugins/hg4idea/testData/bin/hgext/color.py | 90 | 19590 | # color.py color output for the status and qseries commands
#
# Copyright (C) 2007 Kevin Christen <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''colorize output from some commands
This extension modifies the status and resolve commands to add color
to their output to reflect file status, the qseries command to add
color to reflect patch status (applied, unapplied, missing), and to
diff-related commands to highlight additions, removals, diff headers,
and trailing whitespace.
Other effects in addition to color, like bold and underlined text, are
also available. By default, the terminfo database is used to find the
terminal codes used to change color and effect. If terminfo is not
available, then effects are rendered with the ECMA-48 SGR control
function (aka ANSI escape codes).
Default effects may be overridden from your configuration file::
[color]
status.modified = blue bold underline red_background
status.added = green bold
status.removed = red bold blue_background
status.deleted = cyan bold underline
status.unknown = magenta bold underline
status.ignored = black bold
# 'none' turns off all effects
status.clean = none
status.copied = none
qseries.applied = blue bold underline
qseries.unapplied = black bold
qseries.missing = red bold
diff.diffline = bold
diff.extended = cyan bold
diff.file_a = red bold
diff.file_b = green bold
diff.hunk = magenta
diff.deleted = red
diff.inserted = green
diff.changed = white
diff.trailingwhitespace = bold red_background
resolve.unresolved = red bold
resolve.resolved = green bold
bookmarks.current = green
branches.active = none
branches.closed = black bold
branches.current = green
branches.inactive = none
tags.normal = green
tags.local = black bold
The available effects in terminfo mode are 'blink', 'bold', 'dim',
'inverse', 'invisible', 'italic', 'standout', and 'underline'; in
ECMA-48 mode, the options are 'bold', 'inverse', 'italic', and
'underline'. How each is rendered depends on the terminal emulator.
Some may not be available for a given terminal type, and will be
silently ignored.
Note that on some systems, terminfo mode may cause problems when using
color with the pager extension and less -R. less with the -R option
will only display ECMA-48 color codes, and terminfo mode may sometimes
emit codes that less doesn't understand. You can work around this by
either using ansi mode (or auto mode), or by using less -r (which will
pass through all terminal control codes, not just color control
codes).
Because there are only eight standard colors, this module allows you
to define color names for other color slots which might be available
for your terminal type, assuming terminfo mode. For instance::
color.brightblue = 12
color.pink = 207
color.orange = 202
to set 'brightblue' to color slot 12 (useful for 16 color terminals
that have brighter colors defined in the upper eight) and, 'pink' and
'orange' to colors in 256-color xterm's default color cube. These
defined colors may then be used as any of the pre-defined eight,
including appending '_background' to set the background to that color.
By default, the color extension will use ANSI mode (or win32 mode on
Windows) if it detects a terminal. To override auto mode (to enable
terminfo mode, for example), set the following configuration option::
[color]
mode = terminfo
Any value other than 'ansi', 'win32', 'terminfo', or 'auto' will
disable color.
'''
import os
from mercurial import commands, dispatch, extensions, ui as uimod, util
from mercurial import templater, error
from mercurial.i18n import _
testedwith = 'internal'
# start and stop parameters for effects
_effects = {'none': 0, 'black': 30, 'red': 31, 'green': 32, 'yellow': 33,
'blue': 34, 'magenta': 35, 'cyan': 36, 'white': 37, 'bold': 1,
'italic': 3, 'underline': 4, 'inverse': 7,
'black_background': 40, 'red_background': 41,
'green_background': 42, 'yellow_background': 43,
'blue_background': 44, 'purple_background': 45,
'cyan_background': 46, 'white_background': 47}
def _terminfosetup(ui, mode):
'''Initialize terminfo data and the terminal if we're in terminfo mode.'''
global _terminfo_params
# If we failed to load curses, we go ahead and return.
if not _terminfo_params:
return
# Otherwise, see what the config file says.
if mode not in ('auto', 'terminfo'):
return
_terminfo_params.update((key[6:], (False, int(val)))
for key, val in ui.configitems('color')
if key.startswith('color.'))
try:
curses.setupterm()
except curses.error, e:
_terminfo_params = {}
return
for key, (b, e) in _terminfo_params.items():
if not b:
continue
if not curses.tigetstr(e):
# Most terminals don't support dim, invis, etc, so don't be
# noisy and use ui.debug().
ui.debug("no terminfo entry for %s\n" % e)
del _terminfo_params[key]
if not curses.tigetstr('setaf') or not curses.tigetstr('setab'):
# Only warn about missing terminfo entries if we explicitly asked for
# terminfo mode.
if mode == "terminfo":
ui.warn(_("no terminfo entry for setab/setaf: reverting to "
"ECMA-48 color\n"))
_terminfo_params = {}
def _modesetup(ui, opts):
global _terminfo_params
coloropt = opts['color']
auto = coloropt == 'auto'
always = not auto and util.parsebool(coloropt)
if not always and not auto:
return None
formatted = always or (os.environ.get('TERM') != 'dumb' and ui.formatted())
mode = ui.config('color', 'mode', 'auto')
realmode = mode
if mode == 'auto':
if os.name == 'nt' and 'TERM' not in os.environ:
# looks like a cmd.exe console, use win32 API or nothing
realmode = 'win32'
else:
realmode = 'ansi'
if realmode == 'win32':
_terminfo_params = {}
if not w32effects:
if mode == 'win32':
# only warn if color.mode is explicitly set to win32
ui.warn(_('warning: failed to set color mode to %s\n') % mode)
return None
_effects.update(w32effects)
elif realmode == 'ansi':
_terminfo_params = {}
elif realmode == 'terminfo':
_terminfosetup(ui, mode)
if not _terminfo_params:
if mode == 'terminfo':
## FIXME Shouldn't we return None in this case too?
# only warn if color.mode is explicitly set to terminfo
ui.warn(_('warning: failed to set color mode to %s\n') % mode)
realmode = 'ansi'
else:
return None
if always or (auto and formatted):
return realmode
return None
try:
import curses
# Mapping from effect name to terminfo attribute name or color number.
# This will also force-load the curses module.
_terminfo_params = {'none': (True, 'sgr0'),
'standout': (True, 'smso'),
'underline': (True, 'smul'),
'reverse': (True, 'rev'),
'inverse': (True, 'rev'),
'blink': (True, 'blink'),
'dim': (True, 'dim'),
'bold': (True, 'bold'),
'invisible': (True, 'invis'),
'italic': (True, 'sitm'),
'black': (False, curses.COLOR_BLACK),
'red': (False, curses.COLOR_RED),
'green': (False, curses.COLOR_GREEN),
'yellow': (False, curses.COLOR_YELLOW),
'blue': (False, curses.COLOR_BLUE),
'magenta': (False, curses.COLOR_MAGENTA),
'cyan': (False, curses.COLOR_CYAN),
'white': (False, curses.COLOR_WHITE)}
except ImportError:
_terminfo_params = False
_styles = {'grep.match': 'red bold',
'grep.linenumber': 'green',
'grep.rev': 'green',
'grep.change': 'green',
'grep.sep': 'cyan',
'grep.filename': 'magenta',
'grep.user': 'magenta',
'grep.date': 'magenta',
'bookmarks.current': 'green',
'branches.active': 'none',
'branches.closed': 'black bold',
'branches.current': 'green',
'branches.inactive': 'none',
'diff.changed': 'white',
'diff.deleted': 'red',
'diff.diffline': 'bold',
'diff.extended': 'cyan bold',
'diff.file_a': 'red bold',
'diff.file_b': 'green bold',
'diff.hunk': 'magenta',
'diff.inserted': 'green',
'diff.trailingwhitespace': 'bold red_background',
'diffstat.deleted': 'red',
'diffstat.inserted': 'green',
'ui.prompt': 'yellow',
'log.changeset': 'yellow',
'resolve.resolved': 'green bold',
'resolve.unresolved': 'red bold',
'status.added': 'green bold',
'status.clean': 'none',
'status.copied': 'none',
'status.deleted': 'cyan bold underline',
'status.ignored': 'black bold',
'status.modified': 'blue bold',
'status.removed': 'red bold',
'status.unknown': 'magenta bold underline',
'tags.normal': 'green',
'tags.local': 'black bold'}
def _effect_str(effect):
'''Helper function for render_effects().'''
bg = False
if effect.endswith('_background'):
bg = True
effect = effect[:-11]
attr, val = _terminfo_params[effect]
if attr:
return curses.tigetstr(val)
elif bg:
return curses.tparm(curses.tigetstr('setab'), val)
else:
return curses.tparm(curses.tigetstr('setaf'), val)
def render_effects(text, effects):
'Wrap text in commands to turn on each effect.'
if not text:
return text
if not _terminfo_params:
start = [str(_effects[e]) for e in ['none'] + effects.split()]
start = '\033[' + ';'.join(start) + 'm'
stop = '\033[' + str(_effects['none']) + 'm'
else:
start = ''.join(_effect_str(effect)
for effect in ['none'] + effects.split())
stop = _effect_str('none')
return ''.join([start, text, stop])
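# Illustrative sketch, not part of Mercurial: in ECMA-48 mode the call
#   render_effects('deleted', 'red bold')
# returns '\033[0;31;1mdeleted\033[0m' -- the text wrapped in SGR
# start/stop codes built from the _effects table above.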
def extstyles():
for name, ext in extensions.extensions():
_styles.update(getattr(ext, 'colortable', {}))
def configstyles(ui):
for status, cfgeffects in ui.configitems('color'):
if '.' not in status or status.startswith('color.'):
continue
cfgeffects = ui.configlist('color', status)
if cfgeffects:
good = []
for e in cfgeffects:
if not _terminfo_params and e in _effects:
good.append(e)
elif e in _terminfo_params or e[:-11] in _terminfo_params:
good.append(e)
else:
ui.warn(_("ignoring unknown color/effect %r "
"(configured in color.%s)\n")
% (e, status))
_styles[status] = ' '.join(good)
class colorui(uimod.ui):
def popbuffer(self, labeled=False):
if self._colormode is None:
return super(colorui, self).popbuffer(labeled)
if labeled:
return ''.join(self.label(a, label) for a, label
in self._buffers.pop())
return ''.join(a for a, label in self._buffers.pop())
_colormode = 'ansi'
def write(self, *args, **opts):
if self._colormode is None:
return super(colorui, self).write(*args, **opts)
label = opts.get('label', '')
if self._buffers:
self._buffers[-1].extend([(str(a), label) for a in args])
elif self._colormode == 'win32':
for a in args:
win32print(a, super(colorui, self).write, **opts)
else:
return super(colorui, self).write(
*[self.label(str(a), label) for a in args], **opts)
def write_err(self, *args, **opts):
if self._colormode is None:
return super(colorui, self).write_err(*args, **opts)
label = opts.get('label', '')
if self._colormode == 'win32':
for a in args:
win32print(a, super(colorui, self).write_err, **opts)
else:
return super(colorui, self).write_err(
*[self.label(str(a), label) for a in args], **opts)
def label(self, msg, label):
if self._colormode is None:
return super(colorui, self).label(msg, label)
effects = []
for l in label.split():
s = _styles.get(l, '')
if s:
effects.append(s)
effects = ' '.join(effects)
if effects:
return '\n'.join([render_effects(s, effects)
for s in msg.split('\n')])
return msg
def templatelabel(context, mapping, args):
if len(args) != 2:
# i18n: "label" is a keyword
raise error.ParseError(_("label expects two arguments"))
thing = templater.stringify(args[1][0](context, mapping, args[1][1]))
thing = templater.runtemplate(context, mapping,
templater.compiletemplate(thing, context))
# apparently, repo could be a string that is the favicon?
repo = mapping.get('repo', '')
if isinstance(repo, str):
return thing
label = templater.stringify(args[0][0](context, mapping, args[0][1]))
label = templater.runtemplate(context, mapping,
templater.compiletemplate(label, context))
thing = templater.stringify(thing)
label = templater.stringify(label)
return repo.ui.label(thing, label)
def uisetup(ui):
if ui.plain():
return
if not issubclass(ui.__class__, colorui):
colorui.__bases__ = (ui.__class__,)
ui.__class__ = colorui
def colorcmd(orig, ui_, opts, cmd, cmdfunc):
mode = _modesetup(ui_, opts)
colorui._colormode = mode
if mode:
extstyles()
configstyles(ui_)
return orig(ui_, opts, cmd, cmdfunc)
extensions.wrapfunction(dispatch, '_runcommand', colorcmd)
templater.funcs['label'] = templatelabel
def extsetup(ui):
commands.globalopts.append(
('', 'color', 'auto',
# i18n: 'always', 'auto', and 'never' are keywords and should
# not be translated
_("when to colorize (boolean, always, auto, or never)"),
_('TYPE')))
if os.name != 'nt':
w32effects = None
else:
import re, ctypes
_kernel32 = ctypes.windll.kernel32
_WORD = ctypes.c_ushort
_INVALID_HANDLE_VALUE = -1
class _COORD(ctypes.Structure):
_fields_ = [('X', ctypes.c_short),
('Y', ctypes.c_short)]
class _SMALL_RECT(ctypes.Structure):
_fields_ = [('Left', ctypes.c_short),
('Top', ctypes.c_short),
('Right', ctypes.c_short),
('Bottom', ctypes.c_short)]
class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
_fields_ = [('dwSize', _COORD),
('dwCursorPosition', _COORD),
('wAttributes', _WORD),
('srWindow', _SMALL_RECT),
('dwMaximumWindowSize', _COORD)]
_STD_OUTPUT_HANDLE = 0xfffffff5L # (DWORD)-11
_STD_ERROR_HANDLE = 0xfffffff4L # (DWORD)-12
_FOREGROUND_BLUE = 0x0001
_FOREGROUND_GREEN = 0x0002
_FOREGROUND_RED = 0x0004
_FOREGROUND_INTENSITY = 0x0008
_BACKGROUND_BLUE = 0x0010
_BACKGROUND_GREEN = 0x0020
_BACKGROUND_RED = 0x0040
_BACKGROUND_INTENSITY = 0x0080
_COMMON_LVB_REVERSE_VIDEO = 0x4000
_COMMON_LVB_UNDERSCORE = 0x8000
# http://msdn.microsoft.com/en-us/library/ms682088%28VS.85%29.aspx
w32effects = {
'none': -1,
'black': 0,
'red': _FOREGROUND_RED,
'green': _FOREGROUND_GREEN,
'yellow': _FOREGROUND_RED | _FOREGROUND_GREEN,
'blue': _FOREGROUND_BLUE,
'magenta': _FOREGROUND_BLUE | _FOREGROUND_RED,
'cyan': _FOREGROUND_BLUE | _FOREGROUND_GREEN,
'white': _FOREGROUND_RED | _FOREGROUND_GREEN | _FOREGROUND_BLUE,
'bold': _FOREGROUND_INTENSITY,
'black_background': 0x100, # unused value > 0x0f
'red_background': _BACKGROUND_RED,
'green_background': _BACKGROUND_GREEN,
'yellow_background': _BACKGROUND_RED | _BACKGROUND_GREEN,
'blue_background': _BACKGROUND_BLUE,
'purple_background': _BACKGROUND_BLUE | _BACKGROUND_RED,
'cyan_background': _BACKGROUND_BLUE | _BACKGROUND_GREEN,
'white_background': (_BACKGROUND_RED | _BACKGROUND_GREEN |
_BACKGROUND_BLUE),
'bold_background': _BACKGROUND_INTENSITY,
'underline': _COMMON_LVB_UNDERSCORE, # double-byte charsets only
'inverse': _COMMON_LVB_REVERSE_VIDEO, # double-byte charsets only
}
passthrough = set([_FOREGROUND_INTENSITY,
_BACKGROUND_INTENSITY,
_COMMON_LVB_UNDERSCORE,
_COMMON_LVB_REVERSE_VIDEO])
stdout = _kernel32.GetStdHandle(
_STD_OUTPUT_HANDLE) # don't close the handle returned
if stdout is None or stdout == _INVALID_HANDLE_VALUE:
w32effects = None
else:
csbi = _CONSOLE_SCREEN_BUFFER_INFO()
if not _kernel32.GetConsoleScreenBufferInfo(
stdout, ctypes.byref(csbi)):
# stdout may not support GetConsoleScreenBufferInfo()
# when called from subprocess or redirected
w32effects = None
else:
origattr = csbi.wAttributes
ansire = re.compile('\033\[([^m]*)m([^\033]*)(.*)',
re.MULTILINE | re.DOTALL)
def win32print(text, orig, **opts):
label = opts.get('label', '')
attr = origattr
def mapcolor(val, attr):
if val == -1:
return origattr
elif val in passthrough:
return attr | val
elif val > 0x0f:
return (val & 0x70) | (attr & 0x8f)
else:
return (val & 0x07) | (attr & 0xf8)
# determine console attributes based on labels
for l in label.split():
style = _styles.get(l, '')
for effect in style.split():
attr = mapcolor(w32effects[effect], attr)
# hack to ensure regexp finds data
if not text.startswith('\033['):
text = '\033[m' + text
# Look for ANSI-like codes embedded in text
m = re.match(ansire, text)
try:
while m:
for sattr in m.group(1).split(';'):
if sattr:
attr = mapcolor(int(sattr), attr)
_kernel32.SetConsoleTextAttribute(stdout, attr)
orig(m.group(2), **opts)
m = re.match(ansire, m.group(3))
finally:
# Explicitly reset original attributes
_kernel32.SetConsoleTextAttribute(stdout, origattr)
| apache-2.0 | -7,137,754,869,312,104,000 | 7,046,257,043,926,822,000 | 34.944954 | 79 | 0.577182 | false |
andela-ifageyinbo/django | django/dispatch/dispatcher.py | 171 | 11711 | import sys
import threading
import warnings
import weakref
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.inspect import func_accepts_kwargs
from django.utils.six.moves import range
if six.PY2:
from .weakref_backports import WeakMethod
else:
from weakref import WeakMethod
def _make_id(target):
if hasattr(target, '__func__'):
return (id(target.__self__), id(target.__func__))
return id(target)
NONE_ID = _make_id(None)
# A marker for caching
NO_RECEIVERS = object()
class Signal(object):
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None, use_caching=False):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
self.lock = threading.Lock()
self.use_caching = use_caching
# For convenience we create empty caches even if they are not used.
# A note about caching: if use_caching is defined, then for each
# distinct sender we cache the receivers that sender has in
# 'sender_receivers_cache'. The cache is cleaned when .connect() or
# .disconnect() is called and populated on send().
self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
self._dead_receivers = False
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak referenceable.
Receivers must be able to accept keyword arguments.
If a receiver is connected with a dispatch_uid argument, it
will not be added if another receiver was already connected
with that dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
of type Signal, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if settings.configured and settings.DEBUG:
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
if not func_accepts_kwargs(receiver):
raise ValueError("Signal receivers must accept keyword arguments (**kwargs).")
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
ref = WeakMethod
receiver_object = receiver.__self__
if six.PY3:
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
else:
receiver = ref(receiver, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear()
def disconnect(self, receiver=None, sender=None, weak=None, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
will be removed from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be none if
dispatch_uid is specified.
sender
The registered sender to disconnect.
dispatch_uid
The unique identifier of the receiver to disconnect.
"""
if weak is not None:
warnings.warn("Passing `weak` to disconnect has no effect.",
RemovedInDjango20Warning, stacklevel=2)
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
disconnected = False
with self.lock:
self._clear_dead_receivers()
for index in range(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
disconnected = True
del self.receivers[index]
break
self.sender_receivers_cache.clear()
return disconnected
def has_listeners(self, sender=None):
return bool(self._live_receivers(sender))
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop, so it is quite possible to not have all
receivers called if a receiver raises an error.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
for receiver in self._live_receivers(sender):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
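# Illustrative sketch, not part of Django: defining, connecting and
# sending a custom signal. All names here are hypothetical.
#
#   pizza_done = Signal(providing_args=['toppings', 'size'])
#
#   def callback(sender, **kwargs):
#       print('pizza from %r' % sender)
#
#   pizza_done.connect(callback)
#   pizza_done.send(sender=None, toppings=['cheese'], size='large')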
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ]. May raise
DispatcherKeyError.
If any receiver raises an error (specifically any subclass of
Exception), the error instance is returned as the result for that
receiver. The traceback is always attached to the error at
``__traceback__``.
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(sender):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
if not hasattr(err, '__traceback__'):
err.__traceback__ = sys.exc_info()[2]
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _clear_dead_receivers(self):
# Note: caller is assumed to hold self.lock.
if self._dead_receivers:
self._dead_receivers = False
new_receivers = []
for r in self.receivers:
if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
continue
new_receivers.append(r)
self.receivers = new_receivers
def _live_receivers(self, sender):
"""
Filter sequence of receivers to get resolved, live receivers.
This checks for weak references and resolves them, then returning only
live receivers.
"""
receivers = None
if self.use_caching and not self._dead_receivers:
receivers = self.sender_receivers_cache.get(sender)
# We could end up here with NO_RECEIVERS even if we do check this case in
# .send() prior to calling _live_receivers() due to concurrent .send() call.
if receivers is NO_RECEIVERS:
return []
if receivers is None:
with self.lock:
self._clear_dead_receivers()
senderkey = _make_id(sender)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == NONE_ID or r_senderkey == senderkey:
receivers.append(receiver)
if self.use_caching:
if not receivers:
self.sender_receivers_cache[sender] = NO_RECEIVERS
else:
# Note, we must cache the weakref versions.
self.sender_receivers_cache[sender] = receivers
non_weak_receivers = []
for receiver in receivers:
if isinstance(receiver, weakref.ReferenceType):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
non_weak_receivers.append(receiver)
else:
non_weak_receivers.append(receiver)
return non_weak_receivers
def _remove_receiver(self, receiver=None):
# Mark that the self.receivers list has dead weakrefs. If so, we will
# clean those up in connect, disconnect and _live_receivers while
# holding self.lock. Note that doing the cleanup here isn't a good
# idea, _remove_receiver() will be called as side effect of garbage
# collection, and so the call can happen while we are already holding
# self.lock.
self._dead_receivers = True
def receiver(signal, **kwargs):
"""
A decorator for connecting receivers to signals. Used by passing in the
signal (or list of signals) and keyword arguments to connect::
@receiver(post_save, sender=MyModel)
def signal_receiver(sender, **kwargs):
...
@receiver([post_save, post_delete], sender=MyModel)
def signals_receiver(sender, **kwargs):
...
"""
def _decorator(func):
if isinstance(signal, (list, tuple)):
for s in signal:
s.connect(func, **kwargs)
else:
signal.connect(func, **kwargs)
return func
return _decorator
| bsd-3-clause | -5,685,475,624,365,825,000 | -1,080,089,065,598,836,600 | 35.943218 | 94 | 0.583554 | false |
DNFcode/edx-platform | cms/djangoapps/contentstore/features/course-settings.py | 50 | 7238 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from terrain.steps import reload_the_page
from selenium.webdriver.common.keys import Keys
from common import type_in_codemirror, upload_file
from django.conf import settings
from nose.tools import assert_true, assert_false, assert_equal # pylint: disable=no-name-in-module
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
COURSE_START_DATE_CSS = "#course-start-date"
COURSE_END_DATE_CSS = "#course-end-date"
ENROLLMENT_START_DATE_CSS = "#course-enrollment-start-date"
ENROLLMENT_END_DATE_CSS = "#course-enrollment-end-date"
COURSE_START_TIME_CSS = "#course-start-time"
COURSE_END_TIME_CSS = "#course-end-time"
ENROLLMENT_START_TIME_CSS = "#course-enrollment-start-time"
ENROLLMENT_END_TIME_CSS = "#course-enrollment-end-time"
DUMMY_TIME = "15:30"
DEFAULT_TIME = "00:00"
############### ACTIONS ####################
@step('I select Schedule and Details$')
def test_i_select_schedule_and_details(step):
world.click_course_settings()
link_css = 'li.nav-course-settings-schedule a'
world.css_click(link_css)
world.wait_for_requirejs(
["jquery", "js/models/course",
"js/models/settings/course_details", "js/views/settings/main"])
@step('I have set course dates$')
def test_i_have_set_course_dates(step):
step.given('I have opened a new course in Studio')
step.given('I select Schedule and Details')
step.given('And I set course dates')
@step('And I set course dates$')
def test_and_i_set_course_dates(step):
set_date_or_time(COURSE_START_DATE_CSS, '12/20/2013')
set_date_or_time(COURSE_END_DATE_CSS, '12/26/2013')
set_date_or_time(ENROLLMENT_START_DATE_CSS, '12/1/2013')
set_date_or_time(ENROLLMENT_END_DATE_CSS, '12/10/2013')
set_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)
set_date_or_time(ENROLLMENT_END_TIME_CSS, DUMMY_TIME)
@step('And I clear all the dates except start$')
def test_and_i_clear_all_the_dates_except_start(step):
set_date_or_time(COURSE_END_DATE_CSS, '')
set_date_or_time(ENROLLMENT_START_DATE_CSS, '')
set_date_or_time(ENROLLMENT_END_DATE_CSS, '')
@step('Then I see cleared dates$')
def test_then_i_see_cleared_dates(step):
verify_date_or_time(COURSE_END_DATE_CSS, '')
verify_date_or_time(ENROLLMENT_START_DATE_CSS, '')
verify_date_or_time(ENROLLMENT_END_DATE_CSS, '')
verify_date_or_time(COURSE_END_TIME_CSS, '')
verify_date_or_time(ENROLLMENT_START_TIME_CSS, '')
verify_date_or_time(ENROLLMENT_END_TIME_CSS, '')
# Verify course start date (required) and time still there
verify_date_or_time(COURSE_START_DATE_CSS, '12/20/2013')
verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)
@step('I clear the course start date$')
def test_i_clear_the_course_start_date(step):
set_date_or_time(COURSE_START_DATE_CSS, '')
@step('I receive a warning about course start date$')
def test_i_receive_a_warning_about_course_start_date(step):
assert_true(world.css_has_text('.message-error', 'The course must have an assigned start date.'))
assert_true('error' in world.css_find(COURSE_START_DATE_CSS).first._element.get_attribute('class'))
assert_true('error' in world.css_find(COURSE_START_TIME_CSS).first._element.get_attribute('class'))
@step('the previously set start date is shown$')
def test_the_previously_set_start_date_is_shown(step):
verify_date_or_time(COURSE_START_DATE_CSS, '12/20/2013')
verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)
@step('Given I have tried to clear the course start$')
def test_i_have_tried_to_clear_the_course_start(step):
step.given("I have set course dates")
step.given("I clear the course start date")
step.given("I receive a warning about course start date")
@step('I have entered a new course start date$')
def test_i_have_entered_a_new_course_start_date(step):
set_date_or_time(COURSE_START_DATE_CSS, '12/22/2013')
@step('The warning about course start date goes away$')
def test_the_warning_about_course_start_date_goes_away(step):
assert world.is_css_not_present('.message-error')
assert_false('error' in world.css_find(COURSE_START_DATE_CSS).first._element.get_attribute('class'))
assert_false('error' in world.css_find(COURSE_START_TIME_CSS).first._element.get_attribute('class'))
@step('my new course start date is shown$')
def new_course_start_date_is_shown(step):
verify_date_or_time(COURSE_START_DATE_CSS, '12/22/2013')
# Time should have stayed from before attempt to clear date.
verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)
@step('I change fields$')
def test_i_change_fields(step):
set_date_or_time(COURSE_START_DATE_CSS, '7/7/7777')
set_date_or_time(COURSE_END_DATE_CSS, '7/7/7777')
set_date_or_time(ENROLLMENT_START_DATE_CSS, '7/7/7777')
set_date_or_time(ENROLLMENT_END_DATE_CSS, '7/7/7777')
@step('I change the course overview')
def test_change_course_overview(_step):
type_in_codemirror(0, "<h1>Overview</h1>")
@step('I click the "Upload Course Image" button')
def click_upload_button(_step):
button_css = '.action-upload-image'
world.css_click(button_css)
@step('I upload a new course image$')
def upload_new_course_image(_step):
upload_file('image.jpg', sub_path="uploads")
@step('I should see the new course image$')
def i_see_new_course_image(_step):
img_css = '#course-image'
images = world.css_find(img_css)
assert len(images) == 1
img = images[0]
expected_src = 'image.jpg'
# Don't worry about the domain in the URL
success_func = lambda _: img['src'].endswith(expected_src)
world.wait_for(success_func)
@step('the image URL should be present in the field')
def image_url_present(_step):
field_css = '#course-image-url'
expected_value = 'image.jpg'
assert world.css_value(field_css).endswith(expected_value)
############### HELPER METHODS ####################
def set_date_or_time(css, date_or_time):
"""
Sets date or time field.
"""
world.css_fill(css, date_or_time)
e = world.css_find(css).first
# hit Enter to apply the changes
e._element.send_keys(Keys.ENTER)
def verify_date_or_time(css, date_or_time):
"""
Verifies date or time field.
"""
# We need to wait for JavaScript to fill in the field, so we use
# css_has_value(), which first checks that the field is not blank
assert_true(world.css_has_value(css, date_or_time))
@step('I do not see the changes')
@step('I see the set dates')
def i_see_the_set_dates(_step):
"""
Ensure that each field has the value set in `test_and_i_set_course_dates`.
"""
verify_date_or_time(COURSE_START_DATE_CSS, '12/20/2013')
verify_date_or_time(COURSE_END_DATE_CSS, '12/26/2013')
verify_date_or_time(ENROLLMENT_START_DATE_CSS, '12/01/2013')
verify_date_or_time(ENROLLMENT_END_DATE_CSS, '12/10/2013')
verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)
# Unset times get set to 12 AM once the corresponding date has been set.
verify_date_or_time(COURSE_END_TIME_CSS, DEFAULT_TIME)
verify_date_or_time(ENROLLMENT_START_TIME_CSS, DEFAULT_TIME)
verify_date_or_time(ENROLLMENT_END_TIME_CSS, DUMMY_TIME)
| agpl-3.0 | -966,676,673,940,813,800 | 9,157,422,265,345,893,000 | 34.831683 | 104 | 0.694529 | false |
isotoma/buildbot_travis | buildbot_travis/tests/test_git.py | 2 | 1266 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from twisted.trial import unittest
from buildbot_travis.vcs import git
class GitUrlParser(unittest.TestCase):
def test_simple(self):
url = "git://github.com/tardyp/buildbot_travis"
parsed = git.ParsedGitUrl(url)
self.assertEqual(parsed.scheme, 'git')
self.assertEqual(parsed.netloc, 'github.com')
self.assertEqual(parsed.path, '/tardyp/buildbot_travis')
def test_user(self):
url = "git+ssh://[email protected]/tardyp/buildbot_travis"
parsed = git.ParsedGitUrl(url)
self.assertEqual(parsed.scheme, 'git+ssh')
self.assertEqual(parsed.netloc, 'github.com')
self.assertEqual(parsed.user, 'bla')
self.assertEqual(parsed.path, '/tardyp/buildbot_travis')
def test_userpass(self):
url = "git+ssh://bla:secrit::[email protected]/tardyp/buildbot_travis"
parsed = git.ParsedGitUrl(url)
self.assertEqual(parsed.scheme, 'git+ssh')
self.assertEqual(parsed.netloc, 'github.com')
self.assertEqual(parsed.user, 'bla')
self.assertEqual(parsed.passwd, 'secrit::!')
self.assertEqual(parsed.path, '/tardyp/buildbot_travis')
| apache-2.0 | 4,506,478,402,024,098,000 | 6,958,679,258,544,335,000 | 37.363636 | 73 | 0.671406 | false |
TrainingB/Clembot | clembot/exts/gyms/citymanager.py | 1 | 4523 | from discord.ext import commands
from clembot.core.logs import init_loggers
from clembot.exts.utils.utilities import Utilities
from clembot.core import checks
from clembot.exts.config.channelconfigmanager import ChannelConfigCache
class CityManager(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.dbi = bot.dbi
self.utilities = Utilities()
self.logger = init_loggers()
self.guild_dict = bot.guild_dict
self.MyGuildConfigCache = bot.MyGuildConfigCache
self.MyChannelConfigCache = ChannelConfigCache(self.bot)
self._cache = {}
@commands.command(pass_context=True, hidden=True, aliases=["get-city"])
async def get_city(self, ctx):
await self._get_city(ctx)
@commands.command(pass_context=True, hidden=True, aliases=["set-city"])
async def _set_city(self, ctx, city_state):
city_state = city_state.upper()
await self.MyChannelConfigCache.save_channel_city(ctx.message.guild.id, ctx.message.channel.id, city_state)
await self._get_city(ctx)
@commands.command(pass_context=True, hidden=True, aliases=["set-guild-city"])
@checks.guildowner_or_permissions(manage_guild=True)
async def _set_guild_city(self, ctx, city_state):
city_state = city_state.upper()
await self.MyGuildConfigCache.save_guild_config(ctx.message.guild.id, 'city', city_state)
await self._get_guild_city(ctx.message)
@commands.command(pass_context=True, hidden=True, aliases=["get-guild-city"])
@checks.guildowner_or_permissions(manage_guild=True)
async def get_guild_city(self, ctx):
await self._get_guild_city(ctx.message)
async def _get_city(self, ctx):
content = "Beep Beep! Reporting City for this channel / guild has not been set."
channel_city = await self.get_city_for_channel(ctx.guild.id, ctx.message.channel.id)
if channel_city:
content = f"Beep Beep! **{ctx.message.author.display_name}** Reporting City for this channel is **{channel_city}**."
return await self.utilities._send_message(ctx.message.channel, content)
async def _get_guild_city(self, message):
guild_city = await self.MyGuildConfigCache.get_guild_config(message.guild.id, 'city')
content = f"Beep Beep! **{message.author.display_name}** Reporting City for this guild is **{guild_city}**."
return await self.utilities._send_message(message.channel, content)
async def get_city_for_channel(self, guild_id, channel_id=None, parent_channel_id=None) -> str :
try:
city_for_channel = await self.MyChannelConfigCache.get_channel_config(guild_id=guild_id, channel_id=channel_id, config_name='city')
if not city_for_channel:
if parent_channel_id:
city_for_channel = await self.MyChannelConfigCache.get_channel_config(guild_id=guild_id, channel_id=parent_channel_id, config_name='city')
if not city_for_channel:
city_for_channel = await self.MyGuildConfigCache.get_guild_config(guild_id=guild_id, config_name='city')
return city_for_channel
except Exception as error:
self.logger.info(error)
return None
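    # Illustrative resolution order for get_city_for_channel (derived from the
    # branches above): the channel's own 'city' config is tried first, then the
    # parent channel's config (when a parent id is supplied), and finally the
    # guild-wide 'city' config as the fallback.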
async def get_city_for_channel_only(self, guild_id, channel_id, parent_channel_id=None) -> str :
try:
self.logger.info(f"read_channel_city({guild_id}, {channel_id}, {parent_channel_id})")
city_for_channel = await self.MyChannelConfigCache.get_channel_config(guild_id=guild_id, channel_id=channel_id, config_name='city')
if not city_for_channel:
city_for_channel = await self.MyChannelConfigCache.get_channel_config(guild_id=guild_id, channel_id=parent_channel_id, config_name='city')
return city_for_channel
except Exception as error:
print(error)
self.logger.info(error)
return None
# async def save_channel_city(self, guild_id, channel_id, city_state):
# print("save_channel_city()")
# try:
# await self.MyChannelConfigCache.save_channel_config('city', city_state, guild_id, channel_id)
# new_channel_city = await self.MyChannelConfigCache.get_channel_config('city', guild_id=guild_id, channel_id=channel_id)
# return new_channel_city
# except Exception as error:
# print(error)
# return None
def setup(bot):
bot.add_cog(CityManager(bot))
| gpl-3.0 | -3,037,844,893,559,876,000 | -4,861,760,049,286,292,000 | 40.495413 | 158 | 0.662171 | false |
einaru/luma | luma/plugins/search/SearchResult.py | 3 | 15190 | # -*- coding: utf-8 -*-
#
# plugins.search.SearchResult
#
# Copyright (c) 2011
# Einar Uvsløkk, <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
from PyQt4 import QtGui, QtCore
from PyQt4.QtGui import (QAbstractItemView, QSizePolicy, QSortFilterProxyModel,
QSpacerItem, QStandardItemModel, QTreeView, QWidget)
from base.gui.Dialog import ExportDialog, DeleteDialog
from base.util.IconTheme import pixmapFromTheme
class ResultView(QWidget):
"""This class represent a search result view.
"""
def __init__(self, filter='', attributes=[], resultlist=[], parent=None):
"""Initialize a result view for the `SearchPlugin`.
        :param filter: the filter applied to the search
:type filter: string
:param attributes: a list containing the attributes used in the
search operation. Usually extracted from the `filter`.
:type attributes: list
:param resultlist: a list of `SmartDataObject` from the search
operation.
:type resultlist: list
:param parent: the parent for this widget.
:type parent: QWidget
"""
super(ResultView, self).__init__(parent)
self.setObjectName('ResultView')
self.layout = QtGui.QVBoxLayout(self)
# Only display the no-result message if resultlist is empty
if len(resultlist) == 0:
self.retranslate(all=False)
self.onNoResult()
return
# The proxy model is used for sort and filter support
self.proxymodel = QSortFilterProxyModel(self)
self.proxymodel.setDynamicSortFilter(True)
self.headerdata = ['dn']
self.headerdata.extend(attributes)
self.resultdata = resultlist
# FIXME: should we create a custom item model ?
self.model = QStandardItemModel(0, len(self.headerdata), parent=self)
#self.model = ResultItemModel(self)
#self.model = ResultItemModel(self.headerdata, self.resultdata, self)
self.proxymodel.setSourceModel(self.model)
self.resultview = QTreeView(self)
self.resultview.setUniformRowHeights(True)
self.resultview.setRootIsDecorated(False)
self.resultview.setAlternatingRowColors(True)
self.resultview.setSortingEnabled(True)
self.resultview.setModel(self.proxymodel)
# For right-click context menu
self.resultview.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.resultview.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.layout.addWidget(self.resultview)
# The filter box enables the user to filter the returned search
# results. It becomes accessible with Ctrl-F (QKeySequence.Find)
self.filterBox = ResultFilterWidget(self.headerdata, parent=self)
self.filterBox.setVisible(False)
self.layout.addWidget(self.filterBox)
# We need to call the retranslate method before populating
# the result data
self.retranslate()
#self.model.populateHeader(self.headerdata)
#self.model.populateModel(self.resultdata)
self.setHeaderData(self.headerdata)
self.setResultData(self.resultdata)
self.resultview.resizeColumnToContents(0)
self.__createContextMenu()
self.__connectSlots()
def __connectSlots(self):
"""Connect signal and slots.
"""
self.resultview.customContextMenuRequested.connect(
self.onContextMenuRequested)
self.filterBox.inputEdit.textChanged['QString'].connect(
self.onFilterInputChanged)
self.filterBox.columnBox.currentIndexChanged[int].connect(
self.onFilterColumnChanged)
def __getVSpacer(self):
return QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Minimum)
def __createContextMenu(self):
"""Display the context menu.
"""
self.contextMenu = QtGui.QMenu()
self.contextMenuView = QtGui.QAction(self)
self.contextMenuDelete = QtGui.QAction(self)
self.contextMenuExport = QtGui.QAction(self)
self.contextMenu.addAction(self.contextMenuView)
self.contextMenu.addAction(self.contextMenuDelete)
self.contextMenu.addAction(self.contextMenuExport)
# Connect the context menu actions to the correct slots
self.contextMenuView.triggered.connect(self.onViewItemsSelected)
self.contextMenuDelete.triggered.connect(self.onDeleteItemsSelected)
self.contextMenuExport.triggered.connect(self.onExportItemsSelected)
def onNoResult(self):
"""Adds a styled *no result* message to the main layout.
"""
font = QtGui.QFont()
font.setBold(True)
sadface = QtGui.QLabel(self)
sadface.setPixmap(pixmapFromTheme('face-sad', ':/icons/48/face-sad'))
noresult = QtGui.QLabel(self)
noresult.setText(self.str_NO_RESULT)
noresult.setFont(font)
hlayout = QtGui.QHBoxLayout()
hlayout.addItem(self.__getVSpacer())
hlayout.addWidget(sadface)
hlayout.addWidget(noresult)
hlayout.addItem(self.__getVSpacer())
self.layout.addLayout(hlayout)
def setHeaderData(self, data=[]):
"""Populates the ``resultview`` model with header data.
Parameters:
- `data`: a list with header items. Usually this is the
attributelist from the LDAP search.
"""
i = 0
for header in data:
self.model.setHeaderData(i, QtCore.Qt.Horizontal, header)
i += 1
def setResultData(self, data=[]):
"""Populates the ``resultview`` model with result data.
Parameters:
- `data`: a list containing the SmartDataObjects representing
items in the LDAP search result.
"""
row = 0
for obj in data:
self.model.insertRow(row)
col = 0
for attr in self.headerdata:
if self.isDistinguishedName(attr):
modelData = obj.getPrettyDN()
elif self.isObjectClass(attr):
modelData = ','.join(obj.getObjectClasses())
elif obj.hasAttribute(attr):
if obj.isAttributeBinary(attr):
modelData = self.str_BINARY_DATA
else:
modelData = ','.join(obj.getAttributeValueList(attr))
self.model.setData(self.model.index(row, col), modelData)
col += 1
row += 1
def isDistinguishedName(self, attr):
"""Returns ``True`` if `attr` is a distinguished name,
``False`` otherwise.
Parameters:
- `attr`: the LDAP string attribute value to check.
"""
return attr.lower() == 'dn'
def isObjectClass(self, attr):
"""Returns ``True`` if `attr` is an object class, ``False``
otherwise.
Parameters:
- `attr`: the LDAP string attribute value to check.
"""
return attr.lower() == 'objectclass'
def onContextMenuRequested(self, point):
"""Display the context menu
"""
        # FIXME: In order to be able to export, delete and view search
        # result entries, we should make use of the various dialogs in
        # the Browser plugin. Until we have refactored the design in a
        # way that allows us to use these without accessing the browser
        # modules, we simply don't provide these options yet.
return
self.selection = self.resultview.selectedIndexes()
deleteSupport = True
exportSupport = True
rowsselected = len(self.selection) / len(self.headerdata)
if not rowsselected > 0:
self.contextMenu.setEnabled(False)
self.contextMenu.exec_(self.resultview.mapToGlobal(point))
return
self.contextMenu.setEnabled(True)
# Look over at Browser plugin for implementation of
# multiselect and operation support validation
print rowsselected
self.contextMenuView.setEnabled(True)
if rowsselected == 1:
self.contextMenuView.setText(self.str_VIEW_ITEM)
else:
self.contextMenuView.setText(self.str_VIEW_ITEMS)
if deleteSupport:
self.contextMenuDelete.setEnabled(True)
if rowsselected == 1:
self.contextMenuDelete.setText(self.str_DELETE_ITEM)
else:
self.contextMenuDelete.setText(self.str_DELETE_ITEMS)
if exportSupport:
self.contextMenuExport.setEnabled(True)
if rowsselected == 1:
self.contextMenuExport.setText(self.str_EXPORT_ITEM)
else:
self.contextMenuExport.setText(self.str_EXPORT_ITEMS)
# Finally we execute the context menu
self.contextMenu.exec_(self.resultview.mapToGlobal(point))
def onViewItemsSelected(self):
"""Slot for the *view* context menu action.
"""
raise NotImplementedError(
'Need to implement a proper model for this to be supported')
def onDeleteItemsSelected(self):
"""Slot for the *delete* context menu action.
"""
        msg = 'Delete from the Search Plugin is not implemented yet.'
dialog = DeleteDialog(self, msg)
dialog.setDeleteItems([])
dialog.exec_()
def onExportItemsSelected(self):
"""Slot for the 'export' context menu action.
"""
        msg = 'Export from the Search Plugin is not implemented yet.'
dialog = ExportDialog(self, msg)
# Only for proof of concept
dialog.setExportData([])
dialog.exec_()
def onFilterBoxVisibilityChanged(self, visible):
"""Slot for the QKeySequence.Find.
- `visible`: a boolean value indicating wether or not to toggle
the filter box widget visibility on or off.
"""
if visible:
self.filterBox.setVisible(True)
self.filterBox.inputEdit.setFocus()
else:
            # I believe it's common practice to clear the filter when
            # the filter box is closed. This is at least the way filter
            # boxes work in most web browsers.
self.filterBox.inputEdit.clear()
self.filterBox.setVisible(False)
self.resultview.setFocus()
def onFilterInputChanged(self, filter=''):
"""Slot for the filter input in the result filter widget.
We get the selected syntax from the syntax combobox
"""
# The PyQt4 QVariant is causing some problems here, when we try
# to use the <combobox>.itemData directly, even though the data
# holds valid QRexExp.PatternSyntax values.
        # We therefore need to explicitly convert the QVariant to an integer.
i = self.filterBox.syntaxBox.currentIndex()
syntaxIndex = self.filterBox.syntaxBox.itemData(i).toInt()[0]
syntax = QtCore.QRegExp.PatternSyntax(syntaxIndex)
        # As of now we do filtering in a case-insensitive way, until we
        # come up with a way to introduce case-sensitivity selection
        # without cluttering the UI. We want to keep the filter widget
        # as clean and simple as possible.
regex = QtCore.QRegExp(filter, QtCore.Qt.CaseInsensitive, syntax)
self.proxymodel.setFilterRegExp(regex)
def onFilterColumnChanged(self, index):
"""Slot for the column combobox in the filter box widget.
"""
self.proxymodel.setFilterKeyColumn(index)
def retranslate(self, all=True):
"""For dynamic translation support.
"""
self.str_VIEW_ITEM = QtGui.QApplication.translate(
'ResultView', 'View Item')
self.str_VIEW_ITEMS = QtGui.QApplication.translate(
'ResultView', 'View Items')
self.str_DELETE_ITEM = QtGui.QApplication.translate(
'ResultView', 'Delete Item')
self.str_DELETE_ITEMS = QtGui.QApplication.translate(
'ResultView', 'Delete Items')
self.str_EXPORT_ITEM = QtGui.QApplication.translate(
'ResultView', 'Export Item')
self.str_EXPORT_ITEMS = QtGui.QApplication.translate(
'ResultView', 'Export Items')
self.str_NO_RESULT = QtGui.QApplication.translate(
'ResultView', 'Sorry, no result to display!')
self.str_BINARY_DATA = QtGui.QApplication.translate(
'ResultView', 'Binary Data')
if all:
self.filterBox.retranslate()
class ResultFilterWidget(QWidget):
"""A Widget for basic filter input on a result view.
    This class provides a simple and clean filter widget, enabling the
    user to filter the returned results. It provides several filter
    syntax options, a column selector and a filter input widget.
    It aims at being simple, small and out of the way.
"""
syntaxOptions = {
'Fixed String': QtCore.QRegExp.FixedString,
'Regular Expression': QtCore.QRegExp.RegExp,
'Wildcard': QtCore.QRegExp.Wildcard,
}
def __init__(self, columns=[], parent=None):
"""
Parameters:
- `columns`: a list containing the columns to populate the
column selector. Usually the main model headerdata (search
attributelist).
"""
super(ResultFilterWidget, self).__init__(parent)
self.layout = QtGui.QHBoxLayout(self)
self.syntaxBox = QtGui.QComboBox(self)
self.columnBox = QtGui.QComboBox(self)
self.inputEdit = QtGui.QLineEdit(self)
spacer = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Minimum)
for text, syntax in sorted(self.syntaxOptions.iteritems()):
self.syntaxBox.addItem(text, userData=syntax)
self.columnBox.addItems(columns)
self.layout.addItem(spacer)
self.layout.addWidget(self.syntaxBox)
self.layout.addWidget(self.columnBox)
self.layout.addWidget(self.inputEdit)
self.retranslate()
def retranslate(self):
"""For dynamic translation support.
"""
self.syntaxBox.setToolTip(QtGui.QApplication.translate(
'ResultFilterWidget', 'Choose filter syntax.'))
self.columnBox.setToolTip(QtGui.QApplication.translate(
'ResultFilterWidget', 'Choose filter column.'))
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
| gpl-2.0 | -1,194,832,054,611,544,300 | 7,419,394,793,246,486,000 | 36.689826 | 79 | 0.640529 | false |
drincruz/luigi | test/test_ssh.py | 95 | 1796 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
from helpers import unittest
from luigi.contrib.ssh import RemoteContext
class TestMockedRemoteContext(unittest.TestCase):
def test_subprocess_delegation(self):
""" Test subprocess call structure using mock module """
orig_Popen = subprocess.Popen
self.last_test = None
def Popen(cmd, **kwargs):
self.last_test = cmd
subprocess.Popen = Popen
context = RemoteContext(
"some_host",
username="luigi",
key_file="/some/key.pub"
)
context.Popen(["ls"])
self.assertTrue("ssh" in self.last_test)
self.assertTrue("-i" in self.last_test)
self.assertTrue("/some/key.pub" in self.last_test)
self.assertTrue("luigi@some_host" in self.last_test)
self.assertTrue("ls" in self.last_test)
subprocess.Popen = orig_Popen
def test_check_output_fail_connect(self):
""" Test check_output to a non-existing host """
context = RemoteContext("__NO_HOST_LIKE_THIS__", connect_timeout=1)
self.assertRaises(
subprocess.CalledProcessError,
context.check_output, ["ls"]
)
| apache-2.0 | 6,864,898,566,654,277,000 | 2,809,269,236,761,216,500 | 31.654545 | 75 | 0.659243 | false |
fly19890211/edx-platform | common/test/acceptance/tests/lms/test_lms_split_test_courseware_search.py | 58 | 6906 | """
Test courseware search
"""
import os
import json
from ..helpers import remove_file
from ...pages.common.logout import LogoutPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.lms.courseware_search import CoursewareSearchPage
from ...pages.lms.course_nav import CourseNavPage
from ...fixtures.course import XBlockFixtureDesc
from ..helpers import create_user_partition_json
from xmodule.partitions.partitions import Group
from nose.plugins.attrib import attr
from ..studio.base_studio_test import ContainerBase
from ...pages.studio.auto_auth import AutoAuthPage as StudioAutoAuthPage
@attr('shard_1')
class SplitTestCoursewareSearchTest(ContainerBase):
"""
Test courseware search on Split Test Module.
"""
USERNAME = 'STUDENT_TESTER'
EMAIL = '[email protected]'
TEST_INDEX_FILENAME = "test_root/index_file.dat"
def setUp(self, is_staff=True):
"""
Create search page and course content to search
"""
# create test file in which index for this test will live
with open(self.TEST_INDEX_FILENAME, "w+") as index_file:
json.dump({}, index_file)
self.addCleanup(remove_file, self.TEST_INDEX_FILENAME)
super(SplitTestCoursewareSearchTest, self).setUp(is_staff=is_staff)
self.staff_user = self.user
self.courseware_search_page = CoursewareSearchPage(self.browser, self.course_id)
self.course_navigation_page = CourseNavPage(self.browser)
self.course_outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self._add_and_configure_split_test()
self._studio_reindex()
def _auto_auth(self, username, email, staff):
"""
        Log out and log back in with the given credentials.
"""
LogoutPage(self.browser).visit()
StudioAutoAuthPage(self.browser, username=username, email=email,
course_id=self.course_id, staff=staff).visit()
def _studio_reindex(self):
"""
Reindex course content on studio course page
"""
self._auto_auth(self.staff_user["username"], self.staff_user["email"], True)
self.course_outline.visit()
self.course_outline.start_reindex()
self.course_outline.wait_for_ajax()
def _add_and_configure_split_test(self):
"""
Add a split test and a configuration to a test course fixture
"""
# Create a new group configurations
# pylint: disable=W0212
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
"Name",
"Description.",
[Group("0", "Group A"), Group("1", "Group B")]
),
create_user_partition_json(
456,
"Name 2",
"Description 2.",
[Group("2", "Group C"), Group("3", "Group D")]
),
],
},
})
# Add a split test module to the 'Test Unit' vertical in the course tree
split_test_1 = XBlockFixtureDesc('split_test', 'Test Content Experiment 1', metadata={'user_partition_id': 0})
split_test_1_parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[1]
self.course_fixture.create_xblock(split_test_1_parent_vertical.locator, split_test_1)
# Add a split test module to the 'Test 2 Unit' vertical in the course tree
split_test_2 = XBlockFixtureDesc('split_test', 'Test Content Experiment 2', metadata={'user_partition_id': 456})
split_test_2_parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[2]
self.course_fixture.create_xblock(split_test_2_parent_vertical.locator, split_test_2)
def populate_course_fixture(self, course_fixture):
"""
Populate the children of the test course fixture.
"""
course_fixture.add_advanced_settings({
u"advanced_modules": {"value": ["split_test"]},
})
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Content Section').add_children(
XBlockFixtureDesc('sequential', 'Content Subsection').add_children(
XBlockFixtureDesc('vertical', 'Content Unit').add_children(
XBlockFixtureDesc('html', 'VISIBLETOALLCONTENT', data='<html>VISIBLETOALLCONTENT</html>')
)
)
),
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
),
XBlockFixtureDesc('chapter', 'X Section').add_children(
XBlockFixtureDesc('sequential', 'X Subsection').add_children(
XBlockFixtureDesc('vertical', 'X Unit')
)
),
)
self.test_1_breadcrumb = "Test Section \xe2\x96\xb8 Test Subsection \xe2\x96\xb8 Test Unit".decode("utf-8")
self.test_2_breadcrumb = "X Section \xe2\x96\xb8 X Subsection \xe2\x96\xb8 X Unit".decode("utf-8")
def test_page_existence(self):
"""
Make sure that the page is accessible.
"""
self._auto_auth(self.USERNAME, self.EMAIL, False)
self.courseware_search_page.visit()
def test_search_for_experiment_content_user_not_assigned(self):
"""
Test user can't search for experiment content if not assigned to a group.
"""
self._auto_auth(self.USERNAME, self.EMAIL, False)
self.courseware_search_page.visit()
self.courseware_search_page.search_for_term("Group")
assert "Sorry, no results were found." in self.courseware_search_page.search_results.html[0]
def test_search_for_experiment_content_user_assigned_to_one_group(self):
"""
Test user can search for experiment content restricted to his group
when assigned to just one experiment group
"""
self._auto_auth(self.USERNAME, self.EMAIL, False)
self.courseware_search_page.visit()
self.course_navigation_page.go_to_section("Test Section", "Test Subsection")
self.courseware_search_page.search_for_term("Group")
assert "1 result" in self.courseware_search_page.search_results.html[0]
assert self.test_1_breadcrumb in self.courseware_search_page.search_results.html[0]
assert self.test_2_breadcrumb not in self.courseware_search_page.search_results.html[0]
| agpl-3.0 | -5,506,982,147,044,915,000 | -4,231,390,968,957,292,500 | 40.107143 | 120 | 0.61309 | false |
vicky2135/lucious | oscar/lib/python2.7/site-packages/django/core/management/commands/inspectdb.py | 56 | 12874 | from __future__ import unicode_literals
import keyword
import re
from collections import OrderedDict
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections
from django.utils.encoding import force_text
class Command(BaseCommand):
help = "Introspects the database tables in the given database and outputs a Django model module."
requires_system_checks = False
db_module = 'django.db'
def add_arguments(self, parser):
parser.add_argument(
'table', action='store', nargs='*', type=str,
help='Selects what tables or views should be introspected.',
)
parser.add_argument(
'--database', action='store', dest='database', default=DEFAULT_DB_ALIAS,
help='Nominates a database to introspect. Defaults to using the "default" database.',
)
def handle(self, **options):
try:
for line in self.handle_inspection(options):
self.stdout.write("%s\n" % line)
except NotImplementedError:
raise CommandError("Database inspection isn't supported for the currently selected database backend.")
def handle_inspection(self, options):
connection = connections[options['database']]
# 'table_name_filter' is a stealth option
table_name_filter = options.get('table_name_filter')
def table2model(table_name):
return re.sub(r'[^a-zA-Z0-9]', '', table_name.title())
def strip_prefix(s):
return s[1:] if s.startswith("u'") else s
with connection.cursor() as cursor:
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# * Make sure each ForeignKey has `on_delete` set to the desired behavior."
yield (
"# * Remove `managed = False` lines if you wish to allow "
"Django to create, modify, and delete the table"
)
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield "from __future__ import unicode_literals"
yield ''
yield 'from %s import models' % self.db_module
known_models = []
tables_to_introspect = options['table'] or connection.introspection.table_names(cursor)
for table_name in tables_to_introspect:
if table_name_filter is not None and callable(table_name_filter):
if not table_name_filter(table_name):
continue
try:
try:
relations = connection.introspection.get_relations(cursor, table_name)
except NotImplementedError:
relations = {}
try:
indexes = connection.introspection.get_indexes(cursor, table_name)
except NotImplementedError:
indexes = {}
try:
constraints = connection.introspection.get_constraints(cursor, table_name)
except NotImplementedError:
constraints = {}
table_description = connection.introspection.get_table_description(cursor, table_name)
except Exception as e:
yield "# Unable to inspect table '%s'" % table_name
yield "# The error was: %s" % force_text(e)
continue
yield ''
yield ''
yield 'class %s(models.Model):' % table2model(table_name)
known_models.append(table2model(table_name))
used_column_names = [] # Holds column names used in the table so far
column_to_field_name = {} # Maps column names to names of model fields
for row in table_description:
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = OrderedDict() # Holds Field parameters such as 'db_column'.
column_name = row[0]
is_relation = column_name in relations
att_name, params, notes = self.normalize_col_name(
column_name, used_column_names, is_relation)
extra_params.update(params)
comment_notes.extend(notes)
used_column_names.append(att_name)
column_to_field_name[column_name] = att_name
# Add primary_key and unique, if necessary.
if column_name in indexes:
if indexes[column_name]['primary_key']:
extra_params['primary_key'] = True
elif indexes[column_name]['unique']:
extra_params['unique'] = True
if is_relation:
rel_to = (
"self" if relations[column_name][1] == table_name
else table2model(relations[column_name][1])
)
if rel_to in known_models:
field_type = 'ForeignKey(%s' % rel_to
else:
field_type = "ForeignKey('%s'" % rel_to
else:
# Calling `get_field_type` to get the field type string and any
# additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
field_type += '('
# Don't output 'id = meta.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and extra_params == {'primary_key': True}:
if field_type == 'AutoField(':
continue
elif field_type == 'IntegerField(' and not connection.features.can_introspect_autofield:
comment_notes.append('AutoField?')
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row[6]: # If it's NULL...
if field_type == 'BooleanField(':
field_type = 'NullBooleanField('
else:
extra_params['blank'] = True
extra_params['null'] = True
field_desc = '%s = %s%s' % (
att_name,
# Custom fields will have a dotted path
'' if '.' in field_type else 'models.',
field_type,
)
if field_type.startswith('ForeignKey('):
field_desc += ', models.DO_NOTHING'
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join(
'%s=%s' % (k, strip_prefix(repr(v)))
for k, v in extra_params.items())
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
for meta_line in self.get_meta(table_name, constraints, column_to_field_name):
yield meta_line
def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find('__') >= 0:
while new_name.find('__') >= 0:
new_name = new_name.replace('__', '_')
if col_name.lower().find('__') >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes
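    # Illustrative input -> output pairs for normalize_col_name (derived from
    # the rules above; the column names are made-up examples):
    #   ('User ID', [], False)   -> ('user_id', {'db_column': 'User ID'}, [...])
    #   ('class',   [], False)   -> ('class_field', {'db_column': 'class'}, [...])
    #   ('group_id', [], True)   -> ('group', {}, [])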
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = OrderedDict()
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for data_types_reverse to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = int(row[3])
if field_type == 'DecimalField':
if row[4] is None or row[5] is None:
field_notes.append(
'max_digits and decimal_places have been guessed, as this '
'database handles decimal fields as float')
field_params['max_digits'] = row[4] if row[4] is not None else 10
field_params['decimal_places'] = row[5] if row[5] is not None else 5
else:
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
def get_meta(self, table_name, constraints, column_to_field_name):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
unique_together = []
for index, params in constraints.items():
if params['unique']:
columns = params['columns']
if len(columns) > 1:
# we do not want to include the u"" or u'' prefix
# so we build the string rather than interpolate the tuple
tup = '(' + ', '.join("'%s'" % column_to_field_name[c] for c in columns) + ')'
unique_together.append(tup)
meta = ["",
" class Meta:",
" managed = False",
" db_table = '%s'" % table_name]
if unique_together:
tup = '(' + ', '.join(unique_together) + ',)'
meta += [" unique_together = %s" % tup]
return meta
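    # Example of a generated Meta block (illustrative; the table and column
    # names are placeholders):
    #
    #     class Meta:
    #         managed = False
    #         db_table = 'legacy_table'
    #         unique_together = (('col_a', 'col_b'),)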
| bsd-3-clause | 335,556,738,244,778,560 | -2,182,118,872,389,163,500 | 43.857143 | 114 | 0.515846 | false |
PabloCastellano/nodeshot | nodeshot/networking/net/models/choices.py | 8 | 4580 | from django.utils.translation import ugettext_lazy as _
from nodeshot.core.base.utils import choicify
ROUTING_PROTOCOLS = (
('olsr', 'OLSR'),
('batman', 'B.A.T.M.A.N.'),
('batman-adv', 'B.A.T.M.A.N. advanced'),
('bmx', 'BMX (Batman Experimental)'),
('babel', 'Babel'),
('802.11s', 'Open 802.11s'),
('bgp', 'BGP'),
('ospf', 'OSPF'),
('static', _('Static Routing')),
)
DEVICE_TYPES = {
'adsl': 'ADSL',
'battery': 'battery',
'breaker': 'breaker',
'cam': 'cam',
'cloudy': 'cloudy',
'confine': 'confine',
'fomconv': 'fomconv',
'generator': 'generator',
'generic': 'generic',
'mobile': 'mobile',
'nat': 'nat',
'olt': 'olt',
'onu': 'onu',
'other': 'other',
'phone': 'phone',
'ppanel': 'ppanel',
'rack': 'rack',
'radio device': 'radio',
'router': 'router',
'sensor': 'sensor',
'server': 'server',
'solar': 'solar',
'splitter': 'splitter',
'switch managed': 'switch',
'torpedo': 'torpedo',
'ups': 'ups',
}
DEVICE_TYPES_CHOICES = choicify(DEVICE_TYPES)
DEVICE_STATUS = {
'not_reachable': 0, # device is not reachable
'reachable': 1, # device is reachable
'unknown': 2, # device has not been seen by the system yet
'inactive': 3, # manually deactivated by user or admin
}
DEVICE_STATUS_CHOICES = choicify(DEVICE_STATUS)
WIRELESS_MODE = (
('sta', _('station')),
('ap', _('access point')),
('adhoc', _('adhoc')),
('monitor', _('monitor')),
('mesh', _('mesh')),
)
WIRELESS_CHANNEL = (
('2412', '2.4Ghz Ch 1 (2412 Mhz)'),
('2417', '2.4Ghz Ch 2 (2417 Mhz)'),
('2422', '2.4Ghz Ch 3 (2422 Mhz)'),
('2427', '2.4Ghz Ch 4 (2427 Mhz)'),
('2427', '2.4Ghz Ch 5 (2432 Mhz)'),
('2437', '2.4Ghz Ch 6 (2437 Mhz)'),
('2442', '2.4Ghz Ch 7 (2442 Mhz)'),
('2447', '2.4Ghz Ch 8 (2447 Mhz)'),
('2452', '2.4Ghz Ch 9 (2452 Mhz)'),
('2457', '2.4Ghz Ch 10 (2457 Mhz)'),
('2462', '2.4Ghz Ch 11 (2462 Mhz)'),
('2467', '2.4Ghz Ch 12 (2467 Mhz)'),
('2472', '2.4Ghz Ch 13 (2472 Mhz)'),
('2484', '2.4Ghz Ch 14 (2484 Mhz)'),
('4915', '5Ghz Ch 183 (4915 Mhz)'),
('4920', '5Ghz Ch 184 (4920 Mhz)'),
('4925', '5Ghz Ch 185 (4925 Mhz)'),
('4935', '5Ghz Ch 187 (4935 Mhz)'),
('4940', '5Ghz Ch 188 (4940 Mhz)'),
('4945', '5Ghz Ch 189 (4945 Mhz)'),
('4960', '5Ghz Ch 192 (4960 Mhz)'),
('4980', '5Ghz Ch 196 (4980 Mhz)'),
('5035', '5Ghz Ch 7 (5035 Mhz)'),
('5040', '5Ghz Ch 8 (5040 Mhz)'),
('5045', '5Ghz Ch 9 (5045 Mhz)'),
('5055', '5Ghz Ch 11 (5055 Mhz)'),
('5060', '5Ghz Ch 12 (5060 Mhz)'),
('5080', '5Ghz Ch 16 (5080 Mhz)'),
('5170', '5Ghz Ch 34 (5170 Mhz)'),
('5180', '5Ghz Ch 36 (5180 Mhz)'),
('5190', '5Ghz Ch 38 (5190 Mhz)'),
('5200', '5Ghz Ch 40 (5200 Mhz)'),
('5210', '5Ghz Ch 42 (5210 Mhz)'),
('5220', '5Ghz Ch 44 (5220 Mhz)'),
('5230', '5Ghz Ch 46 (5230 Mhz)'),
('5240', '5Ghz Ch 48 (5240 Mhz)'),
('5260', '5Ghz Ch 52 (5260 Mhz)'),
('5280', '5Ghz Ch 56 (5280 Mhz)'),
('5300', '5Ghz Ch 60 (5300 Mhz)'),
('5320', '5Ghz Ch 64 (5320 Mhz)'),
('5500', '5Ghz Ch 100 (5500 Mhz)'),
('5520', '5Ghz Ch 104 (5520 Mhz)'),
('5540', '5Ghz Ch 108 (5540 Mhz)'),
('5560', '5Ghz Ch 112 (5560 Mhz)'),
('5580', '5Ghz Ch 116 (5580 Mhz)'),
('5600', '5Ghz Ch 120 (5600 Mhz)'),
('5620', '5Ghz Ch 124 (5620 Mhz)'),
('5640', '5Ghz Ch 128 (5640 Mhz)'),
('5660', '5Ghz Ch 132 (5660 Mhz)'),
('5680', '5Ghz Ch 136 (5680 Mhz)'),
('5700', '5Ghz Ch 140 (5700 Mhz)'),
('5745', '5Ghz Ch 149 (5745 Mhz)'),
('5765', '5Ghz Ch 153 (5765 Mhz)'),
('5785', '5Ghz Ch 157 (5785 Mhz)'),
('5805', '5Ghz Ch 161 (5805 Mhz)'),
('5825', '5Ghz Ch 165 (5825 Mhz)')
)
INTERFACE_TYPES = {
'loopback': 0,
'ethernet': 1,
'wireless': 2,
'bridge': 3,
'virtual': 4,
'tunnel': 5,
'vlan': 6,
}
INTERFACE_TYPE_CHOICES = choicify(INTERFACE_TYPES)
WIRELESS_STANDARDS = (
('802.11a', '802.11a'),
('802.11b', '802.11b'),
('802.11g', '802.11g'),
('802.11n', '802.11n'),
('802.11s', '802.11s'),
    ('802.11ac', '802.11ac'),
    ('802.11ad', '802.11ad'),
)
ETHERNET_STANDARDS = (
('legacy', 'Legacy Ethernet'),
('fast', '10/100 Fast Ethernet'),
('gigabit', '10/100/1000 Gigabit Ethernet'),
('basefx', '100/1000 BaseFX (Fiber)'),
)
DUPLEX_CHOICES = (
('full', 'full-duplex'),
('half', 'half-duplex')
)
IP_PROTOCOLS = (
('ipv4', 'ipv4'),
('ipv6', 'ipv6')
)
| gpl-3.0 | -113,404,479,699,354,780 | 7,464,306,174,688,703,000 | 27.987342 | 72 | 0.51048 | false |
shaananc/security-proj2 | bindings/python/topsort.py | 218 | 13288 | # topsort - dependency (topological) sorting and cycle finding functions
# Copyright (C) 2007 RADLogic
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# See http://www.fsf.org/licensing/licenses/lgpl.txt for full license text.
"""Provide toplogical sorting (i.e. dependency sorting) functions.
The topsort function is based on code posted on Usenet by Tim Peters.
Modifications:
- added doctests
- changed some bits to use current Python idioms
(listcomp instead of filter, +=/-=, inherit from Exception)
- added a topsort_levels version that groups the items in each dependency
  level into a sub-list
- added find_cycles to aid in cycle debugging
Run this module directly to run the doctests (unittests).
Make sure they all pass before checking in any modifications.
Requires Python >= 2.2
(For Python 2.2 also requires separate sets.py module)
This requires the rad_util.py module.
"""
# Provide support for Python 2.2+
from __future__ import generators
__version__ = '$Revision: 0.9 $'
__date__ = '$Date: 2007/03/27 04:15:26 $'
__credits__ = '''Tim Peters -- original topsort code
Tim Wegener -- doctesting, updating to current idioms, topsort_levels,
find_cycles
'''
# Make Python 2.3 sets look like Python 2.4 sets.
try:
set
except NameError:
from sets import Set as set
from rad_util import is_rotated
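# Note: is_rotated(a, b) is assumed to return True when b is a rotation of a;
# that is how find_cycles below uses it to de-duplicate cycles. A minimal
# equivalent, should rad_util be unavailable, could look like:
#
#     def is_rotated(seq1, seq2):
#         if len(seq1) != len(seq2):
#             return False
#         doubled = list(seq2) + list(seq2)
#         return list(seq1) in [doubled[i:i + len(seq1)]
#                               for i in range(len(seq2) or 1)]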
class CycleError(Exception):
"""Cycle Error"""
pass
def topsort(pairlist):
"""Topologically sort a list of (parent, child) pairs.
Return a list of the elements in dependency order (parent to child order).
>>> print topsort( [(1,2), (3,4), (5,6), (1,3), (1,5), (1,6), (2,5)] )
[1, 2, 3, 5, 4, 6]
>>> print topsort( [(1,2), (1,3), (2,4), (3,4), (5,6), (4,5)] )
[1, 2, 3, 4, 5, 6]
>>> print topsort( [(1,2), (2,3), (3,2)] )
Traceback (most recent call last):
CycleError: ([1], {2: 1, 3: 1}, {2: [3], 3: [2]})
"""
num_parents = {} # element -> # of predecessors
children = {} # element -> list of successors
for parent, child in pairlist:
# Make sure every element is a key in num_parents.
if not num_parents.has_key( parent ):
num_parents[parent] = 0
if not num_parents.has_key( child ):
num_parents[child] = 0
# Since child has a parent, increment child's num_parents count.
num_parents[child] += 1
# ... and parent gains a child.
children.setdefault(parent, []).append(child)
# Suck up everything without a parent.
answer = [x for x in num_parents.keys() if num_parents[x] == 0]
# For everything in answer, knock down the parent count on its children.
# Note that answer grows *in* the loop.
for parent in answer:
del num_parents[parent]
if children.has_key( parent ):
for child in children[parent]:
num_parents[child] -= 1
if num_parents[child] == 0:
answer.append( child )
# Following "del" isn't needed; just makes
# CycleError details easier to grasp.
del children[parent]
if num_parents:
# Everything in num_parents has at least one child ->
# there's a cycle.
raise CycleError(answer, num_parents, children)
return answer
def topsort_levels(pairlist):
"""Topologically sort a list of (parent, child) pairs into depth levels.
This returns a generator.
    Turn this into an iterator using the iter built-in function.
(if you iterate over the iterator, each element gets generated when
it is asked for, rather than generating the whole list up-front.)
Each generated element is a list of items at that dependency level.
>>> dependency_pairs = [(1,2), (3,4), (5,6), (1,3), (1,5), (1,6), (2,5)]
>>> for level in iter(topsort_levels( dependency_pairs )):
... print level
[1]
[2, 3]
[4, 5]
[6]
>>> dependency_pairs = [(1,2), (1,3), (2,4), (3,4), (5,6), (4,5)]
>>> for level in iter(topsort_levels( dependency_pairs )):
... print level
[1]
[2, 3]
[4]
[5]
[6]
>>> dependency_pairs = [(1,2), (2,3), (3,4), (4, 3)]
>>> try:
... for level in iter(topsort_levels( dependency_pairs )):
... print level
... except CycleError, exc:
... print 'CycleError:', exc
[1]
[2]
CycleError: ({3: 1, 4: 1}, {3: [4], 4: [3]})
The cycle error should look like.
CycleError: ({3: 1, 4: 1}, {3: [4], 4: [3]})
# todo: Make the doctest more robust (i.e. handle arbitrary dict order).
"""
num_parents = {} # element -> # of predecessors
children = {} # element -> list of successors
for parent, child in pairlist:
# Make sure every element is a key in num_parents.
if not num_parents.has_key( parent ):
num_parents[parent] = 0
if not num_parents.has_key( child ):
num_parents[child] = 0
# Since child has a parent, increment child's num_parents count.
num_parents[child] += 1
# ... and parent gains a child.
children.setdefault(parent, []).append(child)
return topsort_levels_core(num_parents, children)
def topsort_levels_core(num_parents, children):
"""Topologically sort a bunch of interdependent items based on dependency.
This returns a generator.
    Turn this into an iterator using the iter built-in function.
(if you iterate over the iterator, each element gets generated when
it is asked for, rather than generating the whole list up-front.)
Each generated element is a list of items at that dependency level.
>>> list(topsort_levels_core(
... {1: 0, 2: 1, 3: 1, 4: 1, 5: 2, 6: 2},
... {1: [2, 3, 5, 6], 2: [5], 3: [4], 4: [], 5: [6]}))
[[1], [2, 3], [4, 5], [6]]
>>> list(topsort_levels_core(
... {1: 0, 2: 2, 3: 1},
... {1: [2], 2: [3], 3: [2]}))
Traceback (most recent call last):
CycleError: ({2: 1, 3: 1}, {2: [3], 3: [2]})
This function has a more complicated interface than topsort_levels,
but is useful if the data is easier to generate in this form.
Arguments:
num_parents -- key: item, value: number of parents (predecessors)
children -- key: item, value: list of children (successors)
"""
while 1:
# Suck up everything without a predecessor.
level_parents = [x for x in num_parents.keys() if num_parents[x] == 0]
if not level_parents:
break
# Offer the next generated item,
# which is a list of the items at this dependency level.
yield level_parents
# For everything item in this level,
# decrement the parent count,
# since we have accounted for its parent.
for level_parent in level_parents:
del num_parents[level_parent]
if children.has_key(level_parent):
for level_parent_child in children[level_parent]:
num_parents[level_parent_child] -= 1
del children[level_parent]
if num_parents:
# Everything in num_parents has at least one child ->
# there's a cycle.
raise CycleError(num_parents, children)
else:
# This is the end of the generator.
raise StopIteration
def find_cycles(parent_children):
"""Yield cycles. Each result is a list of items comprising a cycle.
Use a 'stack' based approach to find all the cycles.
This is a generator, so yields each cycle as it finds it.
It is implicit that the last item in each cycle list is a parent of the
first item (thereby forming a cycle).
Arguments:
parent_children -- parent -> collection of children
Simplest cycle:
>>> cycles = list(find_cycles({'A': ['B'], 'B': ['A']}))
>>> len(cycles)
1
>>> cycle = cycles[0]
>>> cycle.sort()
>>> print cycle
['A', 'B']
Simplest cycle with extra baggage at the start and the end:
>>> cycles = list(find_cycles(parent_children={'A': ['B'],
... 'B': ['C'],
... 'C': ['B', 'D'],
... 'D': [],
... }))
>>> len(cycles)
1
>>> cycle = cycles[0]
>>> cycle.sort()
>>> print cycle
['B', 'C']
Double cycle:
>>> cycles = list(find_cycles(parent_children={'A': ['B'],
... 'B': ['C1', 'C2'],
... 'C1': ['D1'],
... 'D1': ['E1'],
... 'E1': ['D1'],
... 'C2': ['D2'],
... 'D2': ['E2'],
... 'E2': ['D2'],
... }))
>>> len(cycles)
2
>>> for cycle in cycles:
... cycle.sort()
>>> cycles.sort()
>>> cycle1 = cycles[0]
>>> cycle1.sort()
>>> print cycle1
['D1', 'E1']
>>> cycle2 = cycles[1]
>>> cycle2.sort()
>>> print cycle2
['D2', 'E2']
Simple cycle with children not specified for one item:
# todo: Should this barf instead?
>>> cycles = list(find_cycles(parent_children={'A': ['B'],
... 'B': ['A'],
... 'C': ['D']}))
>>> len(cycles)
1
>>> cycle = cycles[0]
>>> cycle.sort()
>>> print cycle
['A', 'B']
Diamond cycle
>>> cycles = list(find_cycles(parent_children={'A': ['B1', 'B2'],
... 'B1': ['C'],
... 'B2': ['C'],
... 'C': ['A', 'B1']}))
>>> len(cycles)
3
>>> sorted_cycles = []
>>> for cycle in cycles:
... cycle = list(cycle)
... cycle.sort()
... sorted_cycles.append(cycle)
>>> sorted_cycles.sort()
>>> for cycle in sorted_cycles:
... print cycle
['A', 'B1', 'C']
['A', 'B2', 'C']
['B1', 'C']
Hairy case (order can matter if something is wrong):
(Note order of B and C in the list.)
>>> cycles = list(find_cycles(parent_children={
... 'TD': ['DD'],
... 'TC': ['DC'],
... 'DC': ['DQ'],
... 'C': ['DQ'],
... 'DQ': ['IA', 'TO'],
... 'IA': ['A'],
... 'A': ['B', 'C'],
... }))
>>> len(cycles)
1
>>> cycle = cycles[0]
>>> cycle.sort()
>>> print cycle
['A', 'C', 'DQ', 'IA']
"""
cycles = []
visited_nodes = set()
for parent in parent_children:
if parent in visited_nodes:
# This node is part of a path that has already been traversed.
continue
paths = [[parent]]
while paths:
path = paths.pop()
parent = path[-1]
try:
children = parent_children[parent]
except KeyError:
continue
for child in children:
# Keeping a set of the path nodes, for O(1) lookups at the
# expense of more memory and complexity, actually makes speed
# worse. (Due to construction of sets.)
# This is O(N).
if child in path:
# This is a cycle.
cycle = path[path.index(child):]
# Check that this is not a dup cycle.
is_dup = False
for other_cycle in cycles:
if is_rotated(other_cycle, cycle):
is_dup = True
break
if not is_dup:
cycles.append(cycle)
yield cycle
else:
# Push this new path onto the 'stack'.
# This is probably the most expensive part of the algorithm
# (a list copy).
paths.append(path + [child])
# Mark the node as visited.
visited_nodes.add(child)
if __name__ == '__main__':
# Run the doctest tests.
import sys
import doctest
doctest.testmod(sys.modules['__main__'])
| gpl-2.0 | 7,121,401,923,031,350,000 | -79,654,178,030,625,120 | 32.897959 | 79 | 0.508805 | false |
rinatkosny/onedrive-d-old | onedrive_d/od_sqlite.py | 7 | 10052 | #!/usr/bin/python3
"""
Database manipulator classes for onedrive-d.
"""
import os
import threading
import sqlite3
from . import od_glob
class TaskManager:
"""
Task manager abstracts the task queue implemented in SQLite to better
control concurrency.
task status: 0 (added), 1 (fetched), 2 (done, deletable)
task types:
for dirs: sy (sync), rm (remove), mk (mkdir on server, postwork=[sy]), tr (move local to trash).
for files: af (analyze file), up (upload), dl (download, postwork=[add_row]),
mv (move), rf (remove), cp (copy).
"""
# this semaphore counts the number of tasks in the table
task_counter = threading.Semaphore(0)
# logger
logger = od_glob.get_logger()
# mutex lock
lock = threading.Lock()
db = None
def __init__(self):
# enable auto-commit by setting isolation_level
self.acquire_lock()
if TaskManager.db is None:
TaskManager.db = sqlite3.connect(
':memory:', isolation_level=None, check_same_thread=False)
TaskManager.db.execute("""
CREATE TABLE tasks
(type TEXT, local_path TEXT, remote_id TEXT, remote_parent_id TEXT,
status INT DEFAULT 0, args TEXT, extra_info TEXT,
UNIQUE(local_path) ON CONFLICT ABORT)
""")
self.release_lock()
self.cursor = TaskManager.db.cursor()
def close(self):
self.acquire_lock()
if TaskManager.db is not None:
TaskManager.db.commit()
TaskManager.db.close()
TaskManager.db = None
self.release_lock()
def acquire_lock(self):
TaskManager.lock.acquire()
def release_lock(self):
TaskManager.lock.release()
def dec_sem(self):
TaskManager.task_counter.acquire()
# self.logger.debug('decremented semaphore.')
def inc_sem(self):
TaskManager.task_counter.release()
# self.logger.debug('incremented semaphore.')
def add_task(self, type, local_path, remote_id='', remote_parent_id='', status=0, args='', extra_info=''):
# print(type + ' ' + local_path)
task_added = False
self.acquire_lock()
try:
# delete old pending tasks
self.cursor.execute(
'DELETE FROM tasks WHERE local_path=? AND status=0', (local_path, ))
# add new task
self.cursor.execute('INSERT INTO tasks (type, local_path, remote_id, remote_parent_id, status, args, extra_info) VALUES (?,?,?,?,?,?,?)',
(type, local_path, remote_id, remote_parent_id, status, args, extra_info)
)
self.logger.debug('added task "%s" "%s".', type, local_path)
task_added = True
except sqlite3.IntegrityError:
self.logger.debug('failed to add task "%s" "%s".', type, local_path)
self.release_lock()
if task_added:
self.inc_sem()
def get_task(self):
self.acquire_lock()
self.cursor.execute(
'SELECT rowid, type, local_path, remote_id, remote_parent_id, status, args, extra_info FROM tasks WHERE status=0 ORDER BY rowid ASC LIMIT 1')
row = self.cursor.fetchone()
if row is None:
self.release_lock()
return None
self.cursor.execute('UPDATE tasks SET status=1 WHERE rowid=?', (row[0], ))
self.release_lock()
data = {
'task_id': row[0],
'type': row[1],
'local_path': row[2],
'remote_id': row[3],
'remote_parent_id': row[4],
'status': row[5],
'args': row[6],
'extra_info': row[7]
}
return data
def del_task(self, task_id):
self.acquire_lock()
self.cursor.execute('DELETE FROM tasks WHERE rowid=?', (task_id, ))
self.release_lock()
def clean_tasks(self):
self.acquire_lock()
self.cursor.execute('DELETE FROM tasks')
self.release_lock()
def dump(self):
self.acquire_lock()
ret = TaskManager.db.iterdump()
self.release_lock()
return ret
class EntryManager:
lock = threading.Lock()
db_name = 'entries.db'
db_initialized = False
logger = od_glob.get_logger()
def __init__(self):
config = od_glob.get_config_instance()
self.conn = sqlite3.connect(
config.APP_CONF_PATH + '/' + EntryManager.db_name, isolation_level=None)
self.cursor = self.conn.cursor()
self.acquire_lock()
if not EntryManager.db_initialized:
self.cursor.execute("""
CREATE TABLE IF NOT EXISTS entries
(parent_path TEXT, name TEXT, isdir INT, remote_id TEXT UNIQUE PRIMARY KEY,
remote_parent_id TEXT PRIMARY_KEY, size INT, client_updated_time TEXT, status TEXT, visited INT,
UNIQUE(parent_path, name) ON CONFLICT REPLACE)
""")
self.cursor.execute('UPDATE entries SET visited=0')
self.conn.commit()
EntryManager.db_initialized = True
self.release_lock()
def close(self):
self.acquire_lock()
self.conn.commit()
self.conn.close()
self.release_lock()
def acquire_lock(self):
EntryManager.lock.acquire()
def release_lock(self):
EntryManager.lock.release()
def update_entry(self, local_path, obj):
"""
Update an entry row in entries database.
@param local_path: path to the local entry (MUST exist).
@param obj: REST object returned by API.
"""
# print(obj)
path, basename = os.path.split(local_path)
isdir = os.path.isdir(local_path)
if 'size' in obj:
size = obj['size']
else:
size = 0
self.acquire_lock()
self.cursor.execute(
'INSERT OR REPLACE INTO entries (parent_path, name, isdir, remote_id, remote_parent_id, size, client_updated_time, status, visited) VALUES (?,?,?,?,?,?,?,?,1)',
(path, basename, isdir, obj['id'], obj['parent_id'], size, obj['client_updated_time'], ''))
self.release_lock()
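    # Example 'obj' shape update_entry expects (an assumption based on the
    # fields read above; real Live API responses carry more keys):
    #   {'id': 'file.abc123', 'parent_id': 'folder.def456',
    #    'size': 1024, 'client_updated_time': '2014-01-01T00:00:00+0000'}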
def update_local_path(self, old_path, new_path):
path, basename = os.path.split(old_path)
new_path, new_basename = os.path.split(new_path)
self.acquire_lock()
self.cursor.execute('UPDATE entries SET parent_path=?, name=? WHERE parent_path=? AND name=?',
(new_path, new_basename, path, basename))
self.release_lock()
def _calc_sql_expr(self, isdir, local_path, remote_id):
if local_path == '':
where = 'remote_id=?'
cond = (isdir, remote_id)
else:
path, basename = os.path.split(local_path)
if remote_id is None:
where = 'parent_path=? AND name=?'
cond = (isdir, path, basename)
else:
where = 'parent_path=? AND name=? AND remote_id=?'
cond = (isdir, path, basename, remote_id)
return (where, cond)
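    # Illustrative outputs of _calc_sql_expr (derived from the branches above):
    #   (1, '', 'id!123')      -> ('remote_id=?', (1, 'id!123'))
    #   (0, '/a/b.txt', None)  -> ('parent_path=? AND name=?', (0, '/a', 'b.txt'))
    #   (0, '/a/b.txt', 'id!123')
    #       -> ('parent_path=? AND name=? AND remote_id=?',
    #           (0, '/a', 'b.txt', 'id!123'))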
def get_entry(self, isdir, local_path='', remote_id=None):
"""
At least one of local_path and remote_id should be given.
"""
where, cond = self._calc_sql_expr(isdir, local_path, remote_id)
self.acquire_lock()
self.cursor.execute('SELECT rowid, parent_path, name, isdir, remote_id, remote_parent_id, size, client_updated_time, status FROM entries WHERE isdir=? AND ' + where,
cond)
row = self.cursor.fetchone()
if row is not None:
self.cursor.execute(
'UPDATE entries SET visited=1 WHERE rowid=?', (row[0], ))
self.release_lock()
if row is not None:
# faster than dict(zip(k, v))
row = {
'entry_id': row[0],
'parent_path': row[1],
'name': row[2],
'isdir': row[3],
'remote_id': row[4],
'remote_parent_id': row[5],
'size': row[6],
'client_updated_time': row[7],
'status': row[8]
}
return row
def update_moved_entry_if_exists(self, isdir, local_path, new_parent):
"""
        This matching criterion is actually dangerous, even when limited to entries with the same name.
"""
try:
local_mtime = od_glob.time_to_str(
od_glob.timestamp_to_time(os.path.getmtime(local_path)))
if not isdir:
local_fsize = os.path.getsize(local_path)
else:
local_fsize = 0
except OSError as e:
self.logger.error(e)
return None
parent_path, basename = os.path.split(local_path)
self.acquire_lock()
count = self.cursor.execute('UPDATE entries SET parent_path=?, name=?, status="MOVED_TO", remote_parent_id=? WHERE status="MOVED_FROM" AND client_updated_time=? AND size=? AND isdir=? AND name=? LIMIT 1',
(parent_path, basename, new_parent, local_mtime, local_fsize, isdir, basename)).rowcount
self.release_lock()
return count == 1
def update_status_if_exists(self, isdir, local_path='', remote_id=None, status=''):
"""
        Better not to call get_entry and then update separately, because we want atomicity.
"""
where, cond = self._calc_sql_expr(isdir, local_path, remote_id)
self.acquire_lock()
self.cursor.execute('UPDATE entries SET status=? WHERE isdir=? AND ' + where,
(status, ) + cond)
self.release_lock()
def update_parent_path_by_parent_id(self, parent_path, remote_parent_id):
self.acquire_lock()
self.cursor.execute(
'SELECT parent_path FROM entries WHERE remote_parent_id=? LIMIT 1', (remote_parent_id, ))
row = self.cursor.fetchone()
if row is not None:
# if there is no immediate child, then there is no indirect child
# update parent path for indirect children
self.cursor.execute('UPDATE entries SET parent_path=replace(parent_path, ?, ?) WHERE parent_path LIKE ?',
row[0] + '/', parent_path + '/', row[0] + '/%')
# update parent path for remote_parent_id's immediate children
self.cursor.execute(
'UPDATE entries SET parent_path=? WHERE remote_parent_id=?', (parent_path, remote_parent_id))
self.release_lock()
def del_unvisited_entries(self):
self.acquire_lock()
self.cursor.execute('DELETE FROM entries WHERE visited=0')
self.release_lock()
def del_entry_by_remote_id(self, remote_id):
self.acquire_lock()
self.cursor.execute('DELETE FROM entries WHERE remote_id=?', (remote_id, ))
self.release_lock()
def del_entry_by_path(self, local_path):
path, basename = os.path.split(local_path)
self.acquire_lock()
self.cursor.execute(
'DELETE FROM entries WHERE parent_path=? AND name=?', (path, basename))
self.release_lock()
def del_entry_by_parent(self, parent_path=None, remote_parent_id=None):
self.acquire_lock()
if remote_parent_id is not None:
# this one does not simulate recursive deletion
self.cursor.execute(
'DELETE FROM entries WHERE remote_parent_id=?', (remote_parent_id, ))
if parent_path is not None:
# this one simulates recursive deletion
path, basename = os.path.split(parent_path)
self.cursor.execute(
'DELETE FROM entries WHERE parent_path LIKE ? OR parent_path=?', (parent_path + '/%', parent_path))
self.cursor.execute(
'DELETE FROM entries WHERE parent_path=? AND name=?', (path, basename))
self.release_lock()
| lgpl-3.0 | -5,488,743,539,056,038,000 | 9,001,538,105,137,737,000 | 30.911111 | 206 | 0.674791 | false |
Savaged-Zen/Savaged-Zen-Speedy | tools/perf/scripts/python/check-perf-trace.py | 948 | 2501 | # perf trace event handlers, generated by perf trace -g python
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
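# Typical invocation (assumed, matching the generator mentioned above):
#   perf record -e <events> -- <workload>    # capture trace data first
#   perf trace -s check-perf-trace.py        # then replay it through this script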
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 | -1,595,655,514,356,993,500 | 4,254,878,797,064,072,700 | 29.5 | 78 | 0.642543 | false |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/django/contrib/gis/db/backends/mysql/introspection.py | 700 | 1771 | from MySQLdb.constants import FIELD_TYPE
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.mysql.introspection import DatabaseIntrospection
class MySQLIntrospection(DatabaseIntrospection):
# Updating the data_types_reverse dictionary with the appropriate
# type for Geometry fields.
data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
data_types_reverse[FIELD_TYPE.GEOMETRY] = 'GeometryField'
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# In order to get the specific geometry type of the field,
# we introspect on the table definition using `DESCRIBE`.
cursor.execute('DESCRIBE %s' %
self.connection.ops.quote_name(table_name))
# Increment over description info until we get to the geometry
# column.
for column, typ, null, key, default, extra in cursor.fetchall():
if column == geo_col:
# Using OGRGeomType to convert from OGC name to Django field.
# MySQL does not support 3D or SRIDs, so the field params
# are empty.
field_type = OGRGeomType(typ).django
field_params = {}
break
finally:
cursor.close()
return field_type, field_params
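    # Minimal usage sketch (table and column names are hypothetical):
    #     field_type, field_params = connection.introspection.get_geometry_type(
    #         'my_table', 'geom')
    #     # e.g. field_type == 'PointField', field_params == {}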
def supports_spatial_index(self, cursor, table_name):
# Supported with MyISAM, or InnoDB on MySQL 5.7.5+
storage_engine = self.get_storage_engine(cursor, table_name)
return (
(storage_engine == 'InnoDB' and self.connection.mysql_version >= (5, 7, 5)) or
storage_engine == 'MyISAM'
)
| mit | -5,188,843,610,677,354,000 | -768,514,128,085,719,400 | 42.195122 | 90 | 0.615471 | false |
schwarz/youtube-dl | youtube_dl/extractor/dreisat.py | 107 | 3259 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unified_strdate,
)
class DreiSatIE(InfoExtractor):
IE_NAME = '3sat'
_VALID_URL = r'(?:http://)?(?:www\.)?3sat\.de/mediathek/(?:index\.php|mediathek\.php)?\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$'
_TESTS = [
{
'url': 'http://www.3sat.de/mediathek/index.php?mode=play&obj=45918',
'md5': 'be37228896d30a88f315b638900a026e',
'info_dict': {
'id': '45918',
'ext': 'mp4',
'title': 'Waidmannsheil',
'description': 'md5:cce00ca1d70e21425e72c86a98a56817',
'uploader': '3sat',
'upload_date': '20140913'
}
},
{
'url': 'http://www.3sat.de/mediathek/mediathek.php?mode=play&obj=51066',
'only_matching': True,
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
details_url = 'http://www.3sat.de/mediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id
details_doc = self._download_xml(details_url, video_id, 'Downloading video details')
status_code = details_doc.find('./status/statuscode')
if status_code is not None and status_code.text != 'ok':
code = status_code.text
if code == 'notVisibleAnymore':
message = 'Video %s is not available' % video_id
else:
message = '%s returned error: %s' % (self.IE_NAME, code)
raise ExtractorError(message, expected=True)
thumbnail_els = details_doc.findall('.//teaserimage')
thumbnails = [{
'width': int(te.attrib['key'].partition('x')[0]),
'height': int(te.attrib['key'].partition('x')[2]),
'url': te.text,
} for te in thumbnail_els]
information_el = details_doc.find('.//information')
video_title = information_el.find('./title').text
video_description = information_el.find('./detail').text
details_el = details_doc.find('.//details')
video_uploader = details_el.find('./channel').text
upload_date = unified_strdate(details_el.find('./airtime').text)
format_els = details_doc.findall('.//formitaet')
formats = [{
'format_id': fe.attrib['basetype'],
'width': int(fe.find('./width').text),
'height': int(fe.find('./height').text),
'url': fe.find('./url').text,
'filesize': int(fe.find('./filesize').text),
'video_bitrate': int(fe.find('./videoBitrate').text),
} for fe in format_els
if not fe.find('./url').text.startswith('http://www.metafilegenerator.de/')]
self._sort_formats(formats)
return {
'_type': 'video',
'id': video_id,
'title': video_title,
'formats': formats,
'description': video_description,
'thumbnails': thumbnails,
'thumbnail': thumbnails[-1]['url'],
'uploader': video_uploader,
'upload_date': upload_date,
}
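    # Illustrative check (relies on the standard InfoExtractor helpers; the URL
    # is taken from _TESTS above):
    #     DreiSatIE.suitable('http://www.3sat.de/mediathek/index.php?mode=play&obj=45918')
    #     # -> True, and _real_extract() then builds the info dict returned above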
| unlicense | 714,306,783,977,240,300 | 537,443,338,471,717,000 | 36.45977 | 140 | 0.537588 | false |
sysbot/cloudenvy | cloudenvy/main.py | 1 | 3204 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
import argparse
import logging
import pkgutil
import string
from cloudenvy.config import EnvyConfig
import cloudenvy.commands
#TODO(bcwaldon): replace this with entry points!
def _load_commands():
"""Iterate through modules in command and import suspected command classes
This looks for a class in each module in cloudenvy.commands that has the
same name as its module with the first character uppercased. For example,
the cloudenvy.commands.up module should have a class Up within it.
"""
modlist = list(pkgutil.iter_modules(cloudenvy.commands.__path__))
#NOTE(bcwaldon): this parses out a string representation of each
# individual command module. For example, if we had a single command
# in cloudenvy.commands named 'up', this list would look like ['up]
commands = [_[1] for _ in modlist]
for command in commands:
#NOTE(bcwaldon): the __import__ statement returns a handle on the
# top-level 'cloudenvy' package, so we must iterate down through
# each sub-package to get a handle on our module
module_name = 'cloudenvy.commands.{0}'.format(command)
_cloudenvy = __import__(module_name, globals(), locals(), [], -1)
module = getattr(_cloudenvy.commands, command)
command_class = getattr(module, string.capitalize(command))
yield (command, command_class)
def _build_parser():
parser = argparse.ArgumentParser(
description='Launch a virtual machine on an OpenStack cloud.')
parser.add_argument('-v', '--verbosity', action='count',
help='Increase output verbosity.')
parser.add_argument('-c', '--cloud', action='store',
help='Specify which cloud to use.')
return parser
def _init_help_command(parser, subparser):
def find_command_help(config, args):
if args.command:
subparser.choices[args.command].print_help()
else:
parser.print_help()
help_cmd = subparser.add_parser(
'help', help='Display help information for a specfiic command.'
)
help_cmd.add_argument(
'command', action='store', nargs='?',
help='Specific command to describe.'
)
help_cmd.set_defaults(func=find_command_help)
return parser
def _init_commands(commands, parser):
_commands = {}
for (command, command_class) in commands:
_commands[command] = command_class(parser, _commands)
def main():
parser = _build_parser()
command_subparser = parser.add_subparsers(title='Available commands')
_init_help_command(parser, command_subparser)
commands = _load_commands()
_init_commands(commands, command_subparser)
args = parser.parse_args()
config = EnvyConfig(args)
if args.verbosity == 3:
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger('novaclient').setLevel(logging.DEBUG)
elif args.verbosity == 2:
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger('novaclient').setLevel(logging.INFO)
elif args.verbosity == 1:
logging.getLogger().setLevel(logging.INFO)
args.func(config, args)
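# Sketch of the discovery convention implemented by _load_commands (module and
# class names here are hypothetical): a file cloudenvy/commands/up.py defining
#     class Up(object):
#         def __init__(self, argparser, commands): ...
# is picked up automatically and exposed as the 'up' subcommand.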
| apache-2.0 | -2,645,471,006,552,786,000 | 8,820,266,058,107,143,000 | 33.085106 | 78 | 0.669476 | false |
vine/uwsgi | examples/welcome3.py | 6 | 1074 | import uwsgi
import os
def xsendfile(e, sr):
sr('200 OK', [('Content-Type', 'image/png'), ('X-Sendfile', os.path.abspath('logo_uWSGI.png'))])
return b''
def serve_logo(e, sr):
sr('200 OK', [('Content-Type', 'image/png')])
return uwsgi.sendfile('logo_uWSGI.png')
def serve_config(e, sr):
sr('200 OK', [('Content-Type', 'text/html')])
for opt in uwsgi.opt.keys():
body = "{opt} = {optvalue}<br/>".format(opt=opt, optvalue=uwsgi.opt[opt].decode('ascii'))
yield bytes(body.encode('ascii'))
routes = {}
routes['/xsendfile'] = xsendfile
routes['/logo'] = serve_logo
routes['/config'] = serve_config
def application(env, start_response):
if env['PATH_INFO'] in routes:
return routes[env['PATH_INFO']](env, start_response)
start_response('200 OK', [('Content-Type', 'text/html')])
body = """
<img src="/logo"/> version {version}<br/>
<hr size="1"/>
Configuration<br/>
<iframe src="/config"></iframe><br/>
<br/>
""".format(version=uwsgi.version.decode('ascii'))
return bytes(body.encode('ascii'))
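# Hypothetical launch command (standard uWSGI flags, not part of this module):
#     uwsgi --http :8080 --wsgi-file welcome3.py
# after which /, /logo, /config and /xsendfile are served by the routes above.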
| gpl-2.0 | -2,962,758,308,354,080,300 | -8,840,144,784,627,878,000 | 21.851064 | 100 | 0.612663 | false |
zanderle/django | django/contrib/gis/gdal/raster/source.py | 297 | 13274 | import json
import os
from ctypes import addressof, byref, c_double, c_void_p
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import raster as capi
from django.contrib.gis.gdal.raster.band import BandList
from django.contrib.gis.gdal.raster.const import GDAL_RESAMPLE_ALGORITHMS
from django.contrib.gis.gdal.srs import SpatialReference, SRSException
from django.contrib.gis.geometry.regex import json_regex
from django.utils import six
from django.utils.encoding import (
force_bytes, force_text, python_2_unicode_compatible,
)
from django.utils.functional import cached_property
class TransformPoint(list):
indices = {
'origin': (0, 3),
'scale': (1, 5),
'skew': (2, 4),
}
def __init__(self, raster, prop):
x = raster.geotransform[self.indices[prop][0]]
y = raster.geotransform[self.indices[prop][1]]
list.__init__(self, [x, y])
self._raster = raster
self._prop = prop
@property
def x(self):
return self[0]
@x.setter
def x(self, value):
gtf = self._raster.geotransform
gtf[self.indices[self._prop][0]] = value
self._raster.geotransform = gtf
@property
def y(self):
return self[1]
@y.setter
def y(self, value):
gtf = self._raster.geotransform
gtf[self.indices[self._prop][1]] = value
self._raster.geotransform = gtf
@python_2_unicode_compatible
class GDALRaster(GDALBase):
"""
Wraps a raster GDAL Data Source object.
"""
def __init__(self, ds_input, write=False):
self._write = 1 if write else 0
Driver.ensure_registered()
# Preprocess json inputs. This converts json strings to dictionaries,
# which are parsed below the same way as direct dictionary inputs.
if isinstance(ds_input, six.string_types) and json_regex.match(ds_input):
ds_input = json.loads(ds_input)
# If input is a valid file path, try setting file as source.
if isinstance(ds_input, six.string_types):
if not os.path.exists(ds_input):
raise GDALException('Unable to read raster source input "{}"'.format(ds_input))
try:
# GDALOpen will auto-detect the data source type.
self._ptr = capi.open_ds(force_bytes(ds_input), self._write)
except GDALException as err:
raise GDALException('Could not open the datasource at "{}" ({}).'.format(ds_input, err))
elif isinstance(ds_input, dict):
# A new raster needs to be created in write mode
self._write = 1
# Create driver (in memory by default)
driver = Driver(ds_input.get('driver', 'MEM'))
# For out of memory drivers, check filename argument
if driver.name != 'MEM' and 'name' not in ds_input:
raise GDALException('Specify name for creation of raster with driver "{}".'.format(driver.name))
# Check if width and height where specified
if 'width' not in ds_input or 'height' not in ds_input:
raise GDALException('Specify width and height attributes for JSON or dict input.')
# Check if srid was specified
if 'srid' not in ds_input:
raise GDALException('Specify srid for JSON or dict input.')
# Create GDAL Raster
self._ptr = capi.create_ds(
driver._ptr,
force_bytes(ds_input.get('name', '')),
ds_input['width'],
ds_input['height'],
ds_input.get('nr_of_bands', len(ds_input.get('bands', []))),
ds_input.get('datatype', 6),
None
)
# Set band data if provided
for i, band_input in enumerate(ds_input.get('bands', [])):
band = self.bands[i]
band.data(band_input['data'])
if 'nodata_value' in band_input:
band.nodata_value = band_input['nodata_value']
# Set SRID
self.srs = ds_input.get('srid')
# Set additional properties if provided
if 'origin' in ds_input:
self.origin.x, self.origin.y = ds_input['origin']
if 'scale' in ds_input:
self.scale.x, self.scale.y = ds_input['scale']
if 'skew' in ds_input:
self.skew.x, self.skew.y = ds_input['skew']
elif isinstance(ds_input, c_void_p):
# Instantiate the object using an existing pointer to a gdal raster.
self._ptr = ds_input
else:
raise GDALException('Invalid data source input type: "{}".'.format(type(ds_input)))
def __del__(self):
if self._ptr and capi:
capi.close_ds(self._ptr)
def __str__(self):
return self.name
def __repr__(self):
"""
Short-hand representation because WKB may be very large.
"""
return '<Raster object at %s>' % hex(addressof(self._ptr))
def _flush(self):
"""
Flush all data from memory into the source file if it exists.
The data that needs flushing are geotransforms, coordinate systems,
nodata_values and pixel values. This function will be called
automatically wherever it is needed.
"""
# Raise an Exception if the value is being changed in read mode.
if not self._write:
raise GDALException('Raster needs to be opened in write mode to change values.')
capi.flush_ds(self._ptr)
@property
def name(self):
"""
Returns the name of this raster. Corresponds to filename
for file-based rasters.
"""
return force_text(capi.get_ds_description(self._ptr))
@cached_property
def driver(self):
"""
Returns the GDAL Driver used for this raster.
"""
ds_driver = capi.get_ds_driver(self._ptr)
return Driver(ds_driver)
@property
def width(self):
"""
Width (X axis) in pixels.
"""
return capi.get_ds_xsize(self._ptr)
@property
def height(self):
"""
Height (Y axis) in pixels.
"""
return capi.get_ds_ysize(self._ptr)
@property
def srs(self):
"""
Returns the SpatialReference used in this GDALRaster.
"""
try:
wkt = capi.get_ds_projection_ref(self._ptr)
if not wkt:
return None
return SpatialReference(wkt, srs_type='wkt')
except SRSException:
return None
@srs.setter
def srs(self, value):
"""
Sets the spatial reference used in this GDALRaster. The input can be
a SpatialReference or any parameter accepted by the SpatialReference
constructor.
"""
if isinstance(value, SpatialReference):
srs = value
elif isinstance(value, six.integer_types + six.string_types):
srs = SpatialReference(value)
else:
raise ValueError('Could not create a SpatialReference from input.')
capi.set_ds_projection_ref(self._ptr, srs.wkt.encode())
self._flush()
@property
def geotransform(self):
"""
Returns the geotransform of the data source.
Returns the default geotransform if it does not exist or has not been
set previously. The default is [0.0, 1.0, 0.0, 0.0, 0.0, -1.0].
"""
# Create empty ctypes double array for data
gtf = (c_double * 6)()
capi.get_ds_geotransform(self._ptr, byref(gtf))
return list(gtf)
@geotransform.setter
def geotransform(self, values):
"Sets the geotransform for the data source."
if sum([isinstance(x, (int, float)) for x in values]) != 6:
raise ValueError('Geotransform must consist of 6 numeric values.')
# Create ctypes double array with input and write data
values = (c_double * 6)(*values)
capi.set_ds_geotransform(self._ptr, byref(values))
self._flush()
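    # Illustrative reading of the affine geotransform (values assumed): the
    # default [0.0, 1.0, 0.0, 0.0, 0.0, -1.0] means origin (0, 0), a pixel
    # size of 1 x -1, and zero skew in both directions.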
@property
def origin(self):
"""
Coordinates of the raster origin.
"""
return TransformPoint(self, 'origin')
@property
def scale(self):
"""
Pixel scale in units of the raster projection.
"""
return TransformPoint(self, 'scale')
@property
def skew(self):
"""
Skew of pixels (rotation parameters).
"""
return TransformPoint(self, 'skew')
@property
def extent(self):
"""
Returns the extent as a 4-tuple (xmin, ymin, xmax, ymax).
"""
# Calculate boundary values based on scale and size
xval = self.origin.x + self.scale.x * self.width
yval = self.origin.y + self.scale.y * self.height
# Calculate min and max values
xmin = min(xval, self.origin.x)
xmax = max(xval, self.origin.x)
ymin = min(yval, self.origin.y)
ymax = max(yval, self.origin.y)
return xmin, ymin, xmax, ymax
@property
def bands(self):
return BandList(self)
def warp(self, ds_input, resampling='NearestNeighbour', max_error=0.0):
"""
Returns a warped GDALRaster with the given input characteristics.
The input is expected to be a dictionary containing the parameters
of the target raster. Allowed values are width, height, SRID, origin,
scale, skew, datatype, driver, and name (filename).
By default, the warp functions keeps all parameters equal to the values
of the original source raster. For the name of the target raster, the
name of the source raster will be used and appended with
_copy. + source_driver_name.
In addition, the resampling algorithm can be specified with the "resampling"
input parameter. The default is NearestNeighbor. For a list of all options
consult the GDAL_RESAMPLE_ALGORITHMS constant.
"""
# Get the parameters defining the geotransform, srid, and size of the raster
if 'width' not in ds_input:
ds_input['width'] = self.width
if 'height' not in ds_input:
ds_input['height'] = self.height
if 'srid' not in ds_input:
ds_input['srid'] = self.srs.srid
if 'origin' not in ds_input:
ds_input['origin'] = self.origin
if 'scale' not in ds_input:
ds_input['scale'] = self.scale
if 'skew' not in ds_input:
ds_input['skew'] = self.skew
# Get the driver, name, and datatype of the target raster
if 'driver' not in ds_input:
ds_input['driver'] = self.driver.name
if 'name' not in ds_input:
ds_input['name'] = self.name + '_copy.' + self.driver.name
if 'datatype' not in ds_input:
ds_input['datatype'] = self.bands[0].datatype()
# Set the number of bands
ds_input['nr_of_bands'] = len(self.bands)
# Create target raster
target = GDALRaster(ds_input, write=True)
# Copy nodata values to warped raster
for index, band in enumerate(self.bands):
target.bands[index].nodata_value = band.nodata_value
# Select resampling algorithm
algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling]
# Reproject image
capi.reproject_image(
self._ptr, self.srs.wkt.encode(),
target._ptr, target.srs.wkt.encode(),
algorithm, 0.0, max_error,
c_void_p(), c_void_p(), c_void_p()
)
# Make sure all data is written to file
target._flush()
return target
def transform(self, srid, driver=None, name=None, resampling='NearestNeighbour',
max_error=0.0):
"""
Returns a copy of this raster reprojected into the given SRID.
"""
# Convert the resampling algorithm name into an algorithm id
algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling]
# Instantiate target spatial reference system
target_srs = SpatialReference(srid)
# Create warped virtual dataset in the target reference system
target = capi.auto_create_warped_vrt(
self._ptr, self.srs.wkt.encode(), target_srs.wkt.encode(),
algorithm, max_error, c_void_p()
)
target = GDALRaster(target)
# Construct the target warp dictionary from the virtual raster
data = {
'srid': srid,
'width': target.width,
'height': target.height,
'origin': [target.origin.x, target.origin.y],
'scale': [target.scale.x, target.scale.y],
'skew': [target.skew.x, target.skew.y],
}
# Set the driver and filepath if provided
if driver:
data['driver'] = driver
if name:
data['name'] = name
# Warp the raster into new srid
return self.warp(data, resampling=resampling, max_error=max_error)
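    # Minimal usage sketch (values and the target SRID are made up; assumes a
    # working GDAL install):
    #     rst = GDALRaster({'srid': 4326, 'width': 10, 'height': 10,
    #                       'datatype': 1, 'bands': [{'data': range(100)}]})
    #     reprojected = rst.transform(3086)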
| bsd-3-clause | -1,774,346,874,449,399,300 | -1,043,829,570,942,997,800 | 33.21134 | 112 | 0.586937 | false |
CitizenB/ansible | lib/ansible/plugins/connection/libvirt_lxc.py | 7 | 6851 | # Based on local.py (c) 2012, Michael DeHaan <[email protected]>
# Based on chroot.py (c) 2013, Maykel Moya <[email protected]>
# (c) 2013, Michael Scherer <[email protected]>
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import os
import os.path
import pipes
import subprocess
import traceback
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.connection import ConnectionBase
BUFSIZE = 65536
class Connection(ConnectionBase):
''' Local lxc based connections '''
transport = 'libvirt_lxc'
has_pipelining = True
# su currently has an undiagnosed issue with calculating the file
# checksums (so copy, for instance, doesn't work right)
# Have to look into that before re-enabling this
become_methods = frozenset(C.BECOME_METHODS).difference(('su',))
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self.lxc = self._play_context.remote_addr
self.virsh = self._search_executable('virsh')
self._check_domain(self.lxc)
def _search_executable(self, executable):
cmd = distutils.spawn.find_executable(executable)
if not cmd:
raise AnsibleError("%s command not found in PATH") % executable
return cmd
def _check_domain(self, domain):
p = subprocess.Popen([self.virsh, '-q', '-c', 'lxc:///', 'dominfo', domain],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode:
raise AnsibleError("%s is not a lxc defined in libvirt" % domain)
def _connect(self):
''' connect to the lxc; nothing to do here '''
super(Connection, self)._connect()
if not self._connected:
self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.lxc)
self._connected = True
def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
''' run a command on the chroot. This is only needed for implementing
put_file() get_file() so that we don't have to read the whole file
into memory.
        compared to exec_command() it loses some niceties like being able to
return the process's exit code immediately.
'''
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
local_cmd = [self.virsh, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', executable , '-c', cmd]
self._display.vvv("EXEC %s" % (local_cmd), host=self.lxc)
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
def exec_command(self, cmd, in_data=None, sudoable=False):
''' run a command on the chroot '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
p = self._buffered_exec_command(cmd)
stdout, stderr = p.communicate(in_data)
return (p.returncode, stdout, stderr)
def _prefix_login_path(self, remote_path):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
'''
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path)
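    # Illustrative normalizations (hedged examples, not exercised by tests
    # here): 'tmp/foo' -> '/tmp/foo' and '/var//log/' -> '/var/log'.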
def put_file(self, in_path, out_path):
''' transfer a file from local to lxc '''
super(Connection, self).put_file(in_path, out_path)
self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.lxc)
out_path = pipes.quote(self._prefix_login_path(out_path))
try:
with open(in_path, 'rb') as in_file:
try:
p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file)
except OSError:
raise AnsibleError("chroot connection requires dd command in the chroot")
try:
stdout, stderr = p.communicate()
except:
traceback.print_exc()
raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
except IOError:
raise AnsibleError("file or module does not exist at: %s" % in_path)
def fetch_file(self, in_path, out_path):
''' fetch a file from lxc to local '''
super(Connection, self).fetch_file(in_path, out_path)
self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.lxc)
in_path = pipes.quote(self._prefix_login_path(in_path))
try:
p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
except OSError:
raise AnsibleError("chroot connection requires dd command in the chroot")
with open(out_path, 'wb+') as out_file:
try:
chunk = p.stdout.read(BUFSIZE)
while chunk:
out_file.write(chunk)
chunk = p.stdout.read(BUFSIZE)
except:
traceback.print_exc()
raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
def close(self):
''' terminate the connection; nothing to do here '''
super(Connection, self).close()
self._connected = False
| gpl-3.0 | -7,704,487,933,255,461,000 | -1,930,451,200,596,896,800 | 40.521212 | 121 | 0.618888 | false |
divio/django | tests/null_fk/tests.py | 352 | 2982 | from __future__ import unicode_literals
from django.db.models import Q
from django.test import TestCase
from .models import (
Comment, Forum, Item, Post, PropertyValue, SystemDetails, SystemInfo,
)
class NullFkTests(TestCase):
def test_null_fk(self):
d = SystemDetails.objects.create(details='First details')
s = SystemInfo.objects.create(system_name='First forum', system_details=d)
f = Forum.objects.create(system_info=s, forum_name='First forum')
p = Post.objects.create(forum=f, title='First Post')
c1 = Comment.objects.create(post=p, comment_text='My first comment')
c2 = Comment.objects.create(comment_text='My second comment')
# Starting from comment, make sure that a .select_related(...) with a specified
# set of fields will properly LEFT JOIN multiple levels of NULLs (and the things
# that come after the NULLs, or else data that should exist won't). Regression
# test for #7369.
c = Comment.objects.select_related().get(id=c1.id)
self.assertEqual(c.post, p)
self.assertEqual(Comment.objects.select_related().get(id=c2.id).post, None)
self.assertQuerysetEqual(
Comment.objects.select_related('post__forum__system_info').all(),
[
(c1.id, 'My first comment', '<Post: First Post>'),
(c2.id, 'My second comment', 'None')
],
transform=lambda c: (c.id, c.comment_text, repr(c.post))
)
# Regression test for #7530, #7716.
self.assertIsNone(Comment.objects.select_related('post').filter(post__isnull=True)[0].post)
self.assertQuerysetEqual(
Comment.objects.select_related('post__forum__system_info__system_details'),
[
(c1.id, 'My first comment', '<Post: First Post>'),
(c2.id, 'My second comment', 'None')
],
transform=lambda c: (c.id, c.comment_text, repr(c.post))
)
def test_combine_isnull(self):
item = Item.objects.create(title='Some Item')
pv = PropertyValue.objects.create(label='Some Value')
item.props.create(key='a', value=pv)
item.props.create(key='b') # value=NULL
q1 = Q(props__key='a', props__value=pv)
q2 = Q(props__key='b', props__value__isnull=True)
# Each of these individually should return the item.
self.assertEqual(Item.objects.get(q1), item)
self.assertEqual(Item.objects.get(q2), item)
# Logically, qs1 and qs2, and qs3 and qs4 should be the same.
qs1 = Item.objects.filter(q1) & Item.objects.filter(q2)
qs2 = Item.objects.filter(q2) & Item.objects.filter(q1)
qs3 = Item.objects.filter(q1) | Item.objects.filter(q2)
qs4 = Item.objects.filter(q2) | Item.objects.filter(q1)
# Regression test for #15823.
self.assertEqual(list(qs1), list(qs2))
self.assertEqual(list(qs3), list(qs4))
| bsd-3-clause | -5,037,095,936,194,728,000 | -7,628,307,502,459,137,000 | 41.6 | 99 | 0.620054 | false |
ehashman/oh-mainline | vendor/packages/python-social-auth/social/apps/pyramid_app/views.py | 75 | 1091 | from pyramid.view import view_config
from social.utils import module_member
from social.actions import do_auth, do_complete, do_disconnect
from social.apps.pyramid_app.utils import psa, login_required
@view_config(route_name='social.auth', request_method='GET')
@psa('social.complete')
def auth(request):
return do_auth(request.backend, redirect_name='next')
@view_config(route_name='social.complete', request_method=('GET', 'POST'))
@psa('social.complete')
def complete(request, *args, **kwargs):
do_login = module_member(request.backend.setting('LOGIN_FUNCTION'))
return do_complete(request.backend, do_login, request.user,
redirect_name='next', *args, **kwargs)
@view_config(route_name='social.disconnect', request_method=('POST',))
@view_config(route_name='social.disconnect_association',
request_method=('POST',))
@psa()
@login_required
def disconnect(request):
return do_disconnect(request.backend, request.user,
request.matchdict.get('association_id'),
redirect_name='next')
| agpl-3.0 | -6,734,458,409,627,225,000 | 3,555,488,081,841,292,300 | 35.366667 | 74 | 0.688359 | false |
mattcaldwell/pip | tests/lib/venv.py | 5 | 2574 | from __future__ import absolute_import
import os
import sys
import subprocess
import virtualenv as _virtualenv
from .path import Path
# On Python < 3.3 we don't have subprocess.DEVNULL
try:
DEVNULL = subprocess.DEVNULL
except AttributeError:
DEVNULL = open(os.devnull, "wb")
class VirtualEnvironment(object):
"""
An abstraction around virtual environments, currently it only uses
virtualenv but in the future it could use pyvenv.
"""
def __init__(self, location, *args, **kwargs):
self.location = Path(location)
self.pip_source_dir = kwargs.pop("pip_source_dir")
self._system_site_packages = kwargs.pop("system_site_packages", False)
home, lib, inc, bin = _virtualenv.path_locations(self.location)
# workaround for https://github.com/pypa/virtualenv/issues/306
if hasattr(sys, "pypy_version_info"):
lib = os.path.join(home, 'lib-python', sys.version[:3])
self.lib = Path(lib)
self.bin = Path(bin)
super(VirtualEnvironment, self).__init__(*args, **kwargs)
def __repr__(self):
return "<VirtualEnvironment {}>".format(self.location)
@classmethod
def create(cls, location, clear=False, pip_source_dir=None):
obj = cls(location, pip_source_dir=pip_source_dir)
obj._create(clear=clear)
return obj
def _create(self, clear=False):
# Create the actual virtual environment
_virtualenv.create_environment(
self.location,
clear=clear,
never_download=True,
no_pip=True,
)
# Install our development version of pip install the virtual
# environment
cmd = [self.bin.join("python"), "setup.py", "develop"]
p = subprocess.Popen(
cmd,
cwd=self.pip_source_dir,
stderr=subprocess.STDOUT,
stdout=DEVNULL,
)
p.communicate()
        if p.returncode != 0:
            raise subprocess.CalledProcessError(
                p.returncode,
                cmd[0],
                output=p.stdout,
            )
def clear(self):
self._create(clear=True)
@property
def system_site_packages(self):
return self._system_site_packages
@system_site_packages.setter
def system_site_packages(self, value):
marker = self.lib.join("no-global-site-packages.txt")
if value:
marker.rm()
else:
marker.touch()
self._system_site_packages = value
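# Minimal usage sketch (paths are hypothetical):
#     venv = VirtualEnvironment.create('/tmp/venv', pip_source_dir='/src/pip')
#     venv.system_site_packages = True   # removes the no-global marker file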
| mit | 6,653,464,957,221,958,000 | 7,511,471,281,214,885,000 | 27.921348 | 78 | 0.599068 | false |
jiegec/gnuradio | gr-atsc/python/atsc/qa_atsc.py | 55 | 7019 | #!/usr/bin/env python
#
# Copyright 2004,2006,2007,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import sys
from gnuradio import gr, gr_unittest, atsc, blocks
from gnuradio.atsc import atsc_utils
class memoize(object):
def __init__(self, thunk):
self.thunk = thunk
self.cached = False
self.value = None
def __call__(self):
if self.cached:
return self.value
self.value = self.thunk()
self.cached = True
return self.value
"""
Make a fake transport stream that's big enough for our purposes.
We generate 8 full fields. This is relatively expensive. It
takes about 2 seconds to execute.
"""
make_transport_stream = \
memoize(lambda : tuple(atsc_utils.make_fake_transport_stream_packet(8 * atsc.ATSC_DSEGS_PER_FIELD)))
def pad_transport_stream(src):
"""
An MPEG transport stream packet is 188 bytes long. Internally we use a packet
that is 256 bytes long to help with buffer alignment. This function adds the
appropriate trailing padding to convert each packet from 188 to 256 bytes.
"""
return atsc_utils.pad_stream(src, atsc.sizeof_atsc_mpeg_packet, atsc.sizeof_atsc_mpeg_packet_pad)
def depad_transport_stream(src):
"""
An MPEG transport stream packet is 188 bytes long. Internally we use a packet
that is 256 bytes long to help with buffer alignment. This function removes the
trailing padding to convert each packet from 256 back to 188 bytes.
"""
return atsc_utils.depad_stream(src, atsc.sizeof_atsc_mpeg_packet, atsc.sizeof_atsc_mpeg_packet_pad)
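# Illustrative round trip (sizes per the docstrings above): a stream of
# n * 188 bytes grows to n * 256 bytes through pad_transport_stream() and
# comes back byte-identical through depad_transport_stream().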
class vector_source_ts(gr.hier_block2):
"""
MPEG Transport stream source for testing.
"""
def __init__(self, ts):
"""
        Pad transport stream packets to 256 bytes and reformat appropriately.
Args:
ts: MPEG transport stream. (sequence of ints in [0,255]; len(ts) % 188 == 0)
"""
src = blocks.vector_source_b(pad_transport_stream(ts))
s2v = blocks.stream_to_vector(gr.sizeof_char, atsc.sizeof_atsc_mpeg_packet)
gr.hier_block2.__init__(self, "vector_source_ts",
gr.io_signature(0, 0, 0),
s2v.output_signature())
self.connect(src, s2v, self)
class vector_sink_ts(gr.hier_block2):
"""
MPEG Transport stream sink for testing.
"""
def __init__(self):
"""
"""
v2s = blocks.vector_to_stream(gr.sizeof_char, atsc.sizeof_atsc_mpeg_packet)
self.sink = blocks.vector_sink_b()
gr.hier_block2.__init__(self, "vector_sink_ts",
v2s.input_signature(),
gr.io_signature(0, 0, 0))
self.connect(self, v2s, self.sink)
def data(self):
"""
        Extracts the transport stream from the sink and returns it to Python.
        Depads transport stream packets from 256 back to 188 bytes.
@rtype: tuple of ints in [0,255]; len(result) % 188 == 0
"""
return tuple(depad_transport_stream(self.sink.data()))
class qa_atsc(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
# The tests are run in alphabetical order
def test_loopback_000(self):
"""
Loopback randomizer to derandomizer
"""
src_data = make_transport_stream()
expected_result = src_data
src = vector_source_ts(src_data)
rand = atsc.randomizer()
derand = atsc.derandomizer()
dst = vector_sink_ts()
self.tb.connect(src, rand)
self.tb.connect(rand, derand)
self.tb.connect(derand, dst)
self.tb.run()
result_data = dst.data ()
self.assertEqual (expected_result, result_data)
def est_loopback_001(self):
"""
Loopback randomizer/rs_encoder to rs_decoder/derandomizer
"""
src_data = make_transport_stream()
expected_result = src_data
src = vector_source_ts(src_data)
rand = atsc.randomizer()
rs_enc = atsc.rs_encoder()
rs_dec = atsc.rs_decoder()
derand = atsc.derandomizer()
dst = vector_sink_ts()
self.tb.connect(src, rand, rs_enc, rs_dec, derand, dst)
self.tb.run ()
result_data = dst.data ()
self.assertEqual (expected_result, result_data)
def est_loopback_002(self):
"""
Loopback randomizer/rs_encoder/interleaver to
deinterleaver/rs_decoder/derandomizer
"""
src_data = make_transport_stream()
interleaver_delay = 52
expected_result = src_data[0:len(src_data)-(interleaver_delay*atsc.ATSC_MPEG_PKT_LENGTH)]
src = vector_source_ts(src_data)
rand = atsc.randomizer()
rs_enc = atsc.rs_encoder()
inter = atsc.interleaver()
deinter = atsc.deinterleaver()
rs_dec = atsc.rs_decoder()
derand = atsc.derandomizer()
dst = vector_sink_ts()
self.tb.connect(src, rand, rs_enc, inter, deinter, rs_dec, derand, dst)
self.tb.run ()
result_data = dst.data ()
result_data = result_data[(interleaver_delay*atsc.ATSC_MPEG_PKT_LENGTH):len(result_data)]
self.assertEqual (expected_result, result_data)
def est_loopback_003(self):
"""
Loopback randomizer/rs_encoder/interleaver/trellis_encoder
via ds_to_softds to
viterbi_decoder/deinterleaver/rs_decoder/derandomizer
"""
src_data = make_transport_stream()
interleaver_delay = 52
viterbi_delay = 12
expected_result = src_data[0:len(src_data)-((interleaver_delay+viterbi_delay)*atsc.ATSC_MPEG_PKT_LENGTH)]
src = vector_source_ts(src_data)
rand = atsc.randomizer()
rs_enc = atsc.rs_encoder()
inter = atsc.interleaver()
trellis = atsc.trellis_encoder()
softds = atsc.ds_to_softds()
viterbi = atsc.viterbi_decoder()
deinter = atsc.deinterleaver()
rs_dec = atsc.rs_decoder()
derand = atsc.derandomizer()
dst = vector_sink_ts()
self.tb.connect(src, rand, rs_enc, inter, trellis, softds, viterbi, deinter, rs_dec, derand, dst)
self.tb.run ()
result_data = dst.data ()[((interleaver_delay+viterbi_delay)*atsc.ATSC_MPEG_PKT_LENGTH):len(dst.data())]
self.assertEqual (expected_result, result_data)
if __name__ == '__main__':
gr_unittest.main()
| gpl-3.0 | 3,616,353,379,019,858,000 | -5,508,483,541,685,273,000 | 30.195556 | 113 | 0.643254 | false |
pychess/pychess | setup.py | 1 | 12398 | # -*- coding: UTF-8 -*-
from glob import glob
from os import listdir
from os.path import isdir, isfile
import os
import site
import sys
import subprocess
this_dir = os.path.dirname(os.path.abspath(__file__))
sys.path = [os.path.join(this_dir, "lib")] + sys.path
msi = False
if sys.argv[-1] == "bdist_msi":
try:
from cx_Freeze import setup, Executable
from cx_Freeze.windist import bdist_msi
msi = True
except ImportError:
print("ERROR: can't import cx_Freeze!")
sys.exit(1)
else:
from distutils.core import setup
if sys.version_info < (3, 5, 0):
print('ERROR: PyChess requires Python >= 3.5.0')
sys.exit(1)
if sys.platform == "win32":
try:
from gi.repository import Gtk
print("Gtk verion is %s.%s.%s", (Gtk.MAJOR_VERSION, Gtk.MINOR_VERSION, Gtk.MICRO_VERSION))
except ImportError:
print('ERROR: PyChess in Windows Platform requires to install PyGObject.')
print('Installing from http://sourceforge.net/projects/pygobjectwin32')
sys.exit(1)
from imp import load_module, find_module
pychess = load_module("pychess", *find_module("pychess", ["lib"]))
VERSION = pychess.VERSION
NAME = "pychess"
# We have to subclass register command because
# PyChess from another author already exist on pypi.
from distutils.command.register import register
class RegisterCommand(register):
def run(self):
self.distribution.metadata.name = "PyChess-%s" % pychess.VERSION_NAME
register.run(self)
DESC = "Chess client"
LONG_DESC = """PyChess is a chess client for playing and analyzing chess games. It is
intended to be usable both for those totally new to chess as well as
advanced users who want to use a computer to further enhance their play.
PyChess has a builtin python chess engine and auto-detects most
popular chess engines (Stockfish, Rybka, Houdini, Shredder, GNU Chess,
Crafty, Fruit, and many more). These engines are available as opponents,
and are used to provide hints and analysis. PyChess also shows analysis
from opening books and Gaviota end-game tablebases.
When you get sick of playing computer players you can login to FICS (the
Free Internet Chess Server) and play against people all over the world.
PyChess has a built-in Timeseal client, so you won't lose clock time during
a game due to lag. PyChess also has pre-move support, which means you can
make (or start making) a move before your opponent has made their move.
PyChess has many other features including:
- CECP and UCI chess engine support with customizable engine configurations
- Polyglot opening book support
- Hint and Spy move arrows
- Hint, Score, and Annotation panels
- Play and analyze games in separate game tabs
- 18 chess variants including Chess960, Suicide, Crazyhouse, Shuffle, Losers, Piece Odds, and Atomic
- Reads and writes PGN, EPD and FEN chess file formats
- Undo and pause chess games
- Move animation in games
- Drag and drop chess files
- Optional game move and event sounds
- Chess piece themes with 40 built-in piece themes
- Legal move highlighting
- Direct copy+paste pgn game input via Enter Game Notation open-game dialog
- Internationalised text and Figurine Algebraic Notation (FAN) support
- Translated into 38 languages (languages with +5% strings translated)
- Easy to use and intuitive look and feel"""
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Environment :: X11 Applications :: GTK',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: POSIX',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Games/Entertainment :: Board Games']
os.chdir(os.path.abspath(os.path.dirname(__file__)))
# save
stderr = sys.stderr
stdout = sys.stdout
if not isfile("eco.db"):
with open("pgn2ecodb.py") as fh:
exec(fh.read())
if not isfile(os.path.abspath("pieces/Spatial.png")):
with open("create_theme_preview.py") as fh:
exec(fh.read())
# restore
sys.stderr = stderr
sys.stdout = stdout
DATA_FILES = [("share/pychess",
["README.md", "AUTHORS", "ARTISTS", "DOCUMENTERS",
"LICENSE", "TRANSLATORS", "pychess_book.bin", "eco.db"])]
# UI
DATA_FILES += [("share/pychess/glade", glob('glade/*.glade'))]
DATA_FILES += [("share/pychess/glade", ['glade/background.jpg'])]
DATA_FILES += [("share/pychess/glade", glob('glade/*.png'))]
DATA_FILES += [("share/pychess/glade/16x16", glob('glade/16x16/*.png'))]
DATA_FILES += [("share/pychess/glade/48x48", glob('glade/48x48/*.png'))]
DATA_FILES += [("share/pychess/glade", glob('glade/*.svg'))]
DATA_FILES += [("share/pychess/flags", glob('flags/*.png'))]
DATA_FILES += [("share/pychess/boards", glob('boards/*.png'))]
# Data
DATA_FILES += [('share/mime/packages', ['pychess.xml'])]
DATA_FILES += [('share/metainfo', ['pychess.metainfo.xml'])]
DATA_FILES += [('share/applications', ['pychess.desktop'])]
DATA_FILES += [('share/icons/hicolor/scalable/apps', ['pychess.svg'])]
DATA_FILES += [('share/menu', ['menu/pychess'])]
DATA_FILES += [('share/pixmaps', ['pychess.svg', 'pychess.xmp'])]
if sys.platform == "win32":
DATA_FILES += [("share/pychess/sounds", glob('sounds/*.wav'))]
DATA_FILES += [("share/pychess/engines", glob('engines/*.*'))]
else:
DATA_FILES += [("share/pychess/sounds", glob('sounds/*.ogg'))]
DATA_FILES += [('share/icons/hicolor/24x24/apps', ['pychess.png'])]
DATA_FILES += [('share/gtksourceview-3.0/language-specs', ['gtksourceview-3.0/language-specs/pgn.lang'])]
# Piece sets
DATA_FILES += [("share/pychess/pieces", glob('pieces/*.png'))]
DATA_FILES += [("share/pychess/pieces/ttf", glob('pieces/ttf/*.ttf'))]
if not isfile(os.path.abspath("learn/puzzles/mate_in_4.sqlite")):
from pychess.Savers.pgn import PGNFile
from pychess.System.protoopen import protoopen
# Lectures, puzzles, lessons
for filename in glob('learn/puzzles/*.pgn'):
chessfile = PGNFile(protoopen(filename))
chessfile.init_tag_database()
for filename in glob('learn/lessons/*.pgn'):
chessfile = PGNFile(protoopen(filename))
chessfile.init_tag_database()
DATA_FILES += [("share/pychess/learn/puzzles", glob('learn/puzzles/*.olv'))]
DATA_FILES += [("share/pychess/learn/puzzles", glob('learn/puzzles/*.pgn'))]
DATA_FILES += [("share/pychess/learn/puzzles", glob('learn/puzzles/*.sqlite'))]
DATA_FILES += [("share/pychess/learn/lessons", glob('learn/lessons/*.pgn'))]
DATA_FILES += [("share/pychess/learn/lessons", glob('learn/lessons/*.sqlite'))]
DATA_FILES += [("share/pychess/learn/lectures", glob('learn/lectures/*.txt'))]
for dir in [d for d in listdir('pieces') if isdir(os.path.join('pieces', d)) and d != 'ttf']:
DATA_FILES += [("share/pychess/pieces/" + dir, glob('pieces/' + dir + '/*.svg'))]
# Manpages
DATA_FILES += [('share/man/man1', ['manpages/pychess.1.gz'])]
# Language
pofile = "LC_MESSAGES/pychess"
if sys.platform == "win32":
argv0_path = os.path.dirname(os.path.abspath(sys.executable))
if pychess.MSYS2:
major, minor, micro, releaselevel, serial = sys.version_info
msgfmt_path = argv0_path + "/../lib/python%s.%s/tools/i18n/" % (major, minor)
else:
msgfmt_path = argv0_path + "/tools/i18n/"
msgfmt = "%s %smsgfmt.py" % (os.path.abspath(sys.executable), msgfmt_path)
else:
msgfmt = "msgfmt"
pychess_langs = []
for dir in [d for d in listdir("lang") if isdir("lang/" + d) and d != "en"]:
if sys.platform == "win32":
command = "%s lang/%s/%s.po" % (msgfmt, dir, pofile)
else:
command = "%s lang/%s/%s.po -o lang/%s/%s.mo" % (msgfmt, dir, pofile, dir, pofile)
subprocess.call(command.split())
DATA_FILES += [("share/locale/" + dir + "/LC_MESSAGES", ["lang/" + dir + "/" + pofile + ".mo"])]
pychess_langs.append(dir)
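# For reference (standard gettext behaviour, command shown is illustrative):
# each iteration above is equivalent to running, e.g.,
#     msgfmt lang/de/LC_MESSAGES/pychess.po -o lang/de/LC_MESSAGES/pychess.mo
# for every translated language directory.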
PACKAGES = []
if msi:
if pychess.MSYS2:
gtk_data_path = sys.prefix
gtk_exec_path = os.path.join(sys.prefix, "bin")
lang_path = os.path.join(sys.prefix, "share", "locale")
else:
# Get the site-package folder, not everybody will install
# Python into C:\PythonXX
site_dir = site.getsitepackages()[1]
gtk_data_path = os.path.join(site_dir, "gnome")
gtk_exec_path = os.path.join(site_dir, "gnome")
lang_path = os.path.join(site_dir, "gnome", "share", "locale")
# gtk3.0 .mo files
gtk_mo = [f + "/LC_MESSAGES/gtk30.mo" for f in os.listdir(lang_path) if f in pychess_langs]
# Collect the list of missing dll when cx_freeze builds the app
gtk_exec = ['libgtksourceview-3.0-1.dll',
'libjpeg-8.dll',
'librsvg-2-2.dll',
]
# We need to add all the libraries too (for themes, etc..)
gtk_data = ['etc',
'lib/gdk-pixbuf-2.0',
'lib/girepository-1.0',
'share/icons/adwaita/icon-theme.cache',
'share/icons/adwaita/index.theme',
'share/icons/adwaita/16x16',
'share/icons/adwaita/24x24',
'share/icons/adwaita/48x48',
'share/glib-2.0']
# Create the list of includes as cx_freeze likes
include_files = []
for mo in gtk_mo:
mofile = os.path.join(lang_path, mo)
if os.path.isfile(mofile):
include_files.append((mofile, "share/locale/" + mo))
for dll in gtk_exec:
include_files.append((os.path.join(gtk_exec_path, dll), dll))
# Let's add gtk data
for lib in gtk_data:
include_files.append((os.path.join(gtk_data_path, lib), lib))
base = None
# Lets not open the console while running the app
if sys.platform == "win32":
base = "Win32GUI"
executables = [Executable("pychess",
base=base,
icon="pychess.ico",
shortcutName="PyChess",
shortcutDir="DesktopFolder"),
Executable(script="lib/__main__.py",
targetName="pychess-engine.exe",
base=base)]
bdist_msi_options = {
"upgrade_code": "{5167584f-c196-428f-be40-4c861025e90a}",
"add_to_path": False}
perspectives = ["pychess.perspectives"]
for persp in ("welcome", "games", "fics", "database", "learn"):
perspectives.append("pychess.perspectives.%s" % persp)
build_exe_options = {
"path": sys.path + ["lib"],
"includes": ["gi"],
"packages": ["asyncio", "gi", "sqlalchemy.dialects.sqlite", "sqlalchemy.sql.default_comparator", "pexpect", "pychess"] + perspectives,
"include_files": include_files}
if pychess.MSYS2:
build_exe_options["excludes"] = ["tkinter"]
else:
build_exe_options["include_msvcr"] = True
else:
PACKAGES = ["pychess", "pychess.gfx", "pychess.ic", "pychess.ic.managers",
"pychess.Players", "pychess.Savers", "pychess.System",
"pychess.Utils", "pychess.Utils.lutils", "pychess.Variants",
"pychess.Database", "pychess.widgets", "pychess.widgets.pydock",
"pychess.perspectives", "pychess.perspectives.welcome",
"pychess.perspectives.games", "pychess.perspectives.fics",
"pychess.perspectives.database", "pychess.perspectives.learn",
"pychess.external"]
build_exe_options = {}
bdist_msi_options = {}
executables = {}
setup(
cmdclass={"register": RegisterCommand},
name=NAME,
version=VERSION,
author='Pychess team',
author_email='[email protected]',
maintainer='Thomas Dybdahl Ahle',
classifiers=CLASSIFIERS,
keywords='python gtk chess xboard gnuchess game pgn epd board linux',
description=DESC,
long_description=LONG_DESC,
license='GPL3',
url='http://pychess.org',
download_url='http://pychess.org/download/',
package_dir={'': 'lib'},
packages=PACKAGES,
data_files=DATA_FILES,
scripts=['pychess'],
options={"build_exe": build_exe_options,
"bdist_msi": bdist_msi_options},
executables=executables
)
| gpl-3.0 | -643,296,627,309,764,500 | -764,314,577,996,437,400 | 37.030675 | 142 | 0.640103 | false |
fossoult/odoo | addons/account_followup/account_followup.py | 49 | 28947 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api
from openerp.osv import fields, osv
from lxml import etree
from openerp.tools.translate import _
class followup(osv.osv):
_name = 'account_followup.followup'
_description = 'Account Follow-up'
_rec_name = 'name'
_columns = {
'followup_line': fields.one2many('account_followup.followup.line', 'followup_id', 'Follow-up', copy=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
'name': fields.related('company_id', 'name', string = "Name", readonly=True, type="char"),
}
_defaults = {
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'account_followup.followup', context=c),
}
_sql_constraints = [('company_uniq', 'unique(company_id)', 'Only one follow-up per company is allowed')]
class followup_line(osv.osv):
def _get_default_template(self, cr, uid, ids, context=None):
try:
return self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_followup', 'email_template_account_followup_default')[1]
except ValueError:
return False
_name = 'account_followup.followup.line'
_description = 'Follow-up Criteria'
_columns = {
'name': fields.char('Follow-Up Action', required=True),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of follow-up lines."),
'delay': fields.integer('Due Days', help="The number of days after the due date of the invoice to wait before sending the reminder. Could be negative if you want to send a polite alert beforehand.", required=True),
'followup_id': fields.many2one('account_followup.followup', 'Follow Ups', required=True, ondelete="cascade"),
'description': fields.text('Printed Message', translate=True),
'send_email':fields.boolean('Send an Email', help="When processing, it will send an email"),
'send_letter':fields.boolean('Send a Letter', help="When processing, it will print a letter"),
'manual_action':fields.boolean('Manual Action', help="When processing, it will set the manual action to be taken for that customer. "),
'manual_action_note':fields.text('Action To Do', placeholder="e.g. Give a phone call, check with others , ..."),
'manual_action_responsible_id':fields.many2one('res.users', 'Assign a Responsible', ondelete='set null'),
'email_template_id':fields.many2one('email.template', 'Email Template', ondelete='set null'),
}
_order = 'delay'
_sql_constraints = [('days_uniq', 'unique(followup_id, delay)', 'Days of the follow-up levels must be different')]
_defaults = {
'send_email': True,
'send_letter': True,
'manual_action':False,
'description': """
Dear %(partner_name)s,
Unless there has been an error on our part, it seems that the following amount remains unpaid. Please take the appropriate measures to settle this payment within the next 8 days.
If your payment was made after this email was sent, please disregard this message. Do not hesitate to contact our accounting department.
Best Regards,
""",
'email_template_id': _get_default_template,
}
def _check_description(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if line.description:
try:
line.description % {'partner_name': '', 'date':'', 'user_signature': '', 'company_name': ''}
except:
return False
return True
_constraints = [
(_check_description, 'Your description is invalid, use the right legend or %% if you want to use the percent character.', ['description']),
]
class account_move_line(osv.osv):
def _get_result(self, cr, uid, ids, name, arg, context=None):
res = {}
for aml in self.browse(cr, uid, ids, context=context):
res[aml.id] = aml.debit - aml.credit
return res
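    # Illustrative: a move line with debit=100.0 and credit=30.0 yields
    # result == 70.0, the residual balance shown in follow-up reports.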
_inherit = 'account.move.line'
_columns = {
'followup_line_id': fields.many2one('account_followup.followup.line', 'Follow-up Level',
ondelete='restrict'), #restrict deletion of the followup line
'followup_date': fields.date('Latest Follow-up', select=True),
'result':fields.function(_get_result, type='float', method=True,
string="Balance") #'balance' field is not the same
}
class res_partner(osv.osv):
def fields_view_get(self, cr, uid, view_id=None, view_type=None, context=None, toolbar=False, submenu=False):
res = super(res_partner, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context,
toolbar=toolbar, submenu=submenu)
context = context or {}
if view_type == 'form' and context.get('Followupfirst'):
doc = etree.XML(res['arch'], parser=None, base_url=None)
first_node = doc.xpath("//page[@name='followup_tab']")
root = first_node[0].getparent()
root.insert(0, first_node[0])
res['arch'] = etree.tostring(doc, encoding="utf-8")
return res
def _get_latest(self, cr, uid, ids, names, arg, context=None, company_id=None):
res={}
if company_id == None:
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
else:
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
for partner in self.browse(cr, uid, ids, context=context):
amls = partner.unreconciled_aml_ids
latest_date = False
latest_level = False
latest_days = False
latest_level_without_lit = False
latest_days_without_lit = False
for aml in amls:
if (aml.company_id == company) and (aml.followup_line_id != False) and (not latest_days or latest_days < aml.followup_line_id.delay):
latest_days = aml.followup_line_id.delay
latest_level = aml.followup_line_id.id
if (aml.company_id == company) and (not latest_date or latest_date < aml.followup_date):
latest_date = aml.followup_date
if (aml.company_id == company) and (aml.blocked == False) and (aml.followup_line_id != False and
(not latest_days_without_lit or latest_days_without_lit < aml.followup_line_id.delay)):
latest_days_without_lit = aml.followup_line_id.delay
latest_level_without_lit = aml.followup_line_id.id
res[partner.id] = {'latest_followup_date': latest_date,
'latest_followup_level_id': latest_level,
'latest_followup_level_id_without_lit': latest_level_without_lit}
return res
@api.cr_uid_ids_context
def do_partner_manual_action(self, cr, uid, partner_ids, context=None):
#partner_ids -> res.partner
for partner in self.browse(cr, uid, partner_ids, context=context):
#Check action: check if the action was not empty, if not add
action_text= ""
if partner.payment_next_action:
action_text = (partner.payment_next_action or '') + "\n" + (partner.latest_followup_level_id_without_lit.manual_action_note or '')
else:
action_text = partner.latest_followup_level_id_without_lit.manual_action_note or ''
#Check date: only change when it did not exist already
action_date = partner.payment_next_action_date or fields.date.context_today(self, cr, uid, context=context)
# Check responsible: if partner has not got a responsible already, take from follow-up
responsible_id = False
if partner.payment_responsible_id:
responsible_id = partner.payment_responsible_id.id
else:
p = partner.latest_followup_level_id_without_lit.manual_action_responsible_id
responsible_id = p and p.id or False
self.write(cr, uid, [partner.id], {'payment_next_action_date': action_date,
'payment_next_action': action_text,
'payment_responsible_id': responsible_id})
def do_partner_print(self, cr, uid, wizard_partner_ids, data, context=None):
#wizard_partner_ids are ids from special view, not from res.partner
if not wizard_partner_ids:
return {}
data['partner_ids'] = wizard_partner_ids
datas = {
'ids': wizard_partner_ids,
'model': 'account_followup.followup',
'form': data
}
return self.pool['report'].get_action(cr, uid, [], 'account_followup.report_followup', data=datas, context=context)
@api.cr_uid_ids_context
def do_partner_mail(self, cr, uid, partner_ids, context=None):
if context is None:
context = {}
ctx = context.copy()
ctx['followup'] = True
#partner_ids are res.partner ids
# If not defined by latest follow-up level, it will be the default template if it can find it
mtp = self.pool.get('email.template')
unknown_mails = 0
for partner in self.browse(cr, uid, partner_ids, context=ctx):
if partner.email and partner.email.strip():
level = partner.latest_followup_level_id_without_lit
if level and level.send_email and level.email_template_id and level.email_template_id.id:
mtp.send_mail(cr, uid, level.email_template_id.id, partner.id, context=ctx)
else:
mail_template_id = self.pool.get('ir.model.data').get_object_reference(cr, uid,
'account_followup', 'email_template_account_followup_default')
mtp.send_mail(cr, uid, mail_template_id[1], partner.id, context=ctx)
else:
unknown_mails = unknown_mails + 1
action_text = _("Email not sent because of email address of partner not filled in")
if partner.payment_next_action_date:
payment_action_date = min(fields.date.context_today(self, cr, uid, context=ctx), partner.payment_next_action_date)
else:
payment_action_date = fields.date.context_today(self, cr, uid, context=ctx)
if partner.payment_next_action:
payment_next_action = partner.payment_next_action + " \n " + action_text
else:
payment_next_action = action_text
self.write(cr, uid, [partner.id], {'payment_next_action_date': payment_action_date,
'payment_next_action': payment_next_action}, context=ctx)
return unknown_mails
def get_followup_table_html(self, cr, uid, ids, context=None):
""" Build the html tables to be included in emails send to partners,
when reminding them their overdue invoices.
:param ids: [id] of the partner for whom we are building the tables
:rtype: string
"""
from report import account_followup_print
assert len(ids) == 1
if context is None:
context = {}
partner = self.browse(cr, uid, ids[0], context=context)
#copy the context to not change global context. Overwrite it because _() looks for the lang in local variable 'context'.
#Set the language to use = the partner language
context = dict(context, lang=partner.lang)
followup_table = ''
if partner.unreconciled_aml_ids:
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
current_date = fields.date.context_today(self, cr, uid, context=context)
rml_parse = account_followup_print.report_rappel(cr, uid, "followup_rml_parser")
final_res = rml_parse._lines_get_with_partner(partner, company.id)
for currency_dict in final_res:
currency = currency_dict.get('line', [{'currency_id': company.currency_id}])[0]['currency_id']
followup_table += '''
<table border="2" width=100%%>
<tr>
<td>''' + _("Invoice Date") + '''</td>
<td>''' + _("Description") + '''</td>
<td>''' + _("Reference") + '''</td>
<td>''' + _("Due Date") + '''</td>
<td>''' + _("Amount") + " (%s)" % (currency.symbol) + '''</td>
<td>''' + _("Lit.") + '''</td>
</tr>
'''
total = 0
for aml in currency_dict['line']:
block = aml['blocked'] and 'X' or ' '
total += aml['balance']
strbegin = "<TD>"
strend = "</TD>"
date = aml['date_maturity'] or aml['date']
if date <= current_date and aml['balance'] > 0:
strbegin = "<TD><B>"
strend = "</B></TD>"
followup_table +="<TR>" + strbegin + str(aml['date']) + strend + strbegin + aml['name'] + strend + strbegin + (aml['ref'] or '') + strend + strbegin + str(date) + strend + strbegin + str(aml['balance']) + strend + strbegin + block + strend + "</TR>"
total = reduce(lambda x, y: x+y['balance'], currency_dict['line'], 0.00)
total = rml_parse.formatLang(total, dp='Account', currency_obj=currency)
followup_table += '''<tr> </tr>
</table>
<center>''' + _("Amount due") + ''' : %s </center>''' % (total)
return followup_table
def write(self, cr, uid, ids, vals, context=None):
if vals.get("payment_responsible_id", False):
for part in self.browse(cr, uid, ids, context=context):
                if part.payment_responsible_id != vals["payment_responsible_id"]:
#Find partner_id of user put as responsible
responsible_partner_id = self.pool.get("res.users").browse(cr, uid, vals['payment_responsible_id'], context=context).partner_id.id
self.pool.get("mail.thread").message_post(cr, uid, 0,
body = _("You became responsible to do the next action for the payment follow-up of") + " <b><a href='#id=" + str(part.id) + "&view_type=form&model=res.partner'> " + part.name + " </a></b>",
type = 'comment',
subtype = "mail.mt_comment", context = context,
model = 'res.partner', res_id = part.id,
partner_ids = [responsible_partner_id])
return super(res_partner, self).write(cr, uid, ids, vals, context=context)
def action_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'payment_next_action_date': False, 'payment_next_action':'', 'payment_responsible_id': False}, context=context)
def do_button_print(self, cr, uid, ids, context=None):
assert(len(ids) == 1)
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
#search if the partner has accounting entries to print. If not, it may not be present in the
#psql view the report is based on, so we need to stop the user here.
if not self.pool.get('account.move.line').search(cr, uid, [
('partner_id', '=', ids[0]),
('account_id.type', '=', 'receivable'),
('reconcile_id', '=', False),
('state', '!=', 'draft'),
('company_id', '=', company_id),
'|', ('date_maturity', '=', False), ('date_maturity', '<=', fields.date.context_today(self, cr, uid)),
], context=context):
raise osv.except_osv(_('Error!'),_("The partner does not have any accounting entries to print in the overdue report for the current company."))
self.message_post(cr, uid, [ids[0]], body=_('Printed overdue payments report'), context=context)
#build the id of this partner in the psql view. Could be replaced by a search with [('company_id', '=', company_id),('partner_id', '=', ids[0])]
wizard_partner_ids = [ids[0] * 10000 + company_id]
followup_ids = self.pool.get('account_followup.followup').search(cr, uid, [('company_id', '=', company_id)], context=context)
if not followup_ids:
raise osv.except_osv(_('Error!'),_("There is no followup plan defined for the current company."))
data = {
'date': fields.date.today(),
'followup_id': followup_ids[0],
}
#call the print overdue report on this partner
return self.do_partner_print(cr, uid, wizard_partner_ids, data, context=context)
def _get_amounts_and_date(self, cr, uid, ids, name, arg, context=None):
'''
Function that computes values for the followup functional fields. Note that 'payment_amount_due'
is similar to 'credit' field on res.partner except it filters on user's company.
'''
res = {}
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
current_date = fields.date.context_today(self, cr, uid, context=context)
for partner in self.browse(cr, uid, ids, context=context):
worst_due_date = False
amount_due = amount_overdue = 0.0
for aml in partner.unreconciled_aml_ids:
if (aml.company_id == company):
date_maturity = aml.date_maturity or aml.date
if not worst_due_date or date_maturity < worst_due_date:
worst_due_date = date_maturity
amount_due += aml.result
if (date_maturity <= current_date):
amount_overdue += aml.result
res[partner.id] = {'payment_amount_due': amount_due,
'payment_amount_overdue': amount_overdue,
'payment_earliest_due_date': worst_due_date}
return res
def _get_followup_overdue_query(self, cr, uid, args, overdue_only=False, context=None):
'''
This function is used to build the query and arguments to use when making a search on functional fields
* payment_amount_due
* payment_amount_overdue
Basically, the query is exactly the same except that for overdue there is an extra clause in the WHERE.
:param args: arguments given to the search in the usual domain notation (list of tuples)
:param overdue_only: option to add the extra argument to filter on overdue accounting entries or not
:returns: a tuple with
* the query to execute as first element
* the arguments for the execution of this query
:rtype: (string, [])
'''
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
having_where_clause = ' AND '.join(map(lambda x: '(SUM(bal2) %s %%s)' % (x[1]), args))
having_values = [x[2] for x in args]
query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)
overdue_only_str = overdue_only and 'AND date_maturity <= NOW()' or ''
return ('''SELECT pid AS partner_id, SUM(bal2) FROM
(SELECT CASE WHEN bal IS NOT NULL THEN bal
ELSE 0.0 END AS bal2, p.id as pid FROM
(SELECT (debit-credit) AS bal, partner_id
FROM account_move_line l
WHERE account_id IN
(SELECT id FROM account_account
WHERE type=\'receivable\' AND active)
''' + overdue_only_str + '''
AND reconcile_id IS NULL
AND company_id = %s
AND ''' + query + ''') AS l
RIGHT JOIN res_partner p
ON p.id = partner_id ) AS pl
GROUP BY pid HAVING ''' + having_where_clause, [company_id] + having_values)
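    # Illustrative sketch only (not executed here): how the (query, args)
    # pair built above is consumed. The domain tuples come straight from the
    # fnct_search call, e.g. [('payment_amount_overdue', '>', 0)].
    #
    #   query, query_args = self._get_followup_overdue_query(
    #       cr, uid, [('payment_amount_overdue', '>', 0)],
    #       overdue_only=True, context=context)
    #   cr.execute(query, query_args)    # rows of (partner_id, SUM(bal2))
    #   partner_ids = [row[0] for row in cr.fetchall()]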
def _payment_overdue_search(self, cr, uid, obj, name, args, context=None):
if not args:
return []
query, query_args = self._get_followup_overdue_query(cr, uid, args, overdue_only=True, context=context)
cr.execute(query, query_args)
res = cr.fetchall()
if not res:
return [('id','=','0')]
return [('id','in', [x[0] for x in res])]
def _payment_earliest_date_search(self, cr, uid, obj, name, args, context=None):
if not args:
return []
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
having_where_clause = ' AND '.join(map(lambda x: '(MIN(l.date_maturity) %s %%s)' % (x[1]), args))
having_values = [x[2] for x in args]
query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)
cr.execute('SELECT partner_id FROM account_move_line l '\
'WHERE account_id IN '\
'(SELECT id FROM account_account '\
'WHERE type=\'receivable\' AND active) '\
'AND l.company_id = %s '
'AND reconcile_id IS NULL '\
'AND '+query+' '\
'AND partner_id IS NOT NULL '\
'GROUP BY partner_id HAVING '+ having_where_clause,
[company_id] + having_values)
res = cr.fetchall()
if not res:
return [('id','=','0')]
return [('id','in', [x[0] for x in res])]
def _payment_due_search(self, cr, uid, obj, name, args, context=None):
if not args:
return []
query, query_args = self._get_followup_overdue_query(cr, uid, args, overdue_only=False, context=context)
cr.execute(query, query_args)
res = cr.fetchall()
if not res:
return [('id','=','0')]
return [('id','in', [x[0] for x in res])]
def _get_partners(self, cr, uid, ids, context=None):
        #this function searches for the partners linked to all account.move.line 'ids' that have been changed
partners = set()
for aml in self.browse(cr, uid, ids, context=context):
if aml.partner_id:
partners.add(aml.partner_id.id)
return list(partners)
_inherit = "res.partner"
_columns = {
'payment_responsible_id':fields.many2one('res.users', ondelete='set null', string='Follow-up Responsible',
help="Optionally you can assign a user to this field, which will make him responsible for the action.",
track_visibility="onchange", copy=False),
'payment_note':fields.text('Customer Payment Promise', help="Payment Note", track_visibility="onchange", copy=False),
'payment_next_action':fields.text('Next Action', copy=False,
help="This is the next action to be taken. It will automatically be set when the partner gets a follow-up level that requires a manual action. ",
track_visibility="onchange"),
'payment_next_action_date': fields.date('Next Action Date', copy=False,
help="This is when the manual follow-up is needed. "
"The date will be set to the current date when the partner "
"gets a follow-up level that requires a manual action. "
"Can be practical to set manually e.g. to see if he keeps "
"his promises."),
'unreconciled_aml_ids':fields.one2many('account.move.line', 'partner_id', domain=['&', ('reconcile_id', '=', False), '&',
('account_id.active','=', True), '&', ('account_id.type', '=', 'receivable'), ('state', '!=', 'draft')]),
'latest_followup_date':fields.function(_get_latest, method=True, type='date', string="Latest Follow-up Date",
help="Latest date that the follow-up level of the partner was changed",
store=False, multi="latest"),
'latest_followup_level_id':fields.function(_get_latest, method=True,
type='many2one', relation='account_followup.followup.line', string="Latest Follow-up Level",
help="The maximum follow-up level",
store={
'res.partner': (lambda self, cr, uid, ids, c: ids,[],10),
'account.move.line': (_get_partners, ['followup_line_id'], 10),
},
multi="latest"),
'latest_followup_level_id_without_lit':fields.function(_get_latest, method=True,
type='many2one', relation='account_followup.followup.line', string="Latest Follow-up Level without litigation",
help="The maximum follow-up level without taking into account the account move lines with litigation",
store={
'res.partner': (lambda self, cr, uid, ids, c: ids,[],10),
'account.move.line': (_get_partners, ['followup_line_id'], 10),
},
multi="latest"),
'payment_amount_due':fields.function(_get_amounts_and_date,
type='float', string="Amount Due",
store = False, multi="followup",
fnct_search=_payment_due_search),
'payment_amount_overdue':fields.function(_get_amounts_and_date,
type='float', string="Amount Overdue",
store = False, multi="followup",
fnct_search = _payment_overdue_search),
'payment_earliest_due_date':fields.function(_get_amounts_and_date,
type='date',
string = "Worst Due Date",
multi="followup",
fnct_search=_payment_earliest_date_search),
}
class account_config_settings(osv.TransientModel):
_name = 'account.config.settings'
_inherit = 'account.config.settings'
def open_followup_level_form(self, cr, uid, ids, context=None):
res_ids = self.pool.get('account_followup.followup').search(cr, uid, [], context=context)
return {
'type': 'ir.actions.act_window',
'name': 'Payment Follow-ups',
'res_model': 'account_followup.followup',
'res_id': res_ids and res_ids[0] or False,
'view_mode': 'form,tree',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,076,486,105,784,649,000 | 3,414,130,598,767,600,600 | 56.434524 | 269 | 0.552631 | false |
JackDanger/airflow | airflow/jobs.py | 11 | 24429 | from builtins import str
from past.builtins import basestring
from collections import defaultdict
from datetime import datetime
import getpass
import logging
import signal
import socket
import subprocess
import sys
from time import sleep
from sqlalchemy import Column, Integer, String, DateTime, func, Index
from sqlalchemy.orm.session import make_transient
from airflow import executors, models, settings, utils
from airflow.configuration import conf
from airflow.utils import AirflowException, State
Base = models.Base
ID_LEN = models.ID_LEN
# Setting up a statsd client if needed
statsd = None
if conf.get('scheduler', 'statsd_on'):
from statsd import StatsClient
statsd = StatsClient(
host=conf.get('scheduler', 'statsd_host'),
port=conf.getint('scheduler', 'statsd_port'),
prefix=conf.get('scheduler', 'statsd_prefix'))
class BaseJob(Base):
"""
Abstract class to be derived for jobs. Jobs are processing items with state
and duration that aren't task instances. For instance a BackfillJob is
    a collection of task instance runs, but should have its own state, start
and end time.
"""
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN),)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(DateTime())
end_date = Column(DateTime())
latest_heartbeat = Column(DateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__mapper_args__ = {
'polymorphic_on': job_type,
'polymorphic_identity': 'BaseJob'
}
__table_args__ = (
Index('job_type_heart', job_type, latest_heartbeat),
)
def __init__(
self,
executor=executors.DEFAULT_EXECUTOR,
heartrate=conf.getint('scheduler', 'JOB_HEARTBEAT_SEC'),
*args, **kwargs):
self.hostname = socket.gethostname()
self.executor = executor
self.executor_class = executor.__class__.__name__
self.start_date = datetime.now()
self.latest_heartbeat = datetime.now()
self.heartrate = heartrate
self.unixname = getpass.getuser()
super(BaseJob, self).__init__(*args, **kwargs)
def is_alive(self):
return (
(datetime.now() - self.latest_heartbeat).seconds <
(conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
)
def kill(self):
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.end_date = datetime.now()
try:
self.on_kill()
except:
logging.error('on_kill() method failed')
session.merge(job)
session.commit()
session.close()
raise AirflowException("Job shut down externally.")
def on_kill(self):
'''
Will be called when an external kill command is received
'''
pass
def heartbeat_callback(self):
pass
def heartbeat(self):
'''
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allows for the job to be killed
externally. This allows at the system level to monitor what is
actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
'''
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
if job.state == State.SHUTDOWN:
self.kill()
if job.latest_heartbeat:
sleep_for = self.heartrate - (
datetime.now() - job.latest_heartbeat).total_seconds()
if sleep_for > 0:
sleep(sleep_for)
job.latest_heartbeat = datetime.now()
session.merge(job)
session.commit()
session.close()
self.heartbeat_callback()
logging.debug('[heart] Boom.')
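    # Sketch of the cadence implemented above (illustration, not executed):
    # with heartrate=60 and 10s of work since the last beat the call sleeps
    # 60 - 10 = 50s; with 70s of work sleep_for is negative and no sleep
    # happens, so a slow loop is never delayed further.
    #
    #   sleep_for = heartrate - (now - latest_heartbeat).total_seconds()
    #   if sleep_for > 0:
    #       sleep(sleep_for)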
def run(self):
if statsd:
statsd.incr(self.__class__.__name__.lower()+'_start', 1, 1)
# Adding an entry in the DB
session = settings.Session()
self.state = State.RUNNING
session.add(self)
session.commit()
id_ = self.id
make_transient(self)
self.id = id_
# Run
self._execute()
# Marking the success in the DB
self.end_date = datetime.now()
self.state = State.SUCCESS
session.merge(self)
session.commit()
session.close()
if statsd:
statsd.incr(self.__class__.__name__.lower()+'_end', 1, 1)
def _execute(self):
        raise NotImplementedError("This method needs to be overridden")
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs indefinitely and constantly schedules the jobs
that are ready to run. It figures out the latest runs for each
task and see if the dependencies for the next schedules are met.
If so it triggers the task instance. It does this for each task
in each DAG and repeats.
:param dag_id: to run the scheduler for a single specific DAG
:type dag_id: string
:param subdir: to search for DAG under a certain folder only
:type subdir: string
:param test_mode: used for unit testing this class only, runs a single
schedule run
:type test_mode: bool
:param refresh_dags_every: force refresh the DAG definition every N
runs, as specified here
:type refresh_dags_every: int
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
def __init__(
self,
dag_id=None,
subdir=None,
test_mode=False,
refresh_dags_every=10,
num_runs=None,
*args, **kwargs):
self.dag_id = dag_id
self.subdir = subdir
if test_mode:
self.num_runs = 1
else:
self.num_runs = num_runs
self.refresh_dags_every = refresh_dags_every
super(SchedulerJob, self).__init__(*args, **kwargs)
self.heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
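    # Hypothetical invocation sketch (the DAG id and folder are assumptions,
    # the signature is the one defined above):
    #
    #   job = SchedulerJob(dag_id='my_dag', subdir='/path/to/dags',
    #                      num_runs=10, refresh_dags_every=5)
    #   job.run()   # BaseJob.run() records the job row, then _execute()
    #               # loops the scheduler num_runs times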
@utils.provide_session
def manage_slas(self, dag, session=None):
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
        We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.filter(TI.dag_id == dag.dag_id)
.filter(TI.state == State.SUCCESS)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = datetime.now()
SlaMiss = models.SlaMiss
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if task.sla:
dttm += dag.schedule_interval
while dttm < datetime.now():
if dttm + task.sla + dag.schedule_interval < datetime.now():
session.merge(models.SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm += dag.schedule_interval
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.email_sent == False)
.filter(SlaMiss.dag_id == dag.dag_id)
.all()
)
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
from airflow import ascii
email_content = """\
        Here's a list of tasks that missed their SLAs:
        <pre><code>{task_list}\n{ascii.bug}</code></pre>
""".format(**locals())
emails = []
for t in dag.tasks:
if t.email:
if isinstance(t.email, basestring):
l = [t.email]
elif isinstance(t.email, (list, tuple)):
l = t.email
for email in l:
if email not in emails:
emails.append(email)
if emails and len(slas):
utils.send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
for sla in slas:
sla.email_sent = True
session.merge(sla)
session.commit()
session.close()
def import_errors(self, dagbag):
session = settings.Session()
session.query(models.ImportError).delete()
for filename, stacktrace in list(dagbag.import_errors.items()):
session.add(models.ImportError(
filename=filename, stacktrace=stacktrace))
session.commit()
def process_dag(self, dag, executor):
"""
This method schedules a single DAG by looking at the latest
run for each task and attempting to schedule the following run.
As multiple schedulers may be running for redundancy, this
function takes a lock on the DAG and timestamps the last run
in ``last_scheduler_run``.
"""
DagModel = models.DagModel
session = settings.Session()
db_dag = session.query(
DagModel).filter(DagModel.dag_id == dag.dag_id).first()
last_scheduler_run = db_dag.last_scheduler_run or datetime(2000, 1, 1)
secs_since_last = (
datetime.now() - last_scheduler_run).total_seconds()
# if db_dag.scheduler_lock or
if secs_since_last < self.heartrate:
session.commit()
session.close()
return None
else:
# Taking a lock
db_dag.scheduler_lock = True
db_dag.last_scheduler_run = datetime.now()
session.commit()
TI = models.TaskInstance
logging.info(
"Getting latest instance "
"for all task in dag " + dag.dag_id)
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.filter(TI.dag_id == dag.dag_id)
.group_by(TI.task_id).subquery('sq')
)
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
)
logging.debug("Querying max dates for each task")
latest_ti = qry.all()
ti_dict = {ti.task_id: ti for ti in latest_ti}
session.expunge_all()
session.commit()
logging.debug("{} rows returned".format(len(latest_ti)))
for task in dag.tasks:
if task.adhoc:
continue
if task.task_id not in ti_dict:
# Brand new task, let's get started
ti = TI(task, task.start_date)
ti.refresh_from_db()
if ti.is_queueable(flag_upstream_failed=True):
logging.info(
'First run for {ti}'.format(**locals()))
executor.queue_task_instance(ti)
else:
ti = ti_dict[task.task_id]
ti.task = task # Hacky but worky
if ti.state == State.RUNNING:
continue # Only one task at a time
elif ti.state == State.UP_FOR_RETRY:
                    # If task instance is up for retry, make sure
# the retry delay is met
if ti.is_runnable():
logging.debug('Triggering retry: ' + str(ti))
executor.queue_task_instance(ti)
elif ti.state == State.QUEUED:
                    # If it was queued we skip it here so that it gets prioritized
# in self.prioritize_queued
continue
else:
# Trying to run the next schedule
next_schedule = (
ti.execution_date + task.schedule_interval)
if (
ti.task.end_date and
next_schedule > ti.task.end_date):
continue
ti = TI(
task=task,
execution_date=next_schedule,
)
ti.refresh_from_db()
if ti.is_queueable(flag_upstream_failed=True):
logging.debug('Queuing next run: ' + str(ti))
executor.queue_task_instance(ti)
# Releasing the lock
logging.debug("Unlocking DAG (scheduler_lock)")
db_dag = (
session.query(DagModel)
.filter(DagModel.dag_id == dag.dag_id)
.first()
)
db_dag.scheduler_lock = False
session.merge(db_dag)
session.commit()
session.close()
@utils.provide_session
def prioritize_queued(self, session, executor, dagbag):
# Prioritizing queued task instances
pools = {p.pool: p for p in session.query(models.Pool).all()}
TI = models.TaskInstance
queued_tis = (
session.query(TI)
.filter(TI.state == State.QUEUED)
.all()
)
session.expunge_all()
d = defaultdict(list)
for ti in queued_tis:
if (
ti.dag_id not in dagbag.dags or not
dagbag.dags[ti.dag_id].has_task(ti.task_id)):
# Deleting queued jobs that don't exist anymore
session.delete(ti)
session.commit()
else:
d[ti.pool].append(ti)
for pool, tis in list(d.items()):
open_slots = pools[pool].open_slots(session=session)
if open_slots > 0:
tis = sorted(
tis, key=lambda ti: (-ti.priority_weight, ti.start_date))
for ti in tis[:open_slots]:
task = None
try:
task = dagbag.dags[ti.dag_id].get_task(ti.task_id)
except:
logging.error("Queued task {} seems gone".format(ti))
session.delete(ti)
if task:
ti.task = task
if ti.are_dependencies_met():
executor.queue_task_instance(ti, force=True)
else:
session.delete(ti)
session.commit()
def _execute(self):
dag_id = self.dag_id
def signal_handler(signum, frame):
logging.error("SIGINT (ctrl-c) received")
sys.exit(1)
signal.signal(signal.SIGINT, signal_handler)
utils.pessimistic_connection_handling()
logging.basicConfig(level=logging.DEBUG)
logging.info("Starting the scheduler")
dagbag = models.DagBag(self.subdir, sync_to_db=True)
executor = dagbag.executor
executor.start()
i = 0
while not self.num_runs or self.num_runs > i:
loop_start_dttm = datetime.now()
try:
self.prioritize_queued(executor=executor, dagbag=dagbag)
except Exception as e:
logging.exception(e)
i += 1
try:
if i % self.refresh_dags_every == 0:
dagbag = models.DagBag(self.subdir, sync_to_db=True)
else:
dagbag.collect_dags(only_if_updated=True)
except:
logging.error("Failed at reloading the dagbag")
if statsd:
statsd.incr('dag_refresh_error', 1, 1)
sleep(5)
if dag_id:
dags = [dagbag.dags[dag_id]]
else:
dags = [
dag for dag in dagbag.dags.values() if not dag.parent_dag]
paused_dag_ids = dagbag.paused_dags()
for dag in dags:
logging.debug("Scheduling {}".format(dag.dag_id))
dag = dagbag.get_dag(dag.dag_id)
if not dag or (dag.dag_id in paused_dag_ids):
continue
try:
self.process_dag(dag, executor)
self.manage_slas(dag)
except Exception as e:
logging.exception(e)
logging.info(
"Done queuing tasks, calling the executor's heartbeat")
duration_sec = (datetime.now() - loop_start_dttm).total_seconds()
logging.info("Loop took: {} seconds".format(duration_sec))
try:
self.import_errors(dagbag)
except Exception as e:
logging.exception(e)
try:
# We really just want the scheduler to never ever stop.
executor.heartbeat()
self.heartbeat()
except Exception as e:
logging.exception(e)
logging.error("Tachycardia!")
def heartbeat_callback(self):
if statsd:
statsd.gauge('scheduler_heartbeat', 1, 1)
class BackfillJob(BaseJob):
"""
A backfill job consists of a dag or subdag for a specific time range. It
triggers a set of task instance runs, in the right order and lasts for
    as long as it takes for the set of task instances to be completed.
"""
__mapper_args__ = {
'polymorphic_identity': 'BackfillJob'
}
def __init__(
self,
dag, start_date=None, end_date=None, mark_success=False,
include_adhoc=False,
donot_pickle=False,
ignore_dependencies=False,
*args, **kwargs):
self.dag = dag
dag.override_start_date(start_date)
self.dag_id = dag.dag_id
self.bf_start_date = start_date
self.bf_end_date = end_date
self.mark_success = mark_success
self.include_adhoc = include_adhoc
self.donot_pickle = donot_pickle
self.ignore_dependencies = ignore_dependencies
super(BackfillJob, self).__init__(*args, **kwargs)
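    # Hypothetical usage sketch; `dag` would be a models.DAG instance and
    # the dates are placeholders:
    #
    #   job = BackfillJob(dag,
    #                     start_date=datetime(2015, 1, 1),
    #                     end_date=datetime(2015, 1, 7),
    #                     donot_pickle=True)
    #   job.run()   # raises AirflowException if any task instance failed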
def _execute(self):
"""
Runs a dag for a specified date range.
"""
session = settings.Session()
start_date = self.bf_start_date
end_date = self.bf_end_date
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = models.DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
# Build a list of all instances to run
tasks_to_run = {}
failed = []
succeeded = []
started = []
wont_run = []
for task in self.dag.tasks:
if (not self.include_adhoc) and task.adhoc:
continue
start_date = start_date or task.start_date
end_date = end_date or task.end_date or datetime.now()
for dttm in utils.date_range(
start_date, end_date, task.dag.schedule_interval):
ti = models.TaskInstance(task, dttm)
tasks_to_run[ti.key] = ti
# Triggering what is ready to get triggered
while tasks_to_run:
for key, ti in list(tasks_to_run.items()):
ti.refresh_from_db()
if ti.state == State.SUCCESS and key in tasks_to_run:
succeeded.append(key)
del tasks_to_run[key]
elif ti.is_runnable():
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
task_start_date=self.bf_start_date,
pickle_id=pickle_id,
ignore_dependencies=self.ignore_dependencies)
ti.state = State.RUNNING
if key not in started:
started.append(key)
self.heartbeat()
executor.heartbeat()
# Reacting to events
for key, state in list(executor.get_event_buffer().items()):
dag_id, task_id, execution_date = key
if key not in tasks_to_run:
continue
ti = tasks_to_run[key]
ti.refresh_from_db()
if ti.state == State.FAILED:
failed.append(key)
logging.error("Task instance " + str(key) + " failed")
del tasks_to_run[key]
# Removing downstream tasks from the one that has failed
for t in self.dag.get_task(task_id).get_flat_relatives(
upstream=False):
key = (ti.dag_id, t.task_id, execution_date)
if key in tasks_to_run:
wont_run.append(key)
del tasks_to_run[key]
elif ti.state == State.SUCCESS:
succeeded.append(key)
del tasks_to_run[key]
msg = (
"[backfill progress] "
"waiting: {0} | "
"succeeded: {1} | "
"kicked_off: {2} | "
"failed: {3} | "
"skipped: {4} ").format(
len(tasks_to_run),
len(succeeded),
len(started),
len(failed),
len(wont_run))
logging.info(msg)
executor.end()
session.close()
if failed:
raise AirflowException(
"Some tasks instances failed, here's the list:\n"+str(failed))
logging.info("All done. Exiting.")
class LocalTaskJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'LocalTaskJob'
}
def __init__(
self,
task_instance,
ignore_dependencies=False,
force=False,
mark_success=False,
pickle_id=None,
task_start_date=None,
*args, **kwargs):
self.task_instance = task_instance
self.ignore_dependencies = ignore_dependencies
self.force = force
self.pickle_id = pickle_id
self.mark_success = mark_success
self.task_start_date = task_start_date
super(LocalTaskJob, self).__init__(*args, **kwargs)
def _execute(self):
command = self.task_instance.command(
raw=True,
ignore_dependencies=self.ignore_dependencies,
force=self.force,
pickle_id=self.pickle_id,
mark_success=self.mark_success,
task_start_date=self.task_start_date,
job_id=self.id,
)
self.process = subprocess.Popen(['bash', '-c', command])
return_code = None
while return_code is None:
self.heartbeat()
return_code = self.process.poll()
def on_kill(self):
self.process.terminate()
| apache-2.0 | 602,133,035,293,789,200 | -4,483,767,338,343,905,300 | 33.749644 | 80 | 0.527406 | false |
froststars/aws-cfn-templates | solutions/CWLogs2S3/ExportCloudWatchLogsToS3Template.py | 1 | 8006 | # -*- encoding: utf-8 -*-
__author__ = 'kotaimen'
__date__ = '04/06/2017'
from troposphere import Base64, FindInMap, GetAtt, Join, Select, Sub
from troposphere import ImportValue, Export
from troposphere import Condition, And, Equals, If, Not, Or
from troposphere import Template, Parameter, Ref, Tags, Output
from troposphere import AWS_ACCOUNT_ID, AWS_REGION, AWS_STACK_ID, \
AWS_STACK_NAME, AWS_NO_VALUE
from troposphere import Delete, Retain, Snapshot
from troposphere.policies import CreationPolicy, ResourceSignal, UpdatePolicy, \
AutoScalingReplacingUpdate, AutoScalingRollingUpdate
import troposphere.cloudformation as cloudformation
import troposphere.iam as iam
import troposphere.awslambda as awslambda
import troposphere.events as events
import troposphere.stepfunctions as stepfunctions
from awacs.aws import Policy, Allow, Deny, Statement, Principal, Everybody
from awacs.aws import Condition, Bool, ArnEquals, StringEquals, IpAddress, Null
from awacs.aws import CurrentTime, EpochTime, MultiFactorAuthAge, Referer, \
SecureTransport, SourceArn, SourceIp, UserAgent
import awacs.sts
import awacs.cloudformation
import awacs.iam
import awacs.awslambda
import awacs.logs
import awacs.aws
import cfnutil
#
# Template
#
t = Template()
t.add_version('2010-09-09')
t.add_description('Export CloudWatch Logs to S3 on a schedule.')
#
# Interface
#
parameter_groups = [
{
'Label': {'default': 'Export Configuration'},
'Parameters': [
'LogGroupName',
'BucketStack',
'ExportPrefix',
'ExportStatus',
'ExportInterval',
],
},
]
t.add_metadata(
{
'AWS::CloudFormation::Interface': {
'ParameterGroups': parameter_groups,
'ParameterLabels':
dict(cfnutil.generate_parameter_labels(parameter_groups))
}
}
)
#
# Parameters
#
param_loggroup_name = t.add_parameter(Parameter(
'LogGroupName',
Type='String',
Description='Name of the CloudWatch Logs LogGroup to export',
Default='Logs'
))
param_bucket_stack = t.add_parameter(Parameter(
'BucketStack',
Description='Name of a stack exporting s3 bucket name',
Default='SampleStack',
Type='String',
MinLength=1,
MaxLength=128,
AllowedPattern='^[a-zA-Z][-a-zA-Z0-9]*$',
ConstraintDescription='must be a valid stack name.'
))
param_export_prefix = t.add_parameter(Parameter(
'ExportPrefix',
Type='String',
Description='S3 prefix of the export',
Default='exportedlogs'
))
param_export_status = t.add_parameter(Parameter(
'ExportStatus',
Type='String',
Description='Whether the schedule is enabled',
AllowedValues=['ENABLED', 'DISABLED'],
Default='DISABLED'
))
param_export_interval = t.add_parameter(Parameter(
'ExportInterval',
Type='String',
Description='Export interval',
AllowedValues=['day', 'week'],
Default='day'
))
#
# Condition
#
t.add_condition(
'IsDailySchedule',
Equals(Ref(param_export_interval), 'day'),
)
#
# Resource
#
lambda_execution_role = t.add_resource(iam.Role(
'LambdaExecutionRole',
AssumeRolePolicyDocument=Policy(
Statement=[Statement(
Effect=Allow,
Action=[awacs.sts.AssumeRole],
Principal=Principal('Service', ['lambda.amazonaws.com'])
)]
),
ManagedPolicyArns=[
'arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole',
],
Policies=[
iam.Policy(
PolicyName='AllowCreateExportTask',
PolicyDocument=Policy(
Version='2012-10-17',
Statement=[Statement(
Effect=Allow,
Action=[
awacs.logs.CreateExportTask,
awacs.logs.DescribeExportTasks,
],
Resource=['*']
)]
)
),
]
))
lambda_function = t.add_resource(awslambda.Function(
'LambdaFunction',
    Description='Export CloudWatch Logs to S3',
Code=awslambda.Code(
ZipFile=cfnutil.load_python_lambda('lambdas/cwlogs-export.py')
),
Handler='index.lambda_handler',
Role=GetAtt(lambda_execution_role, 'Arn'),
Runtime='python2.7',
MemorySize='128',
Timeout='15',
Environment=awslambda.Environment(
Variables={
'EXPORT_INTERVAL': Ref(param_export_interval),
'EXPORT_LOGGROUP': Ref(param_loggroup_name),
'EXPORT_DST_BUCKET': ImportValue(
Sub('${BucketStack}-BucketName')),
'EXPORT_DST_PREFIX': Ref(param_export_prefix)
}
),
))
states_execution_role = t.add_resource(iam.Role(
'StatesExecutionRole',
AssumeRolePolicyDocument=Policy(
Statement=[Statement(
Effect=Allow,
Action=[awacs.sts.AssumeRole],
Principal=Principal('Service',
[Sub('states.${AWS::Region}.amazonaws.com')])
)]
),
ManagedPolicyArns=[
],
Policies=[
iam.Policy(
PolicyName='AllowCreateExportTask',
PolicyDocument=Policy(
Version='2012-10-17',
Statement=[Statement(
Effect=Allow,
Action=[
awacs.awslambda.InvokeFunction,
],
Resource=[GetAtt(lambda_function, 'Arn')]
)]
)
),
]
))
states_machine = t.add_resource(stepfunctions.StateMachine(
'StateMachine',
RoleArn=GetAtt(states_execution_role, 'Arn'),
DefinitionString=Sub('''{
"Comment": "A Retry example of the Amazon States Language using an AWS Lambda Function",
"StartAt": "CreateExportTask",
"States": {
"CreateExportTask": {
"Type": "Task",
"Resource": "${EXPORT_LAMBDA_FUNCTION}",
"Retry": [
{
"ErrorEquals": [ "States.Timeout" ],
"MaxAttempts": 0
},
{
"ErrorEquals": [ "States.ALL" ],
"IntervalSeconds": 30,
"MaxAttempts": 10,
"BackoffRate": 2.0
}
],
"End": true
}
}
}''', EXPORT_LAMBDA_FUNCTION=GetAtt(lambda_function, 'Arn'))
))
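# Sketch of the retry window encoded in the state machine above: with
# IntervalSeconds=30, BackoffRate=2.0 and MaxAttempts=10, retry n waits
# 30 * 2**(n-1) seconds, so the total back-off before giving up is
#
#   sum(30 * 2 ** n for n in range(10))   # = 30 * (2**10 - 1) = 30690s
#
# i.e. roughly 8.5 hours of retries for a failed export task.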
events_execution_role = t.add_resource(iam.Role(
'EventsExecutionRole',
AssumeRolePolicyDocument=Policy(
Statement=[Statement(
Effect=Allow,
Action=[awacs.sts.AssumeRole],
Principal=Principal('Service',
[Sub('events.amazonaws.com')])
)]
),
ManagedPolicyArns=[
],
Policies=[
iam.Policy(
PolicyName='AllowCreateExportTask',
PolicyDocument=Policy(
Version='2012-10-17',
Statement=[Statement(
Effect=Allow,
Action=[
awacs.aws.Action(prefix='states',
action='StartExecution'),
],
Resource=[Ref(states_machine)]
)]
)
),
]
))
# XXX CloudFormation does not support rules->step functions yet
# events_rule = t.add_resource(events.Rule(
# 'ScheduledRule',
# ScheduleExpression=If('IsDailySchedule',
# 'cron(5 0 * * ? *)',
# 'cron(5 0 ? * 2 *)'),
# State=Ref(param_export_status),
# RoleArn=GetAtt(events_execution_role, 'Arn'),
# Targets=[
# events.Target(
# Arn=Ref(states_machine),
# Id='1',
# )
# ]
# ))
#
# Output
#
#
# Write
#
with open(__file__.replace('Template.py', '.template'), 'w') as f:
f.write(t.to_json(indent=2))
| mit | -1,933,941,832,912,177,400 | -7,384,770,924,017,748,000 | 25.865772 | 96 | 0.565576 | false |
kasioumis/invenio | invenio/modules/search/searchext/services/LHCBeamStatusService.py | 15 | 2564 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebSearch service to display LHC beam status
"""
from invenio.modules.search.services import SearchService
from invenio.base.i18n import gettext_set_language
from invenio.config import CFG_SITE_LANG, CFG_SITE_URL
__plugin_version__ = "Search Service Plugin API 1.0"
class LHCBeamStatusService(SearchService):
"""
Display LHC Beam Status
"""
def get_description(self, ln=CFG_SITE_LANG):
"Return service description"
return "Return LHC Beam status info"
def answer(self, req, user_info, of, cc, colls_to_search, p, f, search_units, ln):
"""
Answer question given by context.
Return (relevance, html_string) where relevance is integer
from 0 to 100 indicating how relevant to the question the
answer is (see C{CFG_WEBSEARCH_SERVICE_MAX_SERVICE_ANSWER_RELEVANCE} for details) ,
and html_string being a formatted answer.
"""
if f:
return (0, '')
words = [unit[1].lower() for unit in search_units if unit[2] == ""]
if not words:
return (0, '')
_ = gettext_set_language(ln)
if 'vistars' in words or \
(('lhc' in words or 'beam' in words) and \
'status' in words):
out = '''
<img id="vistar" src="%(CFG_SITE_URL)s/img/loading.gif"/>
<script language="javascript" type="text/javascript">
function refresh()
{
imgobj = $("#vistar")
imgobj.attr("src", 'http://cs-ccr-www3.cern.ch/vistar_capture/lhc1.png'+ '?'+Math.random()).stop(true,true).hide().show();
imgobj.attr("style", "max-width:600px");
setTimeout("refresh()", 8000);
}
$(document).ready(function(){
refresh();
});
</script>
''' % {'CFG_SITE_URL': CFG_SITE_URL}
return (70, out)
return (0, '')
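    # Hedged usage sketch: the services framework calls answer() with the
    # parsed query. A free-text search such as "lhc beam status" (no field,
    # so f is empty) reaches the branch above and returns (70, <html>),
    # while any fielded query short-circuits to (0, '') and the service
    # stays silent.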
| gpl-2.0 | 2,922,630,604,877,419,500 | -2,580,582,593,307,238,400 | 32.736842 | 123 | 0.652496 | false |
mattstibbs/blockbuster-server | blockbuster/bb_dbconnector_base.py | 1 | 4377 | class DBConnector:
def __init__(self):
pass
def db_status_check(self):
raise NotImplementedError()
def db_stats_check(self):
raise NotImplementedError()
def db_version_check(self):
raise NotImplementedError()
def checkifregexists(self):
raise NotImplementedError()
def add_log_table_entry(self):
raise NotImplementedError()
def remove_registration(self):
raise NotImplementedError()
class DBAnalyticsConnector:
def __init__(self):
pass
def add_analytics_record(self):
raise NotImplementedError()
class DBLogsConnector:
def __init__(self):
pass
def add_transaction_record(self):
raise NotImplementedError()
class DBBlocksConnector:
def __init__(self):
raise NotImplementedError()
def add_block_record(self):
raise NotImplementedError()
def get_list_of_blocks_for_blockee(self):
raise NotImplementedError()
def get_count_of_blocks_for_blockee(self):
raise NotImplementedError()
def get_list_of_blocks_for_blocker(self):
raise NotImplementedError()
def add_move_request(self):
raise NotImplementedError()
def remove_blocks(self):
raise NotImplementedError()
def get_open_move_requests(self):
raise NotImplementedError()
def remove_move_request(self):
raise NotImplementedError()
class DBPushoverConnector:
def __init__(self):
raise NotImplementedError()
def add_pushover_token_for_user(self):
raise NotImplementedError()
def get_pushover_user_token_from_mobile(self):
raise NotImplementedError()
def turn_push_notifications_on(self):
raise NotImplementedError()
def turn_push_notifications_off(self):
raise NotImplementedError()
class DBEmailConnector:
def __init__(self):
raise NotImplementedError()
def enable_email_notifications(self):
raise NotImplementedError()
def disable_email_notifications(self):
raise NotImplementedError()
def update_email_address(self):
raise NotImplementedError()
def get_email_address(self):
raise NotImplementedError()
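# Minimal sketch (an assumption, not part of this package) of how a concrete
# backend could satisfy one of these interfaces; the table and column names
# are hypothetical:
#
#   class PostgresEmailConnector(DBEmailConnector):
#       def __init__(self, conn):
#           self.conn = conn              # skip the base-class raise
#       def get_email_address(self, mobile_number):
#           cur = self.conn.cursor()
#           cur.execute("SELECT email FROM registrations WHERE mobile = %s",
#                       (mobile_number,))
#           row = cur.fetchone()
#           return row[0] if row else None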
class DBUserConnector:
def __init__(self):
raise NotImplementedError()
def get_api_credentials(self):
raise NotImplementedError()
def number_is_registered(self):
raise NotImplementedError()
def mobile_sharing_enabled(self):
raise NotImplementedError()
def get_notification_preferences_for_user(self):
raise NotImplementedError()
def get_landline_from_reg(self):
raise NotImplementedError()
def get_name_from_mobile(self):
raise NotImplementedError()
def get_name_from_reg(self):
raise NotImplementedError()
def get_reg_from_mobile(self):
raise NotImplementedError()
def get_blocker_mobile_from_blockee_mobile(self):
raise NotImplementedError()
def remove_registration(self):
raise NotImplementedError()
def enable_mobile_number_sharing(self, mobile_number):
raise NotImplementedError()
def disable_mobile_number_sharing(self):
raise NotImplementedError()
def update_alternative_contact_text(self):
raise NotImplementedError()
def get_user_dict_from_mobile(self):
raise NotImplementedError()
def get_user_dict_from_reg(self):
raise NotImplementedError()
def remove_alternative_contact_text(self):
raise NotImplementedError()
class DBCarsConnector:
def __init__(self):
raise NotImplementedError()
def getCarDetailsAsDictionary(self):
raise NotImplementedError()
def register_new_car(self):
raise NotImplementedError()
class DBApiConnector:
def __init__(self):
raise NotImplementedError()
def api_registrations_get(self, registration):
raise NotImplementedError()
def api_registrations_getall(self):
raise NotImplementedError()
def api_blocks_getall(self):
raise NotImplementedError()
def api_smslogs_get(self):
raise NotImplementedError()
def api_logs_get(self):
raise NotImplementedError()
def api_logsms_get(self):
raise NotImplementedError() | mit | 5,565,893,760,965,917,000 | 8,429,132,376,677,736,000 | 18.371681 | 58 | 0.661412 | false |
backmari/moose | python/chigger/base/ChiggerFilterSourceBase.py | 6 | 5979 | #pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import copy
import vtk
import mooseutils
from ChiggerSourceBase import ChiggerSourceBase
class ChiggerFilterSourceBase(ChiggerSourceBase):
"""
    A base class for creating "source" objects (in VTK something that needs a vtkActor) that
require additional input into the mapper and are capable of accepting filters.
This class adds two main items:
    1. A getSource method is provided; this method should provide a VTK object that will be connected
       to the mapper or chain of filters (see 2).
2. Defines a method for adding filters and controlling the types and order in which they are
applied, see ExodusSource for example.
Inputs:
vtkactor_type: The VTK actor type to build, must be an instance of VTKACTOR_TYPE
vtkmapper_type: The VTK mapper type to build, must be an instance of VTKMAPPER_TYPE
**kwargs: The key, value options for this object.
"""
# The base class actor/mapper that this object to which ownership is restricted
VTKACTOR_TYPE = vtk.vtkProp
VTKMAPPER_TYPE = vtk.vtkAbstractMapper
# The list of filter types allowed, in the order they should be connected
FILTER_TYPES = []
@staticmethod
def getOptions():
opt = ChiggerSourceBase.getOptions()
opt.add('filters', [], "A list of Filter objects to apply to this mapper.")
return opt
def __init__(self, *args, **kwargs):
super(ChiggerFilterSourceBase, self).__init__(*args, **kwargs)
self._filters = []
self._required_filters = []
def getVTKSource(self):
"""
Return the "source" vtk object. (abstract)
Deriving classes must override this method. The VTK object returned from this function will
        be connected to the first filter, if any exist, or the vtkAbstractMapper object. See the
'update' method for this class for how the connections are made.
"""
raise mooseutils.MooseException('The {}."getSource()" method must be overridden by your '
'mapper object and return the source vtk object to connect '
                                        'to the filters and mapper.'.format(self.__class__.__name__))
def getFilters(self):
"""
Return the list of filter objects.
"""
return self._filters
def needsUpdate(self):
"""
Return True if the object needs to be updated.
"""
changed = [super(ChiggerFilterSourceBase, self).needsUpdate()]
for f in self._filters:
changed.append(f.needsUpdate())
return any(changed)
def update(self, **kwargs):
"""
Updates the object by connecting the VTK objects. (override)
Inputs:
see ChiggerSourceBase
"""
super(ChiggerFilterSourceBase, self).update(**kwargs)
self.__connectFilters()
# Initialize and update filters
for f in self._filters:
if f.needsInitialize():
f.initializeFilter(self)
if f.needsUpdate():
f.update()
def __connectFilters(self):
"""
Helper function for connecting filter to vtkMapper object.
"""
def debug(src, fltr):
"""
Inline function for debug messages.
"""
mooseutils.mooseDebug('{} --> {}'.format(type(src).__name__, type(fltr).__name__),
color='GREEN')
# Create a list of filters to apply to the VTK pipeline, this is done by
# combining the required filters with the 'filters' options. This combined list
# is then sorted based on the list provided in FILTER_TYPES.
filters = []
filters_in = copy.copy(self._required_filters) # shallow copy (don't modify require list)
if self.isOptionValid('filters'):
filters_in += self.getOption('filters')
for f in filters_in:
for i, order in enumerate(self.FILTER_TYPES):
if isinstance(f, order):
filters.append((f, i))
self._filters = [f[0] for f in sorted(filters, key=lambda x: x[1])]
# Connect the filters, if any exist
if self._filters:
debug(self.getVTKSource(), self._filters[0].getVTKFilter())
self._filters[0].getVTKFilter().SetInputConnection(self.getVTKSource().GetOutputPort())
for i in range(1, len(self._filters)):
debug(self._filters[i-1].getVTKFilter(), self._filters[i].getVTKFilter())
f = self._filters[i-1].getVTKFilter().GetOutputPort()
self._filters[i].getVTKFilter().SetInputConnection(f)
if self._vtkmapper:
debug(self._filters[-1].getVTKFilter(), self._vtkmapper)
self._vtkmapper.SetInputConnection(self._filters[-1].getVTKFilter().GetOutputPort())
elif self._vtkmapper:
debug(self.getVTKSource(), self._vtkmapper)
self._vtkmapper.SetInputConnection(self.getVTKSource().GetOutputPort())
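        # Sketch of the resulting pipeline (illustration only): with two
        # filters F0 and F1 surviving the FILTER_TYPES sort, the connections
        # made above are
        #
        #   source -> F0 -> F1 -> mapper
        #
        # and with no filters simply source -> mapper.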
| lgpl-2.1 | -4,861,716,968,150,373,000 | -2,140,870,336,250,557,200 | 40.811189 | 100 | 0.567319 | false |
dhruvsrivastava/OJ | python/lib/python2.7/site-packages/pip/req/req_file.py | 239 | 9670 | """
Requirements file parsing
"""
from __future__ import absolute_import
import os
import re
import shlex
import optparse
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves import filterfalse
import pip
from pip.download import get_file_content
from pip.req.req_install import InstallRequirement
from pip.exceptions import (RequirementsFileParseError)
from pip.utils import normalize_name
from pip import cmdoptions
__all__ = ['parse_requirements']
SCHEME_RE = re.compile(r'^(http|https|file):', re.I)
COMMENT_RE = re.compile(r'(^|\s)+#.*$')
SUPPORTED_OPTIONS = [
cmdoptions.constraints,
cmdoptions.editable,
cmdoptions.requirements,
cmdoptions.no_index,
cmdoptions.index_url,
cmdoptions.find_links,
cmdoptions.extra_index_url,
cmdoptions.allow_external,
cmdoptions.allow_all_external,
cmdoptions.no_allow_external,
cmdoptions.allow_unsafe,
cmdoptions.no_allow_unsafe,
cmdoptions.use_wheel,
cmdoptions.no_use_wheel,
cmdoptions.always_unzip,
cmdoptions.no_binary,
cmdoptions.only_binary,
]
# options to be passed to requirements
SUPPORTED_OPTIONS_REQ = [
cmdoptions.install_options,
cmdoptions.global_options
]
# the 'dest' string values
SUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ]
def parse_requirements(filename, finder=None, comes_from=None, options=None,
session=None, constraint=False, wheel_cache=None):
"""Parse a requirements file and yield InstallRequirement instances.
:param filename: Path or url of requirements file.
:param finder: Instance of pip.index.PackageFinder.
:param comes_from: Origin description of requirements.
:param options: Global options.
:param session: Instance of pip.download.PipSession.
:param constraint: If true, parsing a constraint file rather than
requirements file.
:param wheel_cache: Instance of pip.wheel.WheelCache
"""
if session is None:
raise TypeError(
"parse_requirements() missing 1 required keyword argument: "
"'session'"
)
_, content = get_file_content(
filename, comes_from=comes_from, session=session
)
lines = content.splitlines()
lines = ignore_comments(lines)
lines = join_lines(lines)
lines = skip_regex(lines, options)
for line_number, line in enumerate(lines, 1):
req_iter = process_line(line, filename, line_number, finder,
comes_from, options, session, wheel_cache,
constraint=constraint)
for req in req_iter:
yield req
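# Hedged usage sketch (assumes a local requirements.txt; PipSession is the
# session type this module expects):
#
#   from pip.download import PipSession
#   for req in parse_requirements('requirements.txt', session=PipSession()):
#       print(req.name)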
def process_line(line, filename, line_number, finder=None, comes_from=None,
options=None, session=None, wheel_cache=None,
constraint=False):
"""Process a single requirements line; This can result in creating/yielding
requirements, or updating the finder.
For lines that contain requirements, the only options that have an effect
are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
requirement. Other options from SUPPORTED_OPTIONS may be present, but are
ignored.
For lines that do not contain requirements, the only options that have an
effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
be present, but are ignored. These lines may contain multiple options
    (although our docs imply only one is supported), and all are parsed and
affect the finder.
:param constraint: If True, parsing a constraints file.
"""
parser = build_parser()
defaults = parser.get_default_values()
defaults.index_url = None
if finder:
# `finder.format_control` will be updated during parsing
defaults.format_control = finder.format_control
args_str, options_str = break_args_options(line)
opts, _ = parser.parse_args(shlex.split(options_str), defaults)
# preserve for the nested code path
line_comes_from = '%s %s (line %s)' % (
'-c' if constraint else '-r', filename, line_number)
# yield a line requirement
if args_str:
isolated = options.isolated_mode if options else False
if options:
cmdoptions.check_install_build_global(options, opts)
# get the options that apply to requirements
req_options = {}
for dest in SUPPORTED_OPTIONS_REQ_DEST:
if dest in opts.__dict__ and opts.__dict__[dest]:
req_options[dest] = opts.__dict__[dest]
yield InstallRequirement.from_line(
args_str, line_comes_from, constraint=constraint,
isolated=isolated, options=req_options, wheel_cache=wheel_cache
)
# yield an editable requirement
elif opts.editables:
isolated = options.isolated_mode if options else False
default_vcs = options.default_vcs if options else None
yield InstallRequirement.from_editable(
opts.editables[0], comes_from=line_comes_from,
constraint=constraint, default_vcs=default_vcs, isolated=isolated,
wheel_cache=wheel_cache
)
# parse a nested requirements file
elif opts.requirements or opts.constraints:
if opts.requirements:
req_path = opts.requirements[0]
nested_constraint = False
else:
req_path = opts.constraints[0]
nested_constraint = True
# original file is over http
if SCHEME_RE.search(filename):
# do a url join so relative paths work
req_path = urllib_parse.urljoin(filename, req_path)
# original file and nested file are paths
elif not SCHEME_RE.search(req_path):
# do a join so relative paths work
req_dir = os.path.dirname(filename)
            req_path = os.path.join(req_dir, req_path)
# TODO: Why not use `comes_from='-r {} (line {})'` here as well?
parser = parse_requirements(
req_path, finder, comes_from, options, session,
constraint=nested_constraint, wheel_cache=wheel_cache
)
for req in parser:
yield req
# set finder options
elif finder:
if opts.index_url:
finder.index_urls = [opts.index_url]
if opts.use_wheel is False:
finder.use_wheel = False
pip.index.fmt_ctl_no_use_wheel(finder.format_control)
if opts.no_index is True:
finder.index_urls = []
if opts.allow_all_external:
finder.allow_all_external = opts.allow_all_external
if opts.extra_index_urls:
finder.index_urls.extend(opts.extra_index_urls)
if opts.allow_external:
finder.allow_external |= set(
[normalize_name(v).lower() for v in opts.allow_external])
if opts.allow_unverified:
# Remove after 7.0
finder.allow_unverified |= set(
[normalize_name(v).lower() for v in opts.allow_unverified])
if opts.find_links:
# FIXME: it would be nice to keep track of the source
# of the find_links: support a find-links local path
# relative to a requirements file.
value = opts.find_links[0]
req_dir = os.path.dirname(os.path.abspath(filename))
relative_to_reqs_file = os.path.join(req_dir, value)
if os.path.exists(relative_to_reqs_file):
value = relative_to_reqs_file
finder.find_links.append(value)
def break_args_options(line):
"""Break up the line into an args and options string. We only want to shlex
(and then optparse) the options, not the args. args can contain markers
which are corrupted by shlex.
"""
tokens = line.split(' ')
args = []
options = tokens[:]
for token in tokens:
        if token.startswith('-'):  # '--' options start with '-' too
break
else:
args.append(token)
options.pop(0)
return ' '.join(args), ' '.join(options)
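# Illustrative example of the split performed above (hedged; the option
# shown is just a sample string, not a claim about supported flags):
#   break_args_options('INITools==0.1 --hash=sha256:deadbeef')
#   -> ('INITools==0.1', '--hash=sha256:deadbeef')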
def build_parser():
"""
Return a parser for parsing requirement lines
"""
parser = optparse.OptionParser(add_help_option=False)
option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ
for option_factory in option_factories:
option = option_factory()
parser.add_option(option)
# By default optparse sys.exits on parsing errors. We want to wrap
# that in our own exception.
def parser_exit(self, msg):
raise RequirementsFileParseError(msg)
parser.exit = parser_exit
return parser
def join_lines(iterator):
"""
    Joins a line ending in '\' with the line that follows it.
"""
lines = []
for line in iterator:
if not line.endswith('\\'):
if lines:
lines.append(line)
yield ''.join(lines)
lines = []
else:
yield line
else:
lines.append(line.strip('\\'))
# TODO: handle space after '\'.
# TODO: handle '\' on last line.
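# Illustrative example (hedged): a trailing backslash buffers the line and
# joins it with the one that follows.
#   list(join_lines(['foo \\', '--bar']))  # -> ['foo --bar']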
def ignore_comments(iterator):
"""
Strips and filters empty or commented lines.
"""
for line in iterator:
line = COMMENT_RE.sub('', line)
line = line.strip()
if line:
yield line
def skip_regex(lines, options):
"""
Optionally exclude lines that match '--skip-requirements-regex'
"""
skip_regex = options.skip_requirements_regex if options else None
if skip_regex:
lines = filterfalse(re.compile(skip_regex).search, lines)
return lines
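# Minimal sketch (hedged) of skip_regex in action; _Opts is a hypothetical
# stand-in for the real parsed-options object:
#   class _Opts(object):
#       skip_requirements_regex = '^-e'
#   list(skip_regex(iter(['-e .', 'pkg==1.0']), _Opts()))  # -> ['pkg==1.0']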
| bsd-3-clause | 4,474,282,344,824,024,600 | -2,687,236,122,968,229,000 | 33.29078 | 80 | 0.634436 | false |
wangyou/XX-Net | code/default/python27/1.0/lib/encodings/iso8859_10.py | 593 | 13845 | """ Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-10',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
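# Illustrative round-trip (hedged; assumes the codec is registered under the
# 'iso8859-10' alias, as in a stock Python 2 install):
#   u'\u0104'.encode('iso8859-10')  # -> '\xa1'
#   '\xa1'.decode('iso8859-10')     # -> u'\u0104'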
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u0112' # 0xA2 -> LATIN CAPITAL LETTER E WITH MACRON
u'\u0122' # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\u012a' # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON
u'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
u'\u0136' # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\xa7' # 0xA7 -> SECTION SIGN
u'\u013b' # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\u0110' # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u0160' # 0xAA -> LATIN CAPITAL LETTER S WITH CARON
u'\u0166' # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE
u'\u017d' # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u016a' # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON
u'\u014a' # 0xAF -> LATIN CAPITAL LETTER ENG
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
u'\u0113' # 0xB2 -> LATIN SMALL LETTER E WITH MACRON
u'\u0123' # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA
u'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
u'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
u'\u0137' # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u013c' # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA
u'\u0111' # 0xB9 -> LATIN SMALL LETTER D WITH STROKE
u'\u0161' # 0xBA -> LATIN SMALL LETTER S WITH CARON
u'\u0167' # 0xBB -> LATIN SMALL LETTER T WITH STROKE
u'\u017e' # 0xBC -> LATIN SMALL LETTER Z WITH CARON
u'\u2015' # 0xBD -> HORIZONTAL BAR
u'\u016b' # 0xBE -> LATIN SMALL LETTER U WITH MACRON
u'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
u'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
u'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u0168' # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
u'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
u'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
u'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\u0169' # 0xF7 -> LATIN SMALL LETTER U WITH TILDE
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
u'\u0138' # 0xFF -> LATIN SMALL LETTER KRA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| bsd-2-clause | 3,825,468,721,734,345,700 | 1,506,899,350,705,541,600 | 44.09772 | 109 | 0.539545 | false |
doug-fish/horizon | openstack_dashboard/test/test_data/cinder_data.py | 21 | 13932 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.v2 import availability_zones
from cinderclient.v2 import pools
from cinderclient.v2 import qos_specs
from cinderclient.v2 import quotas
from cinderclient.v2 import services
from cinderclient.v2 import volume_backups as vol_backups
from cinderclient.v2 import volume_encryption_types as vol_enc_types
from cinderclient.v2 import volume_snapshots as vol_snaps
from cinderclient.v2 import volume_transfers
from cinderclient.v2 import volume_types
from cinderclient.v2 import volumes
from openstack_dashboard import api
from openstack_dashboard.usage import quotas as usage_quotas
from openstack_dashboard.test.test_data import utils
def data(TEST):
TEST.cinder_services = utils.TestDataContainer()
TEST.cinder_volumes = utils.TestDataContainer()
TEST.cinder_volume_backups = utils.TestDataContainer()
TEST.cinder_volume_encryption_types = utils.TestDataContainer()
TEST.cinder_volume_types = utils.TestDataContainer()
TEST.cinder_volume_encryption = utils.TestDataContainer()
TEST.cinder_bootable_volumes = utils.TestDataContainer()
TEST.cinder_qos_specs = utils.TestDataContainer()
TEST.cinder_qos_spec_associations = utils.TestDataContainer()
TEST.cinder_volume_snapshots = utils.TestDataContainer()
TEST.cinder_quotas = utils.TestDataContainer()
TEST.cinder_quota_usages = utils.TestDataContainer()
TEST.cinder_availability_zones = utils.TestDataContainer()
TEST.cinder_volume_transfers = utils.TestDataContainer()
TEST.cinder_pools = utils.TestDataContainer()
# Services
service_1 = services.Service(services.ServiceManager(None), {
"service": "cinder-scheduler",
"status": "enabled",
"binary": "cinder-scheduler",
"zone": "internal",
"state": "up",
"updated_at": "2013-07-08T05:21:00.000000",
"host": "devstack001",
"disabled_reason": None
})
service_2 = services.Service(services.ServiceManager(None), {
"service": "cinder-volume",
"status": "enabled",
"binary": "cinder-volume",
"zone": "nova",
"state": "up",
"updated_at": "2013-07-08T05:20:51.000000",
"host": "devstack001",
"disabled_reason": None
})
TEST.cinder_services.add(service_1)
TEST.cinder_services.add(service_2)
# Volumes - Cinder v1
volume = volumes.Volume(
volumes.VolumeManager(None),
{'id': "11023e92-8008-4c8b-8059-7f2293ff3887",
'status': 'available',
'size': 40,
'display_name': 'Volume name',
'display_description': 'Volume description',
'created_at': '2014-01-27 10:30:00',
'volume_type': None,
'attachments': []})
nameless_volume = volumes.Volume(
volumes.VolumeManager(None),
{"id": "4b069dd0-6eaa-4272-8abc-5448a68f1cce",
"status": 'available',
"size": 10,
"display_name": '',
"display_description": '',
"device": "/dev/hda",
"created_at": '2010-11-21 18:34:25',
"volume_type": 'vol_type_1',
"attachments": []})
other_volume = volumes.Volume(
volumes.VolumeManager(None),
{'id': "21023e92-8008-1234-8059-7f2293ff3889",
'status': 'in-use',
'size': 10,
'display_name': u'my_volume',
'display_description': '',
'created_at': '2013-04-01 10:30:00',
'volume_type': None,
'attachments': [{"id": "1", "server_id": '1',
"device": "/dev/hda"}]})
volume_with_type = volumes.Volume(
volumes.VolumeManager(None),
{'id': "7dcb47fd-07d9-42c2-9647-be5eab799ebe",
'name': 'my_volume2',
'status': 'in-use',
'size': 10,
'display_name': u'my_volume2',
'display_description': '',
'created_at': '2013-04-01 10:30:00',
'volume_type': 'vol_type_2',
'attachments': [{"id": "2", "server_id": '2',
"device": "/dev/hdb"}]})
non_bootable_volume = volumes.Volume(
volumes.VolumeManager(None),
{'id': "21023e92-8008-1234-8059-7f2293ff3890",
'status': 'in-use',
'size': 10,
'display_name': u'my_volume',
'display_description': '',
'created_at': '2013-04-01 10:30:00',
'volume_type': None,
'bootable': False,
'attachments': [{"id": "1", "server_id": '1',
"device": "/dev/hda"}]})
volume.bootable = 'true'
nameless_volume.bootable = 'true'
other_volume.bootable = 'true'
TEST.cinder_volumes.add(api.cinder.Volume(volume))
TEST.cinder_volumes.add(api.cinder.Volume(nameless_volume))
TEST.cinder_volumes.add(api.cinder.Volume(other_volume))
TEST.cinder_volumes.add(api.cinder.Volume(volume_with_type))
TEST.cinder_bootable_volumes.add(api.cinder.Volume(non_bootable_volume))
vol_type1 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
{'id': u'1',
'name': u'vol_type_1',
'extra_specs': {'foo': 'bar'}})
vol_type2 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
{'id': u'2',
'name': u'vol_type_2'})
TEST.cinder_volume_types.add(vol_type1, vol_type2)
# Volumes - Cinder v2
volume_v2 = volumes.Volume(
volumes.VolumeManager(None),
{'id': "31023e92-8008-4c8b-8059-7f2293ff1234",
'name': 'v2_volume',
'description': "v2 Volume Description",
'status': 'available',
'size': 20,
'created_at': '2014-01-27 10:30:00',
'volume_type': None,
'os-vol-host-attr:host': 'host@backend-name#pool',
'bootable': 'true',
'attachments': []})
volume_v2.bootable = 'true'
TEST.cinder_volumes.add(api.cinder.Volume(volume_v2))
snapshot = vol_snaps.Snapshot(
vol_snaps.SnapshotManager(None),
{'id': '5f3d1c33-7d00-4511-99df-a2def31f3b5d',
'display_name': 'test snapshot',
'display_description': 'volume snapshot',
'size': 40,
'status': 'available',
'volume_id': '11023e92-8008-4c8b-8059-7f2293ff3887'})
snapshot2 = vol_snaps.Snapshot(
vol_snaps.SnapshotManager(None),
{'id': 'c9d0881a-4c0b-4158-a212-ad27e11c2b0f',
'name': '',
'description': 'v2 volume snapshot description',
'size': 80,
'status': 'available',
'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'})
snapshot.bootable = 'true'
snapshot2.bootable = 'true'
TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot))
TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot2))
TEST.cinder_volume_snapshots.first()._volume = volume
# Volume Type Encryption
vol_enc_type1 = vol_enc_types.VolumeEncryptionType(
vol_enc_types.VolumeEncryptionTypeManager(None),
{'volume_type_id': u'1',
'control_location': "front-end",
'key_size': 512,
'provider': "a-provider",
'cipher': "a-cipher"})
vol_enc_type2 = vol_enc_types.VolumeEncryptionType(
vol_enc_types.VolumeEncryptionTypeManager(None),
{'volume_type_id': u'2',
'control_location': "front-end",
'key_size': 256,
'provider': "a-provider",
'cipher': "a-cipher"})
vol_unenc_type1 = vol_enc_types.VolumeEncryptionType(
vol_enc_types.VolumeEncryptionTypeManager(None), {})
TEST.cinder_volume_encryption_types.add(vol_enc_type1, vol_enc_type2,
vol_unenc_type1)
volume_backup1 = vol_backups.VolumeBackup(
vol_backups.VolumeBackupManager(None),
{'id': 'a374cbb8-3f99-4c3f-a2ef-3edbec842e31',
'name': 'backup1',
'description': 'volume backup 1',
'size': 10,
'status': 'available',
'container_name': 'volumebackups',
'volume_id': '11023e92-8008-4c8b-8059-7f2293ff3887'})
volume_backup2 = vol_backups.VolumeBackup(
vol_backups.VolumeBackupManager(None),
{'id': 'c321cbb8-3f99-4c3f-a2ef-3edbec842e52',
'name': 'backup2',
'description': 'volume backup 2',
'size': 20,
'status': 'available',
'container_name': 'volumebackups',
'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'})
TEST.cinder_volume_backups.add(volume_backup1)
TEST.cinder_volume_backups.add(volume_backup2)
# Volume Encryption
vol_enc_metadata1 = volumes.Volume(
volumes.VolumeManager(None),
{'cipher': 'test-cipher',
'key_size': 512,
'provider': 'test-provider',
'control_location': 'front-end'})
vol_unenc_metadata1 = volumes.Volume(
volumes.VolumeManager(None),
{})
TEST.cinder_volume_encryption.add(vol_enc_metadata1)
TEST.cinder_volume_encryption.add(vol_unenc_metadata1)
# Quota Sets
quota_data = dict(volumes='1',
snapshots='1',
gigabytes='1000')
quota = quotas.QuotaSet(quotas.QuotaSetManager(None), quota_data)
TEST.cinder_quotas.add(api.base.QuotaSet(quota))
# Quota Usages
quota_usage_data = {'gigabytes': {'used': 0,
'quota': 1000},
'instances': {'used': 0,
'quota': 10},
'snapshots': {'used': 0,
'quota': 10}}
quota_usage = usage_quotas.QuotaUsage()
for k, v in quota_usage_data.items():
quota_usage.add_quota(api.base.Quota(k, v['quota']))
quota_usage.tally(k, v['used'])
TEST.cinder_quota_usages.add(quota_usage)
# Availability Zones
# Cinder returns the following structure from os-availability-zone
# {"availabilityZoneInfo":
# [{"zoneState": {"available": true}, "zoneName": "nova"}]}
# Note that the default zone is still "nova" even though this is cinder
TEST.cinder_availability_zones.add(
availability_zones.AvailabilityZone(
availability_zones.AvailabilityZoneManager(None),
{
'zoneName': 'nova',
'zoneState': {'available': True}
}
)
)
# Cinder Limits
limits = {"absolute": {"totalVolumesUsed": 1,
"totalGigabytesUsed": 5,
"maxTotalVolumeGigabytes": 1000,
"maxTotalVolumes": 10}}
TEST.cinder_limits = limits
# QOS Specs
qos_spec1 = qos_specs.QoSSpecs(
qos_specs.QoSSpecsManager(None),
{"id": "418db45d-6992-4674-b226-80aacad2073c",
"name": "high_iops",
"consumer": "back-end",
"specs": {"minIOPS": "1000", "maxIOPS": '100000'}})
qos_spec2 = qos_specs.QoSSpecs(
qos_specs.QoSSpecsManager(None),
{"id": "6ed7035f-992e-4075-8ed6-6eff19b3192d",
"name": "high_bws",
"consumer": "back-end",
"specs": {"maxBWS": '5000'}})
TEST.cinder_qos_specs.add(qos_spec1, qos_spec2)
vol_type1.associated_qos_spec = qos_spec1.name
TEST.cinder_qos_spec_associations.add(vol_type1)
# volume_transfers
transfer_1 = volume_transfers.VolumeTransfer(
volume_transfers.VolumeTransferManager(None), {
'id': '99999999-8888-7777-6666-555555555555',
'name': 'test transfer',
'volume_id': volume.id,
'auth_key': 'blah',
'created_at': ''})
TEST.cinder_volume_transfers.add(transfer_1)
# Pools
pool1 = pools.Pool(
pools.PoolManager(None), {
"QoS_support": False,
"allocated_capacity_gb": 0,
"driver_version": "3.0.0",
"free_capacity_gb": 10,
"extra_specs": {
"description": "LVM Extra specs",
"display_name": "LVMDriver",
"namespace": "OS::Cinder::LVMDriver",
"type": "object",
},
"name": "devstack@lvmdriver-1#lvmdriver-1",
"pool_name": "lvmdriver-1",
"reserved_percentage": 0,
"storage_protocol": "iSCSI",
"total_capacity_gb": 10,
"vendor_name": "Open Source",
"volume_backend_name": "lvmdriver-1"})
pool2 = pools.Pool(
pools.PoolManager(None), {
"QoS_support": False,
"allocated_capacity_gb": 2,
"driver_version": "3.0.0",
"free_capacity_gb": 15,
"extra_specs": {
"description": "LVM Extra specs",
"display_name": "LVMDriver",
"namespace": "OS::Cinder::LVMDriver",
"type": "object",
},
"name": "devstack@lvmdriver-2#lvmdriver-2",
"pool_name": "lvmdriver-2",
"reserved_percentage": 0,
"storage_protocol": "iSCSI",
"total_capacity_gb": 10,
"vendor_name": "Open Source",
"volume_backend_name": "lvmdriver-2"})
TEST.cinder_pools.add(pool1)
TEST.cinder_pools.add(pool2)
| apache-2.0 | -7,629,699,632,636,346,000 | 8,847,220,789,926,565,000 | 37.486188 | 78 | 0.58118 | false |
jeromeLB/client175 | cherrypy/process/win32.py | 17 | 5875 | """Windows service. Requires pywin32."""
import os
import thread
import win32api
import win32con
import win32event
import win32service
import win32serviceutil
from cherrypy.process import wspbus, plugins
class ConsoleCtrlHandler(plugins.SimplePlugin):
"""A WSPBus plugin for handling Win32 console events (like Ctrl-C)."""
def __init__(self, bus):
self.is_set = False
plugins.SimplePlugin.__init__(self, bus)
def start(self):
if self.is_set:
self.bus.log('Handler for console events already set.', level=40)
return
result = win32api.SetConsoleCtrlHandler(self.handle, 1)
if result == 0:
self.bus.log('Could not SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Set handler for console events.', level=40)
self.is_set = True
def stop(self):
if not self.is_set:
self.bus.log('Handler for console events already off.', level=40)
return
try:
result = win32api.SetConsoleCtrlHandler(self.handle, 0)
except ValueError:
# "ValueError: The object has not been registered"
result = 1
if result == 0:
self.bus.log('Could not remove SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Removed handler for console events.', level=40)
self.is_set = False
def handle(self, event):
"""Handle console control events (like Ctrl-C)."""
if event in (win32con.CTRL_C_EVENT, win32con.CTRL_LOGOFF_EVENT,
win32con.CTRL_BREAK_EVENT, win32con.CTRL_SHUTDOWN_EVENT,
win32con.CTRL_CLOSE_EVENT):
self.bus.log('Console event %s: shutting down bus' % event)
# Remove self immediately so repeated Ctrl-C doesn't re-call it.
try:
self.stop()
except ValueError:
pass
self.bus.exit()
# 'First to return True stops the calls'
return 1
return 0
class Win32Bus(wspbus.Bus):
"""A Web Site Process Bus implementation for Win32.
Instead of time.sleep, this bus blocks using native win32event objects.
"""
def __init__(self):
self.events = {}
wspbus.Bus.__init__(self)
def _get_state_event(self, state):
"""Return a win32event for the given state (creating it if needed)."""
try:
return self.events[state]
except KeyError:
event = win32event.CreateEvent(None, 0, 0,
u"WSPBus %s Event (pid=%r)" %
(state.name, os.getpid()))
self.events[state] = event
return event
def _get_state(self):
return self._state
def _set_state(self, value):
self._state = value
event = self._get_state_event(value)
win32event.PulseEvent(event)
state = property(_get_state, _set_state)
def wait(self, state, interval=0.1):
"""Wait for the given state(s), KeyboardInterrupt or SystemExit.
Since this class uses native win32event objects, the interval
argument is ignored.
"""
if isinstance(state, (tuple, list)):
# Don't wait for an event that beat us to the punch ;)
if self.state not in state:
events = tuple([self._get_state_event(s) for s in state])
win32event.WaitForMultipleObjects(events, 0, win32event.INFINITE)
else:
# Don't wait for an event that beat us to the punch ;)
if self.state != state:
event = self._get_state_event(state)
win32event.WaitForSingleObject(event, win32event.INFINITE)
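# Illustrative usage (hedged; assumes the standard wspbus state enum with
# members such as states.STARTED):
#   bus = Win32Bus()
#   # ...another thread eventually sets: bus.state = wspbus.states.STARTED
#   bus.wait(wspbus.states.STARTED)  # blocks on a native win32 event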
class _ControlCodes(dict):
"""Control codes used to "signal" a service via ControlService.
User-defined control codes are in the range 128-255. We generally use
the standard Python value for the Linux signal and add 128. Example:
>>> signal.SIGUSR1
10
control_codes['graceful'] = 128 + 10
"""
def key_for(self, obj):
"""For the given value, return its corresponding key."""
for key, val in self.iteritems():
if val is obj:
return key
raise ValueError("The given object could not be found: %r" % obj)
control_codes = _ControlCodes({'graceful': 138})
def signal_child(service, command):
if command == 'stop':
win32serviceutil.StopService(service)
elif command == 'restart':
win32serviceutil.RestartService(service)
else:
win32serviceutil.ControlService(service, control_codes[command])
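# Illustrative (hedged; the service name is whatever the service was
# registered under, e.g. PyWebService._svc_name_ below):
#   signal_child("Python Web Service", "graceful")  # sends control code 138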
class PyWebService(win32serviceutil.ServiceFramework):
"""Python Web Service."""
_svc_name_ = "Python Web Service"
_svc_display_name_ = "Python Web Service"
_svc_deps_ = None # sequence of service names on which this depends
_exe_name_ = "pywebsvc"
_exe_args_ = None # Default to no arguments
# Only exists on Windows 2000 or later, ignored on windows NT
_svc_description_ = "Python Web Service"
def SvcDoRun(self):
from cherrypy import process
process.bus.start()
process.bus.block()
def SvcStop(self):
from cherrypy import process
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
process.bus.exit()
    def SvcOther(self, control):
        from cherrypy import process
        process.bus.publish(control_codes.key_for(control))
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(PyWebService)
| gpl-3.0 | 3,035,687,172,111,568,000 | 8,525,064,863,401,764,000 | 32.571429 | 81 | 0.586213 | false |
whitehorse-io/encarnia | pyenv/lib/python2.7/site-packages/twisted/spread/test/test_jelly.py | 13 | 19990 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{jelly} object serialization.
"""
from __future__ import absolute_import, division
import datetime
import decimal
from twisted.python.compat import unicode
from twisted.spread import jelly, pb
from twisted.trial import unittest
from twisted.test.proto_helpers import StringTransport
class TestNode(jelly.Jellyable, object):
"""
An object to test jellyfying of new style class instances.
"""
classAttr = 4
def __init__(self, parent=None):
if parent:
self.id = parent.id + 1
parent.children.append(self)
else:
self.id = 1
self.parent = parent
self.children = []
class A:
"""
Dummy class.
"""
def amethod(self):
"""
Method to be used in serialization tests.
"""
def afunc(self):
"""
A dummy function to test function serialization.
"""
class B:
"""
Dummy class.
"""
def bmethod(self):
"""
Method to be used in serialization tests.
"""
class C:
"""
Dummy class.
"""
def cmethod(self):
"""
Method to be used in serialization tests.
"""
class D(object):
"""
Dummy new-style class.
"""
class E(object):
"""
Dummy new-style class with slots.
"""
__slots__ = ("x", "y")
def __init__(self, x=None, y=None):
self.x = x
self.y = y
def __getstate__(self):
return {"x" : self.x, "y" : self.y}
def __setstate__(self, state):
self.x = state["x"]
self.y = state["y"]
class SimpleJellyTest:
def __init__(self, x, y):
self.x = x
self.y = y
def isTheSameAs(self, other):
return self.__dict__ == other.__dict__
class JellyTests(unittest.TestCase):
"""
Testcases for L{jelly} module serialization.
@cvar decimalData: serialized version of decimal data, to be used in tests.
@type decimalData: L{list}
"""
decimalData = [b'list', [b'decimal', 995, -2], [b'decimal', 0, 0],
[b'decimal', 123456, 0], [b'decimal', -78901, -3]]
def _testSecurity(self, inputList, atom):
"""
Helper test method to test security options for a type.
@param inputList: a sample input for the type.
@type inputList: L{list}
@param atom: atom identifier for the type.
@type atom: L{str}
"""
c = jelly.jelly(inputList)
taster = jelly.SecurityOptions()
taster.allowBasicTypes()
# By default, it should succeed
jelly.unjelly(c, taster)
taster.allowedTypes.pop(atom)
# But it should raise an exception when disallowed
self.assertRaises(jelly.InsecureJelly, jelly.unjelly, c, taster)
def test_methodsNotSelfIdentity(self):
"""
If a class change after an instance has been created, L{jelly.unjelly}
shoud raise a C{TypeError} when trying to unjelly the instance.
"""
a = A()
b = B()
c = C()
a.bmethod = c.cmethod
b.a = a
savecmethod = C.cmethod
del C.cmethod
try:
self.assertRaises(TypeError, jelly.unjelly, jelly.jelly(b))
finally:
C.cmethod = savecmethod
def test_newStyle(self):
"""
Test that a new style class can be jellied and unjellied with its
objects and attribute values preserved.
"""
n = D()
n.x = 1
n2 = D()
n.n2 = n2
n.n3 = n2
c = jelly.jelly(n)
m = jelly.unjelly(c)
self.assertIsInstance(m, D)
self.assertIs(m.n2, m.n3)
self.assertEqual(m.x, 1)
def test_newStyleWithSlots(self):
"""
A class defined with I{slots} can be jellied and unjellied with the
values for its attributes preserved.
"""
n = E()
n.x = 1
c = jelly.jelly(n)
m = jelly.unjelly(c)
self.assertIsInstance(m, E)
self.assertEqual(n.x, 1)
def test_typeOldStyle(self):
"""
Test that an old style class type can be jellied and unjellied
to the original type.
"""
t = [C]
r = jelly.unjelly(jelly.jelly(t))
self.assertEqual(t, r)
def test_typeNewStyle(self):
"""
Test that a new style class type can be jellied and unjellied
to the original type.
"""
t = [D]
r = jelly.unjelly(jelly.jelly(t))
self.assertEqual(t, r)
def test_typeBuiltin(self):
"""
Test that a builtin type can be jellied and unjellied to the original
type.
"""
t = [str]
r = jelly.unjelly(jelly.jelly(t))
self.assertEqual(t, r)
def test_dateTime(self):
"""
Jellying L{datetime.timedelta} instances and then unjellying the result
should produce objects which represent the values of the original
inputs.
"""
dtn = datetime.datetime.now()
dtd = datetime.datetime.now() - dtn
inputList = [dtn, dtd]
c = jelly.jelly(inputList)
output = jelly.unjelly(c)
self.assertEqual(inputList, output)
self.assertIsNot(inputList, output)
def test_decimal(self):
"""
Jellying L{decimal.Decimal} instances and then unjellying the result
should produce objects which represent the values of the original
inputs.
"""
inputList = [decimal.Decimal('9.95'),
decimal.Decimal(0),
decimal.Decimal(123456),
decimal.Decimal('-78.901')]
c = jelly.jelly(inputList)
output = jelly.unjelly(c)
self.assertEqual(inputList, output)
self.assertIsNot(inputList, output)
def test_decimalUnjelly(self):
"""
Unjellying the s-expressions produced by jelly for L{decimal.Decimal}
instances should result in L{decimal.Decimal} instances with the values
represented by the s-expressions.
This test also verifies that L{decimalData} contains valid jellied
data. This is important since L{test_decimalMissing} re-uses
L{decimalData} and is expected to be unable to produce
L{decimal.Decimal} instances even though the s-expression correctly
represents a list of them.
"""
expected = [decimal.Decimal('9.95'),
decimal.Decimal(0),
decimal.Decimal(123456),
decimal.Decimal('-78.901')]
output = jelly.unjelly(self.decimalData)
self.assertEqual(output, expected)
def test_decimalSecurity(self):
"""
By default, C{decimal} objects should be allowed by
L{jelly.SecurityOptions}. If not allowed, L{jelly.unjelly} should raise
L{jelly.InsecureJelly} when trying to unjelly it.
"""
inputList = [decimal.Decimal('9.95')]
self._testSecurity(inputList, b"decimal")
def test_set(self):
"""
Jellying C{set} instances and then unjellying the result
should produce objects which represent the values of the original
inputs.
"""
inputList = [set([1, 2, 3])]
output = jelly.unjelly(jelly.jelly(inputList))
self.assertEqual(inputList, output)
self.assertIsNot(inputList, output)
def test_frozenset(self):
"""
Jellying L{frozenset} instances and then unjellying the result
should produce objects which represent the values of the original
inputs.
"""
inputList = [frozenset([1, 2, 3])]
output = jelly.unjelly(jelly.jelly(inputList))
self.assertEqual(inputList, output)
self.assertIsNot(inputList, output)
def test_setSecurity(self):
"""
By default, C{set} objects should be allowed by
L{jelly.SecurityOptions}. If not allowed, L{jelly.unjelly} should raise
L{jelly.InsecureJelly} when trying to unjelly it.
"""
inputList = [set([1, 2, 3])]
self._testSecurity(inputList, b"set")
def test_frozensetSecurity(self):
"""
By default, L{frozenset} objects should be allowed by
L{jelly.SecurityOptions}. If not allowed, L{jelly.unjelly} should raise
L{jelly.InsecureJelly} when trying to unjelly it.
"""
inputList = [frozenset([1, 2, 3])]
self._testSecurity(inputList, b"frozenset")
def test_oldSets(self):
"""
Test jellying C{sets.Set}: it should serialize to the same thing as
C{set} jelly, and be unjellied as C{set} if available.
"""
inputList = [jelly._sets.Set([1, 2, 3])]
inputJelly = jelly.jelly(inputList)
self.assertEqual(inputJelly, jelly.jelly([set([1, 2, 3])]))
output = jelly.unjelly(inputJelly)
# Even if the class is different, it should coerce to the same list
self.assertEqual(list(inputList[0]), list(output[0]))
if set is jelly._sets.Set:
self.assertIsInstance(output[0], jelly._sets.Set)
else:
self.assertIsInstance(output[0], set)
if not jelly._sets:
test_oldSets.skip = "sets.Set is gone in Python 3 and higher"
def test_oldImmutableSets(self):
"""
Test jellying C{sets.ImmutableSet}: it should serialize to the same
thing as L{frozenset} jelly, and be unjellied as L{frozenset} if
available.
"""
inputList = [jelly._sets.ImmutableSet([1, 2, 3])]
inputJelly = jelly.jelly(inputList)
self.assertEqual(inputJelly, jelly.jelly([frozenset([1, 2, 3])]))
output = jelly.unjelly(inputJelly)
# Even if the class is different, it should coerce to the same list
self.assertEqual(list(inputList[0]), list(output[0]))
if frozenset is jelly._sets.ImmutableSet:
self.assertIsInstance(output[0], jelly._sets.ImmutableSet)
else:
self.assertIsInstance(output[0], frozenset)
if not jelly._sets:
test_oldImmutableSets.skip = (
"sets.ImmutableSets is gone in Python 3 and higher")
def test_simple(self):
"""
Simplest test case.
"""
self.assertTrue(SimpleJellyTest('a', 'b').isTheSameAs(
SimpleJellyTest('a', 'b')))
a = SimpleJellyTest(1, 2)
cereal = jelly.jelly(a)
b = jelly.unjelly(cereal)
self.assertTrue(a.isTheSameAs(b))
def test_identity(self):
"""
Test to make sure that objects retain identity properly.
"""
x = []
y = (x)
x.append(y)
x.append(y)
self.assertIs(x[0], x[1])
self.assertIs(x[0][0], x)
s = jelly.jelly(x)
z = jelly.unjelly(s)
self.assertIs(z[0], z[1])
self.assertIs(z[0][0], z)
def test_unicode(self):
x = unicode('blah')
y = jelly.unjelly(jelly.jelly(x))
self.assertEqual(x, y)
self.assertEqual(type(x), type(y))
def test_stressReferences(self):
reref = []
toplevelTuple = ({'list': reref}, reref)
reref.append(toplevelTuple)
s = jelly.jelly(toplevelTuple)
z = jelly.unjelly(s)
self.assertIs(z[0]['list'], z[1])
self.assertIs(z[0]['list'][0], z)
def test_moreReferences(self):
a = []
t = (a,)
a.append((t,))
s = jelly.jelly(t)
z = jelly.unjelly(s)
self.assertIs(z[0][0][0], z)
def test_typeSecurity(self):
"""
Test for type-level security of serialization.
"""
taster = jelly.SecurityOptions()
dct = jelly.jelly({})
self.assertRaises(jelly.InsecureJelly, jelly.unjelly, dct, taster)
def test_newStyleClasses(self):
uj = jelly.unjelly(D)
self.assertIs(D, uj)
def test_lotsaTypes(self):
"""
Test for all types currently supported in jelly
"""
a = A()
jelly.unjelly(jelly.jelly(a))
jelly.unjelly(jelly.jelly(a.amethod))
items = [afunc, [1, 2, 3], not bool(1), bool(1), 'test', 20.3,
(1, 2, 3), None, A, unittest, {'a': 1}, A.amethod]
for i in items:
self.assertEqual(i, jelly.unjelly(jelly.jelly(i)))
def test_setState(self):
global TupleState
class TupleState:
def __init__(self, other):
self.other = other
def __getstate__(self):
return (self.other,)
def __setstate__(self, state):
self.other = state[0]
def __hash__(self):
return hash(self.other)
a = A()
t1 = TupleState(a)
t2 = TupleState(a)
t3 = TupleState((t1, t2))
d = {t1: t1, t2: t2, t3: t3, "t3": t3}
t3prime = jelly.unjelly(jelly.jelly(d))["t3"]
self.assertIs(t3prime.other[0].other, t3prime.other[1].other)
def test_classSecurity(self):
"""
Test for class-level security of serialization.
"""
taster = jelly.SecurityOptions()
taster.allowInstancesOf(A, B)
a = A()
b = B()
c = C()
# add a little complexity to the data
a.b = b
a.c = c
# and a backreference
a.x = b
b.c = c
# first, a friendly insecure serialization
friendly = jelly.jelly(a, taster)
x = jelly.unjelly(friendly, taster)
self.assertIsInstance(x.c, jelly.Unpersistable)
# now, a malicious one
mean = jelly.jelly(a)
self.assertRaises(jelly.InsecureJelly, jelly.unjelly, mean, taster)
self.assertIs(x.x, x.b, "Identity mismatch")
# test class serialization
friendly = jelly.jelly(A, taster)
x = jelly.unjelly(friendly, taster)
self.assertIs(x, A, "A came back: %s" % x)
def test_unjellyable(self):
"""
Test that if Unjellyable is used to deserialize a jellied object,
state comes out right.
"""
class JellyableTestClass(jelly.Jellyable):
pass
jelly.setUnjellyableForClass(JellyableTestClass, jelly.Unjellyable)
input = JellyableTestClass()
input.attribute = 'value'
output = jelly.unjelly(jelly.jelly(input))
self.assertEqual(output.attribute, 'value')
self.assertIsInstance(output, jelly.Unjellyable)
def test_persistentStorage(self):
perst = [{}, 1]
def persistentStore(obj, jel, perst = perst):
perst[1] = perst[1] + 1
perst[0][perst[1]] = obj
return str(perst[1])
def persistentLoad(pidstr, unj, perst = perst):
pid = int(pidstr)
return perst[0][pid]
a = SimpleJellyTest(1, 2)
b = SimpleJellyTest(3, 4)
c = SimpleJellyTest(5, 6)
a.b = b
a.c = c
c.b = b
jel = jelly.jelly(a, persistentStore = persistentStore)
x = jelly.unjelly(jel, persistentLoad = persistentLoad)
self.assertIs(x.b, x.c.b)
self.assertTrue(perst[0], "persistentStore was not called.")
self.assertIs(x.b, a.b, "Persistent storage identity failure.")
def test_newStyleClassesAttributes(self):
n = TestNode()
n1 = TestNode(n)
TestNode(n1)
TestNode(n)
# Jelly it
jel = jelly.jelly(n)
m = jelly.unjelly(jel)
# Check that it has been restored ok
self._check_newstyle(n, m)
def _check_newstyle(self, a, b):
self.assertEqual(a.id, b.id)
self.assertEqual(a.classAttr, 4)
self.assertEqual(b.classAttr, 4)
self.assertEqual(len(a.children), len(b.children))
for x, y in zip(a.children, b.children):
self._check_newstyle(x, y)
def test_referenceable(self):
"""
A L{pb.Referenceable} instance jellies to a structure which unjellies to
a L{pb.RemoteReference}. The C{RemoteReference} has a I{luid} that
matches up with the local object key in the L{pb.Broker} which sent the
L{Referenceable}.
"""
ref = pb.Referenceable()
jellyBroker = pb.Broker()
jellyBroker.makeConnection(StringTransport())
j = jelly.jelly(ref, invoker=jellyBroker)
unjellyBroker = pb.Broker()
unjellyBroker.makeConnection(StringTransport())
uj = jelly.unjelly(j, invoker=unjellyBroker)
self.assertIn(uj.luid, jellyBroker.localObjects)
class JellyDeprecationTests(unittest.TestCase):
"""
Tests for deprecated Jelly things
"""
def test_deprecatedInstanceAtom(self):
"""
L{jelly.instance_atom} is deprecated since 15.0.0.
"""
jelly.instance_atom
warnings = self.flushWarnings([self.test_deprecatedInstanceAtom])
self.assertEqual(len(warnings), 1)
self.assertEqual(
warnings[0]['message'],
'twisted.spread.jelly.instance_atom was deprecated in Twisted '
'15.0.0: instance_atom is unused within Twisted.')
self.assertEqual(
warnings[0]['category'],
DeprecationWarning)
def test_deprecatedUnjellyingInstanceAtom(self):
"""
Unjellying the instance atom is deprecated with 15.0.0.
"""
jelly.unjelly(
["instance",
["class", "twisted.spread.test.test_jelly.A"],
["dictionary"]])
warnings = self.flushWarnings()
self.assertEqual(len(warnings), 1)
self.assertEqual(
warnings[0]['message'],
"Unjelly support for the instance atom is deprecated since "
"Twisted 15.0.0. Upgrade peer for modern instance support.")
self.assertEqual(
warnings[0]['category'],
DeprecationWarning)
class ClassA(pb.Copyable, pb.RemoteCopy):
def __init__(self):
self.ref = ClassB(self)
class ClassB(pb.Copyable, pb.RemoteCopy):
def __init__(self, ref):
self.ref = ref
class CircularReferenceTests(unittest.TestCase):
"""
Tests for circular references handling in the jelly/unjelly process.
"""
def test_simpleCircle(self):
jelly.setUnjellyableForClass(ClassA, ClassA)
jelly.setUnjellyableForClass(ClassB, ClassB)
a = jelly.unjelly(jelly.jelly(ClassA()))
self.assertIs(a.ref.ref, a,
"Identity not preserved in circular reference")
def test_circleWithInvoker(self):
class DummyInvokerClass:
pass
dummyInvoker = DummyInvokerClass()
dummyInvoker.serializingPerspective = None
a0 = ClassA()
jelly.setUnjellyableForClass(ClassA, ClassA)
jelly.setUnjellyableForClass(ClassB, ClassB)
j = jelly.jelly(a0, invoker=dummyInvoker)
a1 = jelly.unjelly(j)
        self.assertIs(a1.ref.ref, a1,
                      "Identity not preserved in circular reference")
def test_set(self):
"""
Check that a C{set} can contain a circular reference and be serialized
and unserialized without losing the reference.
"""
s = set()
a = SimpleJellyTest(s, None)
s.add(a)
res = jelly.unjelly(jelly.jelly(a))
self.assertIsInstance(res.x, set)
self.assertEqual(list(res.x), [res])
def test_frozenset(self):
"""
Check that a L{frozenset} can contain a circular reference and be
serialized and unserialized without losing the reference.
"""
a = SimpleJellyTest(None, None)
s = frozenset([a])
a.x = s
res = jelly.unjelly(jelly.jelly(a))
self.assertIsInstance(res.x, frozenset)
self.assertEqual(list(res.x), [res])
| mit | 5,678,625,004,187,131,000 | -4,037,111,565,378,062,000 | 27.971014 | 80 | 0.578689 | false |
Kesmon/VolumCalc | calc.py | 1 | 2474 | # Volume calculator
# made by Kristian Sundal/(Kesmon/)
# Release 1.0
from math import pi
start = '\n\nChoose between\n"cone", "pyramid", "tpyramid", "cylinder", "sphere", "prism" or "tprism"\n"quit" to exit'
trekant = 'Do you want the triangle version? "yes/no"'
fase = ''
print(start)
fase = input('> ')
while fase != 'quit':
    if fase == 'cylinder':
        print('\nLength of the radius')
        rad = float(input('> '))
        print('Height of the cylinder')
        hoyde = float(input('> '))
        volum = pi * rad * rad * hoyde
        print('\n', volum)
        print(start)
        fase = input('> ')
    elif fase == 'sphere':
        print('\nLength of the radius')
        rad = float(input('> '))
        volum = 4 * pi * rad * rad * rad / 3
        print('\n', volum)
        print(start)
        fase = input('> ')
    elif fase == 'prism':
        print('\nLength')
        lengden = float(input('> '))
        print('Height')
        hoyde = float(input('> '))
        print('Width')
        bredde = float(input('> '))
        volum = lengden * hoyde * bredde
        print('\n', volum)
        print(start)
        fase = input('> ')
    elif fase == 'tprism':
        print('\nBase line')
        glinja = float(input('> '))
        print('Height')
        hoyden = float(input('> '))
        print('Depth')
        dybde = float(input('> '))
        volum = glinja * hoyden / 2 * dybde
        print('\n', volum)
        print(start)
        fase = input('> ')
    elif fase == 'cone':
        print('\nRadius')
        radius = float(input('> '))
        print('Height')
        hoyde = float(input('> '))
        volum = pi * radius * radius * hoyde / 3
        print('\n', volum)
        print(start)
        fase = input('> ')
    elif fase == 'tpyramid':
        print('\nBase line')
        glinja = float(input('> '))
        print('Height')
        hoyden = float(input('> '))
        print('Depth')
        dybde = float(input('> '))
        volum = glinja * hoyden / 2 * dybde / 3
        print('\n', volum)
        print(start)
        fase = input('> ')
    elif fase == 'pyramid':
        print('\nLength')
        lengden = float(input('> '))
        print('Width')
        bredde = float(input('> '))
        print('Height')
        hoyde = float(input('> '))
        volum = lengden * bredde * hoyde / 3
        print('\n', volum)
        print(start)
        fase = input('> ')
    else:
        print('\n\n\nChoose one of the options')
        print(start)
        fase = input('> ')
| unlicense | -911,282,691,503,711,000 | 3,652,645,176,717,392,400 | 18.100775 | 127 | 0.493912 | false |
chirilo/mozillians | vendor-local/lib/python/djcelery/tests/test_loaders.py | 11 | 1436 | from __future__ import absolute_import
from celery import loaders
from djcelery import loaders as djloaders
from djcelery.tests.utils import unittest
class TestDjangoLoader(unittest.TestCase):
def setUp(self):
self.loader = djloaders.DjangoLoader()
def test_get_loader_cls(self):
self.assertEqual(loaders.get_loader_cls("django"),
self.loader.__class__)
# Execute cached branch.
self.assertEqual(loaders.get_loader_cls("django"),
self.loader.__class__)
def test_on_worker_init(self):
from django.conf import settings
old_imports = getattr(settings, "CELERY_IMPORTS", None)
settings.CELERY_IMPORTS = ("xxx.does.not.exist", )
try:
self.assertRaises(ImportError, self.loader.import_default_modules)
finally:
settings.CELERY_IMPORTS = old_imports
def test_race_protection(self):
djloaders._RACE_PROTECTION = True
try:
self.assertFalse(self.loader.on_worker_init())
finally:
djloaders._RACE_PROTECTION = False
def test_find_related_module_no_path(self):
self.assertFalse(djloaders.find_related_module("sys", "tasks"))
def test_find_related_module_no_related(self):
self.assertFalse(djloaders.find_related_module("someapp",
"frobulators"))
| bsd-3-clause | -5,121,403,605,137,187,000 | 71,840,518,282,395,770 | 32.395349 | 78 | 0.619777 | false |
jezdez/kuma | vendor/packages/jsonpickle/handlers.py | 22 | 8846 | # -*- coding: utf-8 -*-
"""
Custom handlers may be created to handle other objects. Each custom handler
must derive from :class:`jsonpickle.handlers.BaseHandler` and
implement ``flatten`` and ``restore``.
A handler can be bound to other types by calling :func:`jsonpickle.handlers.register`.
:class:`jsonpickle.handlers.SimpleReduceHandler` is suitable for handling
objects that implement the reduce protocol::
from jsonpickle import handlers
    class MyCustomObject(object):
...
def __reduce__(self):
return MyCustomObject, self._get_args()
handlers.register(MyCustomObject, handlers.SimpleReduceHandler)
"""
import collections
import copy
import datetime
import decimal
import re
import sys
import time
from jsonpickle import util
from jsonpickle.compat import unicode
from jsonpickle.compat import queue
class Registry(object):
def __init__(self):
self._handlers = {}
self._base_handlers = {}
def get(self, cls_or_name, default=None):
"""
:param cls_or_name: the type or its fully qualified name
:param default: default value, if a matching handler is not found
Looks up a handler by type reference or its fully qualified name. If a direct match
is not found, the search is performed over all handlers registered with base=True.
"""
handler = self._handlers.get(cls_or_name)
if handler is None and util.is_type(cls_or_name): # attempt to find a base class
for cls, base_handler in self._base_handlers.items():
if issubclass(cls_or_name, cls):
return base_handler
return default if handler is None else handler
def register(self, cls, handler=None, base=False):
"""Register the a custom handler for a class
:param cls: The custom object class to handle
:param handler: The custom handler class (if None, a decorator wrapper is returned)
:param base: Indicates whether the handler should be registered for all subclasses
This function can be also used as a decorator by omitting the `handler` argument:
@jsonpickle.handlers.register(Foo, base=True)
class FooHandler(jsonpickle.handlers.BaseHandler):
pass
"""
if handler is None:
def _register(handler_cls):
self.register(cls, handler=handler_cls, base=base)
return handler_cls
return _register
if not util.is_type(cls):
raise TypeError('{0!r} is not a class/type'.format(cls))
# store both the name and the actual type for the ugly cases like
# _sre.SRE_Pattern that cannot be loaded back directly
self._handlers[util.importable_name(cls)] = self._handlers[cls] = handler
if base:
# only store the actual type for subclass checking
self._base_handlers[cls] = handler
def unregister(self, cls):
self._handlers.pop(cls, None)
self._handlers.pop(util.importable_name(cls), None)
self._base_handlers.pop(cls, None)
registry = Registry()
register = registry.register
unregister = registry.unregister
get = registry.get
class BaseHandler(object):
def __init__(self, context):
"""
Initialize a new handler to handle a registered type.
:Parameters:
- `context`: reference to pickler/unpickler
"""
self.context = context
def flatten(self, obj, data):
"""
Flatten `obj` into a json-friendly form and write result to `data`.
:param object obj: The object to be serialized.
:param dict data: A partially filled dictionary which will contain the
json-friendly representation of `obj` once this method has
finished.
"""
raise NotImplementedError('You must implement flatten() in %s' %
self.__class__)
def restore(self, obj):
"""
Restore an object of the registered type from the json-friendly
representation `obj` and return it.
"""
raise NotImplementedError('You must implement restore() in %s' %
self.__class__)
@classmethod
def handles(self, cls):
"""
Register this handler for the given class. Suitable as a decorator,
e.g.::
@SimpleReduceHandler.handles
class MyCustomClass:
def __reduce__(self):
...
"""
registry.register(cls, self)
return cls
class DatetimeHandler(BaseHandler):
"""Custom handler for datetime objects
Datetime objects use __reduce__, and they generate binary strings encoding
the payload. This handler encodes that payload to reconstruct the
object.
"""
def flatten(self, obj, data):
pickler = self.context
if not pickler.unpicklable:
return unicode(obj)
cls, args = obj.__reduce__()
flatten = pickler.flatten
payload = util.b64encode(args[0])
args = [payload] + [flatten(i, reset=False) for i in args[1:]]
data['__reduce__'] = (flatten(cls, reset=False), args)
return data
def restore(self, data):
cls, args = data['__reduce__']
unpickler = self.context
restore = unpickler.restore
cls = restore(cls, reset=False)
value = util.b64decode(args[0])
params = (value,) + tuple([restore(i, reset=False) for i in args[1:]])
return cls.__new__(cls, *params)
DatetimeHandler.handles(datetime.datetime)
DatetimeHandler.handles(datetime.date)
DatetimeHandler.handles(datetime.time)
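# Illustrative round-trip through the public API (hedged; the exact JSON
# shape produced by encode is not reproduced here):
#   import jsonpickle
#   s = jsonpickle.encode(datetime.datetime(2000, 1, 2, 3, 4, 5))
#   jsonpickle.decode(s) == datetime.datetime(2000, 1, 2, 3, 4, 5)  # True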
class RegexHandler(BaseHandler):
"""Flatten _sre.SRE_Pattern (compiled regex) objects"""
def flatten(self, obj, data):
data['pattern'] = obj.pattern
return data
def restore(self, data):
return re.compile(data['pattern'])
RegexHandler.handles(type(re.compile('')))
class SimpleReduceHandler(BaseHandler):
"""Follow the __reduce__ protocol to pickle an object.
As long as the factory and its arguments are pickleable, this should
pickle any object that implements the reduce protocol.
"""
def flatten(self, obj, data):
flatten = self.context.flatten
data['__reduce__'] = [flatten(i, reset=False) for i in obj.__reduce__()]
return data
def restore(self, data):
restore = self.context.restore
factory, args = [restore(i, reset=False) for i in data['__reduce__']]
return factory(*args)
class OrderedDictReduceHandler(SimpleReduceHandler):
"""Serialize OrderedDict on Python 3.4+
Python 3.4+ returns multiple entries in an OrderedDict's
reduced form. Previous versions return a two-item tuple.
OrderedDictReduceHandler makes the formats compatible.
"""
def flatten(self, obj, data):
# __reduce__() on older pythons returned a list of
# [key, value] list pairs inside a tuple.
# Recreate that structure so that the file format
# is consistent between python versions.
flatten = self.context.flatten
reduced = obj.__reduce__()
factory = flatten(reduced[0], reset=False)
pairs = [list(x) for x in reduced[-1]]
args = flatten((pairs,), reset=False)
data['__reduce__'] = [factory, args]
return data
SimpleReduceHandler.handles(time.struct_time)
SimpleReduceHandler.handles(datetime.timedelta)
SimpleReduceHandler.handles(collections.deque)
if sys.version_info >= (2, 7):
SimpleReduceHandler.handles(collections.Counter)
if sys.version_info >= (3, 4):
OrderedDictReduceHandler.handles(collections.OrderedDict)
else:
SimpleReduceHandler.handles(collections.OrderedDict)
if sys.version_info >= (3, 0):
SimpleReduceHandler.handles(decimal.Decimal)
try:
import posix
SimpleReduceHandler.handles(posix.stat_result)
except ImportError:
pass
class QueueHandler(BaseHandler):
"""Opaquely serializes Queue objects
Queues contains mutex and condition variables which cannot be serialized.
Construct a new Queue instance when restoring.
"""
def flatten(self, obj, data):
return data
def restore(self, data):
return queue.Queue()
QueueHandler.handles(queue.Queue)
class CloneFactory(object):
"""Serialization proxy for collections.defaultdict's default_factory"""
def __init__(self, exemplar):
self.exemplar = exemplar
def __call__(self, clone=copy.copy):
"""Create new instances by making copies of the provided exemplar"""
return clone(self.exemplar)
def __repr__(self):
return ('<CloneFactory object at 0x%x (%s)>' % (id(self), self.exemplar))
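# Minimal sketch (hedged): CloneFactory hands out copies of its exemplar, so
# it can stand in for an unpicklable default_factory.
#   factory = CloneFactory([])            # exemplar: an empty list
#   d = collections.defaultdict(factory)
#   d['missing'].append(1)                # each miss gets a fresh copy of []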
| mpl-2.0 | -6,509,865,776,244,615,000 | -3,923,813,463,941,829,600 | 30.592857 | 91 | 0.644924 | false |
AGG2017/diaphora | pygments/lexers/_postgres_builtins.py | 48 | 11210 | # -*- coding: utf-8 -*-
"""
pygments.lexers._postgres_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Self-updating data files for PostgreSQL lexer.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Autogenerated: please edit them if you like wasting your time.
KEYWORDS = (
'ABORT',
'ABSOLUTE',
'ACCESS',
'ACTION',
'ADD',
'ADMIN',
'AFTER',
'AGGREGATE',
'ALL',
'ALSO',
'ALTER',
'ALWAYS',
'ANALYSE',
'ANALYZE',
'AND',
'ANY',
'ARRAY',
'AS',
'ASC',
'ASSERTION',
'ASSIGNMENT',
'ASYMMETRIC',
'AT',
'ATTRIBUTE',
'AUTHORIZATION',
'BACKWARD',
'BEFORE',
'BEGIN',
'BETWEEN',
'BIGINT',
'BINARY',
'BIT',
'BOOLEAN',
'BOTH',
'BY',
'CACHE',
'CALLED',
'CASCADE',
'CASCADED',
'CASE',
'CAST',
'CATALOG',
'CHAIN',
'CHAR',
'CHARACTER',
'CHARACTERISTICS',
'CHECK',
'CHECKPOINT',
'CLASS',
'CLOSE',
'CLUSTER',
'COALESCE',
'COLLATE',
'COLLATION',
'COLUMN',
'COMMENT',
'COMMENTS',
'COMMIT',
'COMMITTED',
'CONCURRENTLY',
'CONFIGURATION',
'CONNECTION',
'CONSTRAINT',
'CONSTRAINTS',
'CONTENT',
'CONTINUE',
'CONVERSION',
'COPY',
'COST',
'CREATE',
'CROSS',
'CSV',
'CURRENT',
'CURRENT_CATALOG',
'CURRENT_DATE',
'CURRENT_ROLE',
'CURRENT_SCHEMA',
'CURRENT_TIME',
'CURRENT_TIMESTAMP',
'CURRENT_USER',
'CURSOR',
'CYCLE',
'DATA',
'DATABASE',
'DAY',
'DEALLOCATE',
'DEC',
'DECIMAL',
'DECLARE',
'DEFAULT',
'DEFAULTS',
'DEFERRABLE',
'DEFERRED',
'DEFINER',
'DELETE',
'DELIMITER',
'DELIMITERS',
'DESC',
'DICTIONARY',
'DISABLE',
'DISCARD',
'DISTINCT',
'DO',
'DOCUMENT',
'DOMAIN',
'DOUBLE',
'DROP',
'EACH',
'ELSE',
'ENABLE',
'ENCODING',
'ENCRYPTED',
'END',
'ENUM',
'ESCAPE',
'EVENT',
'EXCEPT',
'EXCLUDE',
'EXCLUDING',
'EXCLUSIVE',
'EXECUTE',
'EXISTS',
'EXPLAIN',
'EXTENSION',
'EXTERNAL',
'EXTRACT',
'FALSE',
'FAMILY',
'FETCH',
'FILTER',
'FIRST',
'FLOAT',
'FOLLOWING',
'FOR',
'FORCE',
'FOREIGN',
'FORWARD',
'FREEZE',
'FROM',
'FULL',
'FUNCTION',
'FUNCTIONS',
'GLOBAL',
'GRANT',
'GRANTED',
'GREATEST',
'GROUP',
'HANDLER',
'HAVING',
'HEADER',
'HOLD',
'HOUR',
'IDENTITY',
'IF',
'ILIKE',
'IMMEDIATE',
'IMMUTABLE',
'IMPLICIT',
'IN',
'INCLUDING',
'INCREMENT',
'INDEX',
'INDEXES',
'INHERIT',
'INHERITS',
'INITIALLY',
'INLINE',
'INNER',
'INOUT',
'INPUT',
'INSENSITIVE',
'INSERT',
'INSTEAD',
'INT',
'INTEGER',
'INTERSECT',
'INTERVAL',
'INTO',
'INVOKER',
'IS',
'ISNULL',
'ISOLATION',
'JOIN',
'KEY',
'LABEL',
'LANGUAGE',
'LARGE',
'LAST',
'LATERAL',
'LC_COLLATE',
'LC_CTYPE',
'LEADING',
'LEAKPROOF',
'LEAST',
'LEFT',
'LEVEL',
'LIKE',
'LIMIT',
'LISTEN',
'LOAD',
'LOCAL',
'LOCALTIME',
'LOCALTIMESTAMP',
'LOCATION',
'LOCK',
'MAPPING',
'MATCH',
'MATERIALIZED',
'MAXVALUE',
'MINUTE',
'MINVALUE',
'MODE',
'MONTH',
'MOVE',
'NAME',
'NAMES',
'NATIONAL',
'NATURAL',
'NCHAR',
'NEXT',
'NO',
'NONE',
'NOT',
'NOTHING',
'NOTIFY',
'NOTNULL',
'NOWAIT',
'NULL',
'NULLIF',
'NULLS',
'NUMERIC',
'OBJECT',
'OF',
'OFF',
'OFFSET',
'OIDS',
'ON',
'ONLY',
'OPERATOR',
'OPTION',
'OPTIONS',
'OR',
'ORDER',
'ORDINALITY',
'OUT',
'OUTER',
'OVER',
'OVERLAPS',
'OVERLAY',
'OWNED',
'OWNER',
'PARSER',
'PARTIAL',
'PARTITION',
'PASSING',
'PASSWORD',
'PLACING',
'PLANS',
'POLICY',
'POSITION',
'PRECEDING',
'PRECISION',
'PREPARE',
'PREPARED',
'PRESERVE',
'PRIMARY',
'PRIOR',
'PRIVILEGES',
'PROCEDURAL',
'PROCEDURE',
'PROGRAM',
'QUOTE',
'RANGE',
'READ',
'REAL',
'REASSIGN',
'RECHECK',
'RECURSIVE',
'REF',
'REFERENCES',
'REFRESH',
'REINDEX',
'RELATIVE',
'RELEASE',
'RENAME',
'REPEATABLE',
'REPLACE',
'REPLICA',
'RESET',
'RESTART',
'RESTRICT',
'RETURNING',
'RETURNS',
'REVOKE',
'RIGHT',
'ROLE',
'ROLLBACK',
'ROW',
'ROWS',
'RULE',
'SAVEPOINT',
'SCHEMA',
'SCROLL',
'SEARCH',
'SECOND',
'SECURITY',
'SELECT',
'SEQUENCE',
'SEQUENCES',
'SERIALIZABLE',
'SERVER',
'SESSION',
'SESSION_USER',
'SET',
'SETOF',
'SHARE',
'SHOW',
'SIMILAR',
'SIMPLE',
'SMALLINT',
'SNAPSHOT',
'SOME',
'STABLE',
'STANDALONE',
'START',
'STATEMENT',
'STATISTICS',
'STDIN',
'STDOUT',
'STORAGE',
'STRICT',
'STRIP',
'SUBSTRING',
'SYMMETRIC',
'SYSID',
'SYSTEM',
'TABLE',
'TABLES',
'TABLESPACE',
'TEMP',
'TEMPLATE',
'TEMPORARY',
'TEXT',
'THEN',
'TIME',
'TIMESTAMP',
'TO',
'TRAILING',
'TRANSACTION',
'TREAT',
'TRIGGER',
'TRIM',
'TRUE',
'TRUNCATE',
'TRUSTED',
'TYPE',
'TYPES',
'UNBOUNDED',
'UNCOMMITTED',
'UNENCRYPTED',
'UNION',
'UNIQUE',
'UNKNOWN',
'UNLISTEN',
'UNLOGGED',
'UNTIL',
'UPDATE',
'USER',
'USING',
'VACUUM',
'VALID',
'VALIDATE',
'VALIDATOR',
'VALUE',
'VALUES',
'VARCHAR',
'VARIADIC',
'VARYING',
'VERBOSE',
'VERSION',
'VIEW',
'VIEWS',
'VOLATILE',
'WHEN',
'WHERE',
'WHITESPACE',
'WINDOW',
'WITH',
'WITHIN',
'WITHOUT',
'WORK',
'WRAPPER',
'WRITE',
'XML',
'XMLATTRIBUTES',
'XMLCONCAT',
'XMLELEMENT',
'XMLEXISTS',
'XMLFOREST',
'XMLPARSE',
'XMLPI',
'XMLROOT',
'XMLSERIALIZE',
'YEAR',
'YES',
'ZONE',
)
DATATYPES = (
'bigint',
'bigserial',
'bit',
'bit varying',
'bool',
'boolean',
'box',
'bytea',
'char',
'character',
'character varying',
'cidr',
'circle',
'date',
'decimal',
'double precision',
'float4',
'float8',
'inet',
'int',
'int2',
'int4',
'int8',
'integer',
'interval',
'json',
'jsonb',
'line',
'lseg',
'macaddr',
'money',
'numeric',
'path',
'pg_lsn',
'point',
'polygon',
'real',
'serial',
'serial2',
'serial4',
'serial8',
'smallint',
'smallserial',
'text',
'time',
'timestamp',
'timestamptz',
'timetz',
'tsquery',
'tsvector',
'txid_snapshot',
'uuid',
'varbit',
'varchar',
'with time zone',
'without time zone',
'xml',
)
PSEUDO_TYPES = (
'any',
'anyelement',
'anyarray',
'anynonarray',
'anyenum',
'anyrange',
'cstring',
'internal',
'language_handler',
'fdw_handler',
'record',
'trigger',
'void',
'opaque',
)
# Remove pseudo-types that are also keywords (currently only 'trigger')
PSEUDO_TYPES = tuple(sorted(set(PSEUDO_TYPES) - set(map(str.lower, KEYWORDS))))
PLPGSQL_KEYWORDS = (
'ALIAS', 'CONSTANT', 'DIAGNOSTICS', 'ELSIF', 'EXCEPTION', 'EXIT',
'FOREACH', 'GET', 'LOOP', 'NOTICE', 'OPEN', 'PERFORM', 'QUERY', 'RAISE',
'RETURN', 'REVERSE', 'SQLSTATE', 'WHILE',
)
if __name__ == '__main__': # pragma: no cover
import re
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
from pygments.util import format_lines
# One man's constant is another man's variable.
SOURCE_URL = 'https://github.com/postgres/postgres/raw/master'
KEYWORDS_URL = SOURCE_URL + '/doc/src/sgml/keywords.sgml'
DATATYPES_URL = SOURCE_URL + '/doc/src/sgml/datatype.sgml'
def update_myself():
data_file = list(urlopen(DATATYPES_URL))
datatypes = parse_datatypes(data_file)
pseudos = parse_pseudos(data_file)
keywords = parse_keywords(urlopen(KEYWORDS_URL))
update_consts(__file__, 'DATATYPES', datatypes)
update_consts(__file__, 'PSEUDO_TYPES', pseudos)
update_consts(__file__, 'KEYWORDS', keywords)
def parse_keywords(f):
kw = []
for m in re.finditer(
r'\s*<entry><token>([^<]+)</token></entry>\s*'
r'<entry>([^<]+)</entry>', f.read()):
kw.append(m.group(1))
if not kw:
raise ValueError('no keyword found')
kw.sort()
return kw
def parse_datatypes(f):
dt = set()
for line in f:
if '<sect1' in line:
break
if '<entry><type>' not in line:
continue
# Parse a string such as
# time [ (<replaceable>p</replaceable>) ] [ without time zone ]
# into types "time" and "without time zone"
# remove all the tags
line = re.sub("<replaceable>[^<]+</replaceable>", "", line)
line = re.sub("<[^>]+>", "", line)
            # Drop the parts containing parentheses
for tmp in [t for tmp in line.split('[')
for t in tmp.split(']') if "(" not in t]:
for t in tmp.split(','):
t = t.strip()
                if not t:
                    continue
dt.add(" ".join(t.split()))
dt = list(dt)
dt.sort()
return dt
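    # Editor's illustration (representative input, not fetched from the live
    # docs): given the documentation line
    #     time [ (<replaceable>p</replaceable>) ] [ without time zone ]
    # parse_datatypes drops the parenthesised part and yields the type names
    # 'time' and 'without time zone'.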
def parse_pseudos(f):
dt = []
re_start = re.compile(r'\s*<table id="datatype-pseudotypes-table">')
re_entry = re.compile(r'\s*<entry><type>([^<]+)</></entry>')
re_end = re.compile(r'\s*</table>')
f = iter(f)
for line in f:
if re_start.match(line) is not None:
break
else:
raise ValueError('pseudo datatypes table not found')
for line in f:
m = re_entry.match(line)
if m is not None:
dt.append(m.group(1))
if re_end.match(line) is not None:
break
else:
raise ValueError('end of pseudo datatypes table not found')
if not dt:
raise ValueError('pseudo datatypes not found')
return dt
def update_consts(filename, constname, content):
with open(filename) as f:
data = f.read()
# Line to start/end inserting
re_match = re.compile(r'^%s\s*=\s*\($.*?^\s*\)$' % constname, re.M | re.S)
m = re_match.search(data)
if not m:
raise ValueError('Could not find existing definition for %s' %
(constname,))
new_block = format_lines(constname, content)
data = data[:m.start()] + new_block + data[m.end():]
with open(filename, 'w') as f:
f.write(data)
update_myself()
| gpl-2.0 | 1,895,367,137,763,671,300 | -4,329,372,926,381,468,700 | 17.05153 | 82 | 0.479037 | false |
charlesbrandt/medley | player/shared.py | 1 | 2332 | """
all_contents is the global place to store all content objects
if a requested content source is already loaded,
use it instead of loading a new one
#TODO:
#consider a simpler global variable / object for all_contents
#rather than passing around everywhere...
#could then be imported by any other module that needs it
#this is meant to be a global place
#to store all loaded content objects
#with the source path as the key
#then when other playlists are loaded with the same content
#they can reference the same object
#self.all_contents = {}
#moving this to a separate module so that it can be easily imported
#anywhere that it is needed.
#cumbersome to try to pass this around
#TODO:
#commented out all references to passed in 'all_contents'
#can go and remove once global approach is verified
"""
from builtins import object
from medley.helpers import load_json, save_json
# simple place to keep track of all loaded content objects
all_contents = {}
#making it easier to pass these into the list_tree from player for loading
cli_items = []
# rather than pass this in everywhere, allow it to be imported
#since this is a widget, it probably needs to be created like before
#main_player = None
#main_player = PlayerWidget(self)
#main_player = PlayerWidget(None)
config_source = 'configs.json'
class Configs(object):
def __init__(self):
global config_source
self.configs = load_json(config_source, create=True)
#aka drive_dir ??? (be consistent with content Object?)
#maybe last_folder is a different configuration
if 'last_folder' in self.configs:
            self.last_folder = self.configs['last_folder']
else:
self.last_folder = '/'
def get(self, key):
"""
        automatically check if we have the key;
        return an empty string if none exists
"""
if key in self.configs:
return self.configs[key]
else:
return ''
def save_configs(self):
"""
save self.configs to local 'configs.json' file
"""
global config_source
#save_json(self.config_source, self.configs)
save_json(config_source, self.configs)
configs = Configs()
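# Editor's sketch (hypothetical usage; the key name is illustrative): how
# other modules are expected to use the shared Configs instance defined above.
def _configs_usage_sketch():
    last = configs.get('last_folder')        # '' when the key is missing
    configs.configs['last_folder'] = last or '/'
    configs.save_configs()                   # persists to configs.json
    return last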
| mit | 3,554,856,330,945,993,700 | 2,266,753,910,041,157,000 | 30.093333 | 75 | 0.651372 | false |
Ayub-Khan/edx-platform | lms/djangoapps/course_structure_api/v0/views.py | 12 | 27921 | """ API implementation for course-oriented interactions. """
from collections import namedtuple
import json
import logging
from django.conf import settings
from django.http import Http404
from rest_framework.authentication import SessionAuthentication
from rest_framework_oauth.authentication import OAuth2Authentication
from rest_framework.exceptions import AuthenticationFailed, ParseError
from rest_framework.generics import RetrieveAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.reverse import reverse
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import CourseKey
from course_structure_api.v0 import serializers
from courseware import courses
from courseware.access import has_access
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from openedx.core.lib.api.view_utils import view_course_access, view_auth_classes
from openedx.core.djangoapps.content.course_structures.api.v0 import api, errors
from openedx.core.lib.exceptions import CourseNotFoundError
from student.roles import CourseInstructorRole, CourseStaffRole
from util.module_utils import get_dynamic_descriptor_children
log = logging.getLogger(__name__)
class CourseViewMixin(object):
"""
Mixin for views dealing with course content. Also handles authorization and authentication.
"""
lookup_field = 'course_id'
authentication_classes = (OAuth2Authentication, SessionAuthentication,)
permission_classes = (IsAuthenticated,)
def get_course_or_404(self):
"""
Retrieves the specified course, or raises an Http404 error if it does not exist.
Also checks to ensure the user has permissions to view the course
"""
try:
course_id = self.kwargs.get('course_id')
course_key = CourseKey.from_string(course_id)
course = courses.get_course(course_key)
self.check_course_permissions(self.request.user, course_key)
return course
except ValueError:
raise Http404
@staticmethod
def course_check(func):
"""Decorator responsible for catching errors finding and returning a 404 if the user does not have access
to the API function.
:param func: function to be wrapped
:returns: the wrapped function
"""
def func_wrapper(self, *args, **kwargs):
"""Wrapper function for this decorator.
:param *args: the arguments passed into the function
:param **kwargs: the keyword arguments passed into the function
:returns: the result of the wrapped function
"""
try:
course_id = self.kwargs.get('course_id')
self.course_key = CourseKey.from_string(course_id)
self.check_course_permissions(self.request.user, self.course_key)
return func(self, *args, **kwargs)
except CourseNotFoundError:
raise Http404
return func_wrapper
def user_can_access_course(self, user, course):
"""
Determines if the user is staff or an instructor for the course.
Always returns True if DEBUG mode is enabled.
"""
return bool(
settings.DEBUG
or has_access(user, CourseStaffRole.ROLE, course)
or has_access(user, CourseInstructorRole.ROLE, course)
)
def check_course_permissions(self, user, course):
"""
Checks if the request user can access the course.
Raises 404 if the user does not have course access.
"""
if not self.user_can_access_course(user, course):
raise Http404
def perform_authentication(self, request):
"""
Ensures that the user is authenticated (e.g. not an AnonymousUser), unless DEBUG mode is enabled.
"""
super(CourseViewMixin, self).perform_authentication(request)
if request.user.is_anonymous() and not settings.DEBUG:
raise AuthenticationFailed
class CourseList(CourseViewMixin, ListAPIView):
"""
**Use Case**
Get a paginated list of courses in the edX Platform.
The list can be filtered by course_id.
Each page in the list can contain up to 10 courses.
**Example Requests**
GET /api/course_structure/v0/courses/
GET /api/course_structure/v0/courses/?course_id={course_id1},{course_id2}
**Response Values**
* count: The number of courses in the edX platform.
* next: The URI to the next page of courses.
* previous: The URI to the previous page of courses.
* num_pages: The number of pages listing courses.
* results: A list of courses returned. Each collection in the list
contains these fields.
* id: The unique identifier for the course.
* name: The name of the course.
* category: The type of content. In this case, the value is always
"course".
* org: The organization specified for the course.
* run: The run of the course.
* course: The course number.
* uri: The URI to use to get details of the course.
* image_url: The URI for the course's main image.
* start: The course start date.
* end: The course end date. If course end date is not specified, the
value is null.
"""
serializer_class = serializers.CourseSerializer
def get_queryset(self):
course_ids = self.request.query_params.get('course_id', None)
results = []
if course_ids:
course_ids = course_ids.split(',')
for course_id in course_ids:
course_key = CourseKey.from_string(course_id)
course_descriptor = courses.get_course(course_key)
results.append(course_descriptor)
else:
results = modulestore().get_courses()
# Ensure only course descriptors are returned.
results = (course for course in results if course.scope_ids.block_type == 'course')
# Ensure only courses accessible by the user are returned.
results = (course for course in results if self.user_can_access_course(self.request.user, course))
# Sort the results in a predictable manner.
return sorted(results, key=lambda course: unicode(course.id))
class CourseDetail(CourseViewMixin, RetrieveAPIView):
"""
**Use Case**
Get details for a specific course.
**Example Request**:
GET /api/course_structure/v0/courses/{course_id}/
**Response Values**
* id: The unique identifier for the course.
* name: The name of the course.
* category: The type of content.
* org: The organization that is offering the course.
* run: The run of the course.
* course: The course number.
* uri: The URI to use to get details about the course.
* image_url: The URI for the course's main image.
* start: The course start date.
* end: The course end date. If course end date is not specified, the
value is null.
"""
serializer_class = serializers.CourseSerializer
def get_object(self, queryset=None):
return self.get_course_or_404()
class CourseStructure(CourseViewMixin, RetrieveAPIView):
"""
**Use Case**
Get the course structure. This endpoint returns all blocks in the
course.
**Example requests**:
GET /api/course_structure/v0/course_structures/{course_id}/
**Response Values**
* root: The ID of the root node of the course structure.
* blocks: A dictionary that maps block IDs to a collection of
information about each block. Each block contains the following
fields.
* id: The ID of the block.
* type: The type of block. Possible values include sequential,
vertical, html, problem, video, and discussion. The type can also be
the name of a custom type of block used for the course.
* display_name: The display name configured for the block.
* graded: Whether or not the sequential or problem is graded. The
value is true or false.
* format: The assignment type.
* children: If the block has child blocks, a list of IDs of the child
blocks in the order they appear in the course.
"""
@CourseViewMixin.course_check
def get(self, request, **kwargs):
try:
return Response(api.course_structure(self.course_key))
except errors.CourseStructureNotAvailableError:
# If we don't have data stored, we will try to regenerate it, so
            # return a 503 and ask the caller to retry in 2 minutes.
return Response(status=503, headers={'Retry-After': '120'})
class CourseGradingPolicy(CourseViewMixin, ListAPIView):
"""
**Use Case**
Get the course grading policy.
**Example requests**:
GET /api/course_structure/v0/grading_policies/{course_id}/
**Response Values**
* assignment_type: The type of the assignment, as configured by course
staff. For example, course staff might make the assignment types Homework,
Quiz, and Exam.
* count: The number of assignments of the type.
* dropped: Number of assignments of the type that are dropped.
* weight: The weight, or effect, of the assignment type on the learner's
final grade.
"""
allow_empty = False
@CourseViewMixin.course_check
def get(self, request, **kwargs):
return Response(api.course_grading_policy(self.course_key))
@view_auth_classes()
class CourseBlocksAndNavigation(ListAPIView):
"""
**Use Case**
The following endpoints return the content of the course according to the requesting user's access level.
* Blocks - Get the course's blocks.
* Navigation - Get the course's navigation information per the navigation depth requested.
* Blocks+Navigation - Get both the course's blocks and the course's navigation information.
**Example requests**:
GET api/course_structure/v0/courses/{course_id}/blocks/
GET api/course_structure/v0/courses/{course_id}/navigation/
GET api/course_structure/v0/courses/{course_id}/blocks+navigation/
&block_count=video
&block_json={"video":{"profiles":["mobile_low"]}}
&fields=graded,format,multi_device
**Parameters**:
* block_json: (dict) Indicates for which block types to return student_view_json data. The key is the block
type and the value is the "context" that is passed to the block's student_view_json method.
Example: block_json={"video":{"profiles":["mobile_high","mobile_low"]}}
* block_count: (list) Indicates for which block types to return the aggregate count of the blocks.
Example: block_count="video,problem"
* fields: (list) Indicates which additional fields to return for each block.
Default is "children,graded,format,multi_device"
Example: fields=graded,format,multi_device
* navigation_depth (integer) Indicates how far deep to traverse into the course hierarchy before bundling
all the descendants.
Default is 3 since typical navigational views of the course show a maximum of chapter->sequential->vertical.
Example: navigation_depth=3
**Response Values**
The following fields are returned with a successful response.
Only either one of blocks, navigation, or blocks+navigation is returned depending on which endpoint is used.
The "root" field is returned for all endpoints.
* root: The ID of the root node of the course blocks.
* blocks: A dictionary that maps block usage IDs to a collection of information about each block.
Each block contains the following fields. Returned only if using the "blocks" endpoint.
* id: (string) The usage ID of the block.
* type: (string) The type of block. Possible values include course, chapter, sequential, vertical, html,
problem, video, and discussion. The type can also be the name of a custom type of block used for the course.
* display_name: (string) The display name of the block.
* children: (list) If the block has child blocks, a list of IDs of the child blocks.
Returned only if the "children" input parameter is True.
* block_count: (dict) For each block type specified in the block_count parameter to the endpoint, the
aggregate number of blocks of that type for this block and all of its descendants.
Returned only if the "block_count" input parameter contains this block's type.
* block_json: (dict) The JSON data for this block.
Returned only if the "block_json" input parameter contains this block's type.
* block_url: (string) The URL to retrieve the HTML rendering of this block. The HTML could include
CSS and Javascript code. This URL can be used as a fallback if the custom block_json for this
block type is not requested and not supported.
* web_url: (string) The URL to the website location of this block. This URL can be used as a further
fallback if the block_url and the block_json is not supported.
* graded (boolean) Whether or not the block or any of its descendants is graded.
Returned only if "graded" is included in the "fields" parameter.
* format: (string) The assignment type of the block.
Possible values can be "Homework", "Lab", "Midterm Exam", and "Final Exam".
Returned only if "format" is included in the "fields" parameter.
* multi_device: (boolean) Whether or not the block's rendering obtained via block_url has support
for multiple devices.
Returned only if "multi_device" is included in the "fields" parameter.
* navigation: A dictionary that maps block IDs to a collection of navigation information about each block.
Each block contains the following fields. Returned only if using the "navigation" endpoint.
* descendants: (list) A list of IDs of the children of the block if the block's depth in the
course hierarchy is less than the navigation_depth. Otherwise, a list of IDs of the aggregate descendants
of the block.
* blocks+navigation: A dictionary that combines both the blocks and navigation data.
Returned only if using the "blocks+navigation" endpoint.
"""
class RequestInfo(object):
"""
A class for encapsulating the request information, including what optional fields are requested.
"""
DEFAULT_FIELDS = "children,graded,format,multi_device"
def __init__(self, request, course):
self.request = request
self.course = course
self.field_data_cache = None
# check what fields are requested
try:
# fields
self.fields = set(request.GET.get('fields', self.DEFAULT_FIELDS).split(","))
# block_count
self.block_count = request.GET.get('block_count', "")
self.block_count = (
self.block_count.split(",") if self.block_count else []
)
# navigation_depth
# See docstring for why we default to 3.
self.navigation_depth = int(request.GET.get('navigation_depth', '3'))
# block_json
self.block_json = json.loads(request.GET.get('block_json', "{}"))
if self.block_json and not isinstance(self.block_json, dict):
raise ParseError
except:
raise ParseError
class ResultData(object):
"""
A class for encapsulating the result information, specifically the blocks and navigation data.
"""
def __init__(self, return_blocks, return_nav):
self.blocks = {}
self.navigation = {}
if return_blocks and return_nav:
self.navigation = self.blocks
def update_response(self, response, return_blocks, return_nav):
"""
Updates the response object with result information.
"""
if return_blocks and return_nav:
response["blocks+navigation"] = self.blocks
elif return_blocks:
response["blocks"] = self.blocks
elif return_nav:
response["navigation"] = self.navigation
class BlockInfo(object):
"""
A class for encapsulating a block's information as needed during traversal of a block hierarchy.
"""
def __init__(self, block, request_info, parent_block_info=None):
# the block for which the recursion is being computed
self.block = block
# the type of the block
self.type = block.category
# the block's depth in the block hierarchy
self.depth = 0
# the block's children
self.children = []
# descendants_of_parent: the list of descendants for this block's parent
self.descendants_of_parent = []
            # descendants_of_self: the list of descendants computed for this block,
            # passed down so its children can append themselves to it
            self.descendants_of_self = []
# if a parent block was provided, update this block's data based on the parent's data
if parent_block_info:
# increment this block's depth value
self.depth = parent_block_info.depth + 1
# set this blocks' descendants_of_parent
self.descendants_of_parent = parent_block_info.descendants_of_self
# add ourselves to the parent's children, if requested.
if 'children' in request_info.fields:
parent_block_info.value.setdefault("children", []).append(unicode(block.location))
# the block's data to include in the response
self.value = {
"id": unicode(block.location),
"type": self.type,
"display_name": block.display_name,
"web_url": reverse(
"jump_to",
kwargs={"course_id": unicode(request_info.course.id), "location": unicode(block.location)},
request=request_info.request,
),
"block_url": reverse(
"courseware.views.render_xblock",
kwargs={"usage_key_string": unicode(block.location)},
request=request_info.request,
),
}
@view_course_access(depth=None)
def list(self, request, course, return_blocks=True, return_nav=True, *args, **kwargs):
"""
REST API endpoint for listing all the blocks and/or navigation information in the course,
while regarding user access and roles.
Arguments:
request - Django request object
course - course module object
return_blocks - If true, returns the blocks information for the course.
return_nav - If true, returns the navigation information for the course.
"""
# set starting point
start_block = course
# initialize request and result objects
request_info = self.RequestInfo(request, course)
result_data = self.ResultData(return_blocks, return_nav)
# create and populate a field data cache by pre-fetching for the course (with depth=None)
request_info.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, course, depth=None,
)
# start the recursion with the start_block
self.recurse_blocks_nav(request_info, result_data, self.BlockInfo(start_block, request_info))
# return response
response = {"root": unicode(start_block.location)}
result_data.update_response(response, return_blocks, return_nav)
return Response(response)
def recurse_blocks_nav(self, request_info, result_data, block_info):
"""
A depth-first recursive function that supports calculation of both the list of blocks in the course
and the navigation information up to the requested navigation_depth of the course.
Arguments:
request_info - Object encapsulating the request information.
result_data - Running result data that is updated during the recursion.
block_info - Information about the current block in the recursion.
"""
# bind user data to the block
block_info.block = get_module_for_descriptor(
request_info.request.user,
request_info.request,
block_info.block,
request_info.field_data_cache,
request_info.course.id,
course=request_info.course
)
# verify the user has access to this block
if (block_info.block is None or not has_access(
request_info.request.user,
'load',
block_info.block,
course_key=request_info.course.id
)):
return
# add the block's value to the result
result_data.blocks[unicode(block_info.block.location)] = block_info.value
# descendants
self.update_descendants(request_info, result_data, block_info)
# children: recursively call the function for each of the children, while supporting dynamic children.
if block_info.block.has_children:
block_info.children = get_dynamic_descriptor_children(block_info.block, request_info.request.user.id)
for child in block_info.children:
self.recurse_blocks_nav(
request_info,
result_data,
self.BlockInfo(child, request_info, parent_block_info=block_info)
)
# block count
self.update_block_count(request_info, result_data, block_info)
# block JSON data
self.add_block_json(request_info, block_info)
# multi-device support
if 'multi_device' in request_info.fields:
block_info.value['multi_device'] = block_info.block.has_support(
getattr(block_info.block, 'student_view', None),
'multi_device'
)
# additional fields
self.add_additional_fields(request_info, block_info)
def update_descendants(self, request_info, result_data, block_info):
"""
Updates the descendants data for the current block.
The current block is added to its parent's descendants if it is visible in the navigation
(i.e., the 'hide_from_toc' setting is False).
Additionally, the block's depth is compared with the navigation_depth parameter to determine whether the
descendants of the block should be added to its own descendants (if block.depth <= navigation_depth)
or to the descendants of the block's parents (if block.depth > navigation_depth).
block_info.descendants_of_self is the list of descendants that is passed to this block's children.
It should be either:
descendants_of_parent - if this block's depth is greater than the requested navigation_depth.
a dangling [] - if this block's hide_from_toc is True.
a referenced [] in navigation[block.location]["descendants"] - if this block's depth is within
the requested navigation depth.
"""
# Blocks with the 'hide_from_toc' setting are accessible, just not navigatable from the table-of-contents.
# If the 'hide_from_toc' setting is set on the block, do not add this block to the parent's descendants
# list and let the block's descendants add themselves to a dangling (unreferenced) descendants list.
if not block_info.block.hide_from_toc:
# add this block to the parent's descendants
block_info.descendants_of_parent.append(unicode(block_info.block.location))
# if this block's depth in the hierarchy is greater than the requested navigation depth,
# have the block's descendants add themselves to the parent's descendants.
if block_info.depth > request_info.navigation_depth:
block_info.descendants_of_self = block_info.descendants_of_parent
# otherwise, have the block's descendants add themselves to this block's descendants by
# referencing/attaching descendants_of_self from this block's navigation value.
else:
result_data.navigation.setdefault(
unicode(block_info.block.location), {}
)["descendants"] = block_info.descendants_of_self
def update_block_count(self, request_info, result_data, block_info):
"""
For all the block types that are requested to be counted, include the count of that block type as
aggregated from the block's descendants.
Arguments:
request_info - Object encapsulating the request information.
result_data - Running result data that is updated during the recursion.
block_info - Information about the current block in the recursion.
"""
for b_type in request_info.block_count:
block_info.value.setdefault("block_count", {})[b_type] = (
sum(
result_data.blocks.get(unicode(child.location), {}).get("block_count", {}).get(b_type, 0)
for child in block_info.children
) +
(1 if b_type == block_info.type else 0)
)
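    # Editor's illustration (made-up structure): with block_count=["video"], a
    # vertical holding two video blocks reports {"block_count": {"video": 2}};
    # its parent sums the children's counts, and a video block itself adds 1
    # for its own type via the trailing (1 if b_type == block_info.type else 0).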
def add_block_json(self, request_info, block_info):
"""
If the JSON data for this block's type is requested, and the block supports the 'student_view_json'
method, add the response from the 'student_view_json" method as the data for the block.
"""
if block_info.type in request_info.block_json:
if getattr(block_info.block, 'student_view_data', None):
block_info.value["block_json"] = block_info.block.student_view_data(
context=request_info.block_json[block_info.type]
)
# A mapping of API-exposed field names to xBlock field names and API field defaults.
BlockApiField = namedtuple('BlockApiField', 'block_field_name api_field_default')
FIELD_MAP = {
'graded': BlockApiField(block_field_name='graded', api_field_default=False),
'format': BlockApiField(block_field_name='format', api_field_default=None),
}
def add_additional_fields(self, request_info, block_info):
"""
Add additional field names and values of the block as requested in the request_info.
"""
for field_name in request_info.fields:
if field_name in self.FIELD_MAP:
block_info.value[field_name] = getattr(
block_info.block,
self.FIELD_MAP[field_name].block_field_name,
self.FIELD_MAP[field_name].api_field_default,
)
def perform_authentication(self, request):
"""
Ensures that the user is authenticated (e.g. not an AnonymousUser)
"""
super(CourseBlocksAndNavigation, self).perform_authentication(request)
if request.user.is_anonymous():
raise AuthenticationFailed
| agpl-3.0 | -7,537,644,080,810,032,000 | 4,236,140,579,261,430,000 | 39.348266 | 120 | 0.632678 | false |
katsikas/gnuradio | gr-vocoder/python/qa_g723_24_vocoder.py | 10 | 1231 | #!/usr/bin/env python
#
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from vocoder_swig import *
class test_g723_24_vocoder (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block()
def tearDown (self):
self.tb = None
def test001_module_load (self):
enc = g723_24_encode_sb();
dec = g723_24_decode_bs();
if __name__ == '__main__':
gr_unittest.run(test_g723_24_vocoder, "test_g723_24_vocoder.xml")
| gpl-3.0 | 5,493,530,868,898,678,000 | 4,602,583,792,544,405,500 | 30.564103 | 70 | 0.705118 | false |
ran5515/DeepDecision | tensorflow/contrib/ndlstm/python/__init__.py | 135 | 1103 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Init file, giving convenient access to all ndlstm ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,g-importing-member
from tensorflow.contrib.ndlstm.python.lstm1d import *
from tensorflow.contrib.ndlstm.python.lstm2d import *
from tensorflow.contrib.ndlstm.python.misc import *
# pylint: enable=wildcard-import
| apache-2.0 | -7,624,825,422,775,839,000 | 1,575,272,655,613,591,000 | 43.12 | 80 | 0.720762 | false |
DiegoQueiroz/scriptLattes | scriptLattes/producoesUnitarias/idioma.py | 3 | 1490 | #!/usr/bin/python
# encoding: utf-8
# filename: idioma.py
#
# scriptLattes V8
# Copyright 2005-2013: Jesús P. Mena-Chalco and Roberto M. Cesar-Jr.
# http://scriptlattes.sourceforge.net/
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation (FSF); either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation (FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
class Idioma:
descricao = ''
nome = ''
proficiencia = ''
def __init__(self, partesDoItem):
        # partesDoItem[0]: name of the language
        # partesDoItem[1]: description of proficiency in the language
self.nome = partesDoItem[0].strip()
self.proficiencia = partesDoItem[1].strip()
# ------------------------------------------------------------------------ #
def __str__(self):
s = "\n[IDIOMA] \n"
s += "+NOME : " + self.nome.encode('utf8','replace') + "\n"
s += "+PROFICIENCIA: " + self.proficiencia.encode('utf8','replace') + "\n"
return s
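# Editor's sketch (hypothetical CV fragment, not taken from a real Lattes
# record): constructing and printing an Idioma entry.
def _exemplo_idioma():
    idioma = Idioma([u'Ingles', u'Compreende Bem, Fala Bem'])
    return str(idioma)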
| gpl-2.0 | 8,701,932,617,931,628,000 | -5,273,842,147,015,761,000 | 33.023256 | 79 | 0.65892 | false |
gridcf/gct | myproxy/oauth/source/myproxyoauth/views.py | 4 | 14096 | #
# Copyright 2010-2011 University of Chicago
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import socket
import cgi
import random
import base64
import httplib
import json
import time
import oauth2 as oauth
import Crypto.PublicKey.RSA
import myproxy
import pkgutil
import os
import sys
from myproxyoauth import application
from myproxyoauth.database import db_session, Admin, Client, Transaction
from urllib import quote
def bad_request(start_response):
status = "400 Bad Request"
headers = [ ("Content-Type", "text/plain") ]
start_response(status, headers, sys.exc_info())
return "Bad request\n"
def get_template(name):
template_data = None
if hasattr(pkgutil, "get_data"):
template_data = pkgutil.get_data("myproxyoauth.templates", name)
else:
template_path = os.path.join(
os.path.dirname(__file__), 'templates', name)
template_file = file(template_path, "r")
try:
template_data = template_file.read()
finally:
template_file.close()
return template_data
def render_template(name, **kwargs):
template = get_template(name)
for template_token in kwargs:
template = template.replace(
"{{ " + template_token + " }}", kwargs[template_token])
return str(template)
def url_reconstruct(environ):
url = environ['wsgi.url_scheme']+'://'
if environ.get('HTTP_HOST'):
url += environ['HTTP_HOST']
else:
url += environ['SERVER_NAME']
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url += ':' + environ['SERVER_PORT']
else:
if environ['SERVER_PORT'] != '80':
url += ':' + environ['SERVER_PORT']
url += quote(environ.get('SCRIPT_NAME', ''))
url += quote(environ.get('PATH_INFO', ''))
if environ.get('QUERY_STRING'):
url += '?' + environ['QUERY_STRING']
return url
@application.route('/test')
def test(environ, start_response):
status = "403 Forbidden"
headers = [("Content-Type", "text/plain")]
start_response(status, headers)
return "bad"
@application.teardown_request
def shutdown_session(exception=None):
db_session.remove()
"""
Implementation of OAuth for MyProxy Protocol,
https://docs.google.com/document/pub?id=10SC7oSURc-EgxMQjcCS50gz0u2HzDJAFiG5hEHiSdxA
"""
@application.route('/initiate', methods=['GET'])
def initiate(environ, start_response):
try:
request = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
oauth_signature_method = request.getvalue('oauth_signature_method')
if oauth_signature_method is None:
oauth_signature_method='RSA-SHA1'
oauth_signature = str(request.getvalue('oauth_signature'))
oauth_timestamp = int(request.getvalue('oauth_timestamp'))
oauth_nonce = int(request.getvalue('oauth_nonce'))
oauth_version = str(request.getvalue('oauth_version'))
oauth_consumer_key = str(request.getvalue('oauth_consumer_key'))
oauth_callback = str(request.getvalue('oauth_callback'))
certlifetime = request.getvalue('certlifetime')
if certlifetime is not None:
certlifetime = int(certlifetime)
else:
certlifetime = 86400
clients = db_session.get_client(Client(oauth_consumer_key=oauth_consumer_key))
client = None
if len(clients) > 0:
client = clients[0]
if client is None:
application.logger.error('Unregistered client requested a temporary token.')
status = "403 Not authorized"
headers = [
("Content-Type", "text/plain") ]
start_response(status, headers)
return "Uregistered client"
if hasattr(Crypto.PublicKey.RSA, 'importKey'):
key = Crypto.PublicKey.RSA.importKey(client.oauth_client_pubkey)
else:
import M2Crypto.RSA
import M2Crypto.BIO
import struct
import sys
bio = M2Crypto.BIO.MemoryBuffer(str(client.oauth_client_pubkey))
k = None
try:
k = M2Crypto.RSA.load_pub_key_bio(bio)
def unpack_from(fmt, data, offs):
unpack_len = struct.calcsize(fmt)
return struct.unpack(fmt, data[offs:offs+unpack_len])
def decode(n):
len = reduce(lambda x,y: long(x*256+y),
unpack_from("4B", n, 0))
return reduce(lambda x,y: long(x*256+y),
unpack_from(str(len)+"B", n, 4))
keytuple = (decode(k.n), decode(k.e))
except:
application.logger.error(str(sys.exc_info()))
raise
key = Crypto.PublicKey.RSA.construct(keytuple)
method = environ['REQUEST_METHOD']
url = url_reconstruct(environ)
o_request = oauth.Request.from_request(method, url)
o_consumer = oauth.Consumer(client.oauth_consumer_key, key)
o_server = oauth.Server()
o_server.add_signature_method(oauth.SignatureMethod_RSA_SHA1())
try:
o_server.verify_request(o_request, o_consumer, None)
except:
e = sys.exc_info()
application.logger.error(str(e[1]))
status = "403 Not authorized"
headers = [
("Content-Type", "text/plain") ]
start_response(status, headers, e)
return str(e[1])
certreq = str(request.getvalue('certreq'))
oauth_temp_token = 'myproxy:oa4mp,2012:/tempCred/' \
+ ''.join([random.choice('0123456789abcdef') for i in range(32)]) \
+ '/' + str(int(time.time()))
transaction = Transaction()
transaction.temp_token = oauth_temp_token
transaction.temp_token_valid = 1
transaction.oauth_callback = oauth_callback
transaction.certreq = certreq
transaction.oauth_consumer_key = oauth_consumer_key
transaction.certlifetime = certlifetime
transaction.timestamp = int(time.time())
db_session.add_transaction(transaction)
db_session.commit()
status = "200 Ok"
headers = [
("Content-Type", "app/x-www-form-urlencoded") ]
start_response(status, headers)
return "oauth_token=%s&oauth_callback_confirmed=true" % oauth_temp_token
except:
return bad_request(start_response)
@application.route('/authorize', methods=['GET'])
def get_authorize(environ, start_response):
try:
request = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
oauth_temp_token = str(request.getvalue('oauth_token'))
transactions = db_session.get_transaction(
Transaction(temp_token=oauth_temp_token, temp_token_valid=1))
if len(transactions) == 0:
status = "403 Not authorized"
headers = [ ("Content-Type", "text/plain") ]
start_response(status, headers)
return 'Invalid temporary token'
transaction = transactions[0]
clients = db_session.get_client(Client(oauth_consumer_key=transaction.oauth_consumer_key))
if len(clients) == 0:
status = "403 Not authorized"
headers = [ ("Content-Type", "text/plain") ]
start_response(status, headers)
return 'Unregistered client'
client = clients[0]
transaction.temp_token_valid = 0
db_session.update_transaction(transaction)
db_session.commit()
styles = ['static/oauth.css']
css_path = os.path.join(
os.path.dirname(__file__), 'static', 'site.css')
if os.path.exists(css_path):
styles.append("static/site.css")
res = render_template('authorize.html',
client_name=client.name,
client_url=client.home_url,
temp_token=oauth_temp_token,
retry_message="",
stylesheets="\n".join(
[("<link rel='stylesheet' type='text/css' href='%s' >" % x) for x in styles]))
status = "200 Ok"
headers = [ ("Content-Type", "text/html")]
start_response(status, headers)
return res
except:
return bad_request(start_response)
@application.route('/authorize', methods=['POST'])
def post_authorize(environ, start_response):
try:
request = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
oauth_temp_token = str(request.getvalue('oauth_token'))
username = str(request.getvalue('username'))
passphrase = str(request.getvalue('passphrase'))
transactions = db_session.get_transaction(Transaction(temp_token=oauth_temp_token))
transaction = None
if len(transactions) > 0:
transaction = transactions[0]
clients = db_session.get_client(Client(oauth_consumer_key=transaction.oauth_consumer_key))
client = None
if len(clients) > 0:
client = clients[0]
cert = None
try:
certreq = "-----BEGIN CERTIFICATE REQUEST-----\n" + str(transaction.certreq) + "-----END CERTIFICATE REQUEST-----\n"
cert = myproxy.myproxy_logon(certreq,
transaction.certlifetime,
username, passphrase, client.myproxy_server)
except:
e = sys.exc_info()
application.logger.error(str(e[1]))
status = "200 Ok"
headers = [ ("Content-Type", "text/html") ]
styles = ['static/oauth.css']
css_path = os.path.join(
os.path.dirname(__file__), 'static', 'site.css')
if os.path.exists(css_path):
styles.append("static/site.css")
res = render_template('authorize.html',
client_name=client.name,
client_url=client.home_url,
temp_token=oauth_temp_token,
retry_message=str(e[1]),
stylesheets="\n".join(
[("<link rel='stylesheet' type='text/css' href='%s' >" % x) for x in styles]))
start_response(status, headers, e)
return res
oauth_verifier = 'myproxy:oa4mp,2012:/verifier/' \
+ ''.join([random.choice('0123456789abcdef') for i in range(32)]) \
+ '/' + str(int(time.time()))
transaction.oauth_verifier = oauth_verifier
transaction.certificate = cert
transaction.username = username
db_session.update_transaction(transaction)
db_session.commit()
status = "301 Moved Permanently"
joiner = "?"
if "?" in transaction.oauth_callback:
joiner="&"
headers = [
("Location", str("%s%soauth_token=%s&oauth_verifier=%s" % \
(transaction.oauth_callback, joiner, oauth_temp_token, oauth_verifier)))]
start_response(status, headers)
return ""
except:
return bad_request(start_response)
@application.route('/token', methods=['GET'])
def token(environ, start_response):
try:
args = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
oauth_signature_method = args.getvalue('oauth_signature_method')
if oauth_signature_method is None:
oauth_signature_method='RSA-SHA1'
else:
oauth_signature_method=str(oauth_signature_method)
oauth_signature = str(args.getvalue('oauth_signature'))
oauth_timestamp = int(args.getvalue('oauth_timestamp'))
oauth_nonce = int(args.getvalue('oauth_nonce'))
oauth_version = str(args.getvalue('oauth_version'))
oauth_consumer_key = str(args.getvalue('oauth_consumer_key'))
oauth_temp_token = str(args.getvalue('oauth_token'))
oauth_verifier = str(args.getvalue('oauth_verifier'))
oauth_access_token = 'myproxy:oa4mp,2012:/accessToken/' \
+ ''.join([random.choice('0123456789abcdef') for i in range(32)]) \
+ '/' + str(int(time.time()))
transactions = db_session.get_transaction(Transaction(temp_token=oauth_temp_token))
transaction = None
if len(transactions) > 0:
transaction = transactions[0]
transaction.access_token = oauth_access_token
db_session.update_transaction(transaction)
db_session.commit()
status = "200 Ok"
headers = [('Content-Type', 'app/x-www-form-urlencoded')]
resp = start_response(status, headers)
return "oauth_token=%s" % str(oauth_access_token)
except:
return bad_request(start_response)
@application.route('/getcert', methods=['GET'])
def getcert(environ, start_response):
try:
args = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
oauth_signature_method = args.getvalue('oauth_signature_method')
if oauth_signature_method is None:
oauth_signature_method = 'RSA-SHA1'
else:
oauth_signature_method = str(oauth_signature_method)
oauth_signature = str(args.getvalue('oauth_signature'))
oauth_timestamp = int(args.getvalue('oauth_timestamp'))
oauth_nonce = int(args.getvalue('oauth_nonce'))
oauth_version = str(args.getvalue('oauth_version'))
oauth_consumer_key = str(args.getvalue('oauth_consumer_key'))
oauth_access_token = str(args.getvalue('oauth_token'))
transactions = db_session.get_transaction(
Transaction(access_token=oauth_access_token))
transaction = None
if len(transactions) > 0:
transaction = transactions[0]
if transaction is None:
status = "403 Forbidden"
headers = [("Content-Type", "text/plain")]
start_response(status, headers)
return "Invalid access token"
        # Clear old transactions from the database
        old_transactions = [t for t in db_session.get_transaction() if t.timestamp < int(time.time())]
db_session.delete_transactions(old_transactions)
db_session.commit()
status = "200 Ok"
headers = [ ("Content-Type", "app/x-www-form-urlencoded") ]
start_response(status, headers)
return 'username=%s\n%s' % (str(transaction.username), str(transaction.certificate))
except:
return bad_request(start_response)
# vim: syntax=python: nospell:
| apache-2.0 | -1,372,846,164,749,796,400 | -1,506,630,803,248,493,000 | 35.14359 | 124 | 0.640253 | false |