| text (string, 6–947k chars) | repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (1 class) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) |
---|---|---|---|---|---|---|
from . import _ccallback_c
import ctypes
PyCFuncPtr = ctypes.CFUNCTYPE(ctypes.c_void_p).__bases__[0]
ffi = None
class CData(object):
pass
def _import_cffi():
global ffi, CData
if ffi is not None:
return
try:
import cffi
ffi = cffi.FFI()
CData = ffi.CData
except ImportError:
ffi = False
class LowLevelCallable(tuple):
"""
Low-level callback function.
Parameters
----------
function : {PyCapsule, ctypes function pointer, cffi function pointer}
Low-level callback function.
user_data : {PyCapsule, ctypes void pointer, cffi void pointer}
User data to pass on to the callback function.
signature : str, optional
Signature of the function. If omitted, determined from *function*,
if possible.
Attributes
----------
function
Callback function given
user_data
User data given
signature
Signature of the function.
Methods
-------
from_cython
Class method for constructing callables from Cython C-exported
functions.
Notes
-----
The argument ``function`` can be one of:
- PyCapsule, whose name contains the C function signature
- ctypes function pointer
- cffi function pointer
The signature of the low-level callback must match one of those expected
by the routine it is passed to.
If constructing low-level functions from a PyCapsule, the name of the
capsule must be the corresponding signature, in the format::
return_type (arg1_type, arg2_type, ...)
For example::
"void (double)"
"double (double, int *, void *)"
The context of a PyCapsule passed in as ``function`` is used as ``user_data``,
if an explicit value for ``user_data`` was not given.
"""
# Make the class immutable
__slots__ = ()
def __new__(cls, function, user_data=None, signature=None):
# We need to hold a reference to the function & user data,
# to prevent them going out of scope
item = cls._parse_callback(function, user_data, signature)
return tuple.__new__(cls, (item, function, user_data))
def __repr__(self):
return "LowLevelCallable({!r}, {!r})".format(self.function, self.user_data)
@property
def function(self):
return tuple.__getitem__(self, 1)
@property
def user_data(self):
return tuple.__getitem__(self, 2)
@property
def signature(self):
return _ccallback_c.get_capsule_signature(tuple.__getitem__(self, 0))
def __getitem__(self, idx):
raise ValueError()
@classmethod
def from_cython(cls, module, name, user_data=None, signature=None):
"""
Create a low-level callback function from an exported Cython function.
Parameters
----------
module : module
Cython module where the exported function resides
name : str
Name of the exported function
user_data : {PyCapsule, ctypes void pointer, cffi void pointer}, optional
User data to pass on to the callback function.
signature : str, optional
Signature of the function. If omitted, determined from *function*.
"""
try:
function = module.__pyx_capi__[name]
except AttributeError:
raise ValueError("Given module is not a Cython module with __pyx_capi__ attribute")
except KeyError:
raise ValueError("No function {!r} found in __pyx_capi__ of the module".format(name))
return cls(function, user_data, signature)
@classmethod
def _parse_callback(cls, obj, user_data=None, signature=None):
_import_cffi()
if isinstance(obj, LowLevelCallable):
func = tuple.__getitem__(obj, 0)
elif isinstance(obj, PyCFuncPtr):
func, signature = _get_ctypes_func(obj, signature)
elif isinstance(obj, CData):
func, signature = _get_cffi_func(obj, signature)
elif _ccallback_c.check_capsule(obj):
func = obj
else:
raise ValueError("Given input is not a callable or a low-level callable (pycapsule/ctypes/cffi)")
if isinstance(user_data, ctypes.c_void_p):
context = _get_ctypes_data(user_data)
elif isinstance(user_data, CData):
context = _get_cffi_data(user_data)
elif user_data is None:
context = 0
elif _ccallback_c.check_capsule(user_data):
context = user_data
else:
raise ValueError("Given user data is not a valid low-level void* pointer (pycapsule/ctypes/cffi)")
return _ccallback_c.get_raw_capsule(func, signature, context)
#
# ctypes helpers
#
def _get_ctypes_func(func, signature=None):
# Get function pointer
func_ptr = ctypes.cast(func, ctypes.c_void_p).value
# Construct function signature
if signature is None:
signature = _typename_from_ctypes(func.restype) + " ("
for j, arg in enumerate(func.argtypes):
if j == 0:
signature += _typename_from_ctypes(arg)
else:
signature += ", " + _typename_from_ctypes(arg)
signature += ")"
return func_ptr, signature
def _typename_from_ctypes(item):
if item is None:
return "void"
elif item is ctypes.c_void_p:
return "void *"
name = item.__name__
pointer_level = 0
while name.startswith("LP_"):
pointer_level += 1
name = name[3:]
if name.startswith('c_'):
name = name[2:]
if pointer_level > 0:
name += " " + "*"*pointer_level
return name
def _get_ctypes_data(data):
# Get voidp pointer
return ctypes.cast(data, ctypes.c_void_p).value
#
# CFFI helpers
#
def _get_cffi_func(func, signature=None):
# Get function pointer
func_ptr = ffi.cast('uintptr_t', func)
# Get signature
if signature is None:
signature = ffi.getctype(ffi.typeof(func)).replace('(*)', ' ')
return func_ptr, signature
def _get_cffi_data(data):
# Get pointer
return ffi.cast('uintptr_t', data)
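# --- Editor-added illustrative sketch (not part of the original scipy module) ---
# A hedged example of how a LowLevelCallable might be built from a ctypes
# function pointer, assuming an installed scipy; the callback body and its
# "double (double)" signature are assumptions chosen for illustration only.
def _lowlevelcallable_ctypes_sketch():
    import ctypes
    from scipy import LowLevelCallable

    # A C-compatible callback with the capsule-style signature "double (double)".
    callback_type = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)

    @callback_type
    def _double_it(x):
        return 2.0 * x

    low_level = LowLevelCallable(_double_it)
    # The signature string is derived from the ctypes restype/argtypes.
    return low_level.signature  # expected: "double (double)"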
| Eric89GXL/scipy | scipy/_lib/_ccallback.py | Python | bsd-3-clause | 6,196 | 0.001453 |
#!/usr/bin/env python
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class JsonErrorResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Swagger model
:param dict swaggerTypes: The key is attribute name and the value is attribute type.
:param dict attributeMap: The key is attribute name and the value is json key in definition.
"""
self.swagger_types = {
'status': 'str',
'message': 'str'
}
self.attribute_map = {
'status': 'status',
'message': 'message'
}
# Status: "ok" or "error"
self.status = None # str
# Error message
self.message = None # str
def __repr__(self):
properties = []
for p in self.__dict__:
if p != 'swaggerTypes' and p != 'attributeMap':
properties.append('{prop}={val!r}'.format(prop=p, val=self.__dict__[p]))
return '<{name} {props}>'.format(name=__name__, props=' '.join(properties))
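# --- Editor-added illustrative sketch (not part of the generated module) ---
# A hedged example of populating the generated swagger model; the message
# text below is an assumption used purely for illustration.
def _json_error_response_sketch():
    resp = JsonErrorResponse()
    resp.status = 'error'            # documented values: "ok" or "error"
    resp.message = 'pet not found'   # assumed example message
    return repr(resp)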
| QuantiModo/QuantiModo-SDK-Python | SwaggerPetstore/models/json_error_response.py | Python | gpl-2.0 | 1,773 | 0.00564 |
# -*- coding: utf-8 -*-
appid = 'example'
apikey = 'c5dd7e7dkjp27377l903c42c032b413b'
sender = '01000000000' # FIXME - MUST BE CHANGED AS REAL PHONE NUMBER
receivers = ['01000000000', ] # FIXME - MUST BE CHANGED AS REAL PHONE NUMBERS
content = u'나는 유리를 먹을 수 있어요. 그래도 아프지 않아요'  # Korean: "I can eat glass. It doesn't hurt me."
| BlueHouseLab/sms-openapi | python-requests/conf.py | Python | apache-2.0 | 324 | 0.003521 |
##
# Copyright 2009-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for software that is configured with CMake, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
import os
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option
from easybuild.tools.environment import setvar
from easybuild.tools.run import run_cmd
class CMakeMake(ConfigureMake):
"""Support for configuring build with CMake instead of traditional configure script"""
@staticmethod
def extra_options(extra_vars=None):
"""Define extra easyconfig parameters specific to CMakeMake."""
extra_vars = ConfigureMake.extra_options(extra_vars)
extra_vars.update({
'srcdir': [None, "Source directory location to provide to cmake command", CUSTOM],
'separate_build_dir': [False, "Perform build in a separate directory", CUSTOM],
})
return extra_vars
def configure_step(self, srcdir=None, builddir=None):
"""Configure build using cmake"""
if builddir is not None:
self.log.nosupport("CMakeMake.configure_step: named argument 'builddir' (should be 'srcdir')", "2.0")
# Set the search paths for CMake
include_paths = os.pathsep.join(self.toolchain.get_variable("CPPFLAGS", list))
library_paths = os.pathsep.join(self.toolchain.get_variable("LDFLAGS", list))
setvar("CMAKE_INCLUDE_PATH", include_paths)
setvar("CMAKE_LIBRARY_PATH", library_paths)
default_srcdir = '.'
if self.cfg.get('separate_build_dir', False):
objdir = os.path.join(self.builddir, 'easybuild_obj')
try:
os.mkdir(objdir)
os.chdir(objdir)
except OSError, err:
raise EasyBuildError("Failed to create separate build dir %s in %s: %s", objdir, os.getcwd(), err)
default_srcdir = self.cfg['start_dir']
if srcdir is None:
if self.cfg.get('srcdir', None) is not None:
srcdir = self.cfg['srcdir']
else:
srcdir = default_srcdir
options = ['-DCMAKE_INSTALL_PREFIX=%s' % self.installdir]
env_to_options = {
'CC': 'CMAKE_C_COMPILER',
'CFLAGS': 'CMAKE_C_FLAGS',
'CXX': 'CMAKE_CXX_COMPILER',
'CXXFLAGS': 'CMAKE_CXX_FLAGS',
'F90': 'CMAKE_Fortran_COMPILER',
'FFLAGS': 'CMAKE_Fortran_FLAGS',
}
for env_name, option in env_to_options.items():
value = os.getenv(env_name)
if value is not None:
options.append("-D%s='%s'" % (option, value))
if build_option('rpath'):
# instruct CMake not to fiddle with RPATH when --rpath is used, since it will undo stuff on install...
# https://github.com/LLNL/spack/blob/0f6a5cd38538e8969d11bd2167f11060b1f53b43/lib/spack/spack/build_environment.py#L416
options.append('-DCMAKE_SKIP_RPATH=ON')
# show what CMake is doing by default
options.append('-DCMAKE_VERBOSE_MAKEFILE=ON')
options_string = ' '.join(options)
command = "%s cmake %s %s %s" % (self.cfg['preconfigopts'], srcdir, options_string, self.cfg['configopts'])
(out, _) = run_cmd(command, log_all=True, simple=False)
return out
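# --- Editor-added illustrative sketch (not part of the original easyblock) ---
# Roughly the kind of `cmake` invocation configure_step assembles, assuming
# CC=gcc, CXX=g++ and an install prefix of /tmp/software/example (all of
# these values are assumptions, not EasyBuild defaults).
_EXAMPLE_CONFIGURE_COMMAND = (
    " cmake . "
    "-DCMAKE_INSTALL_PREFIX=/tmp/software/example "
    "-DCMAKE_C_COMPILER='gcc' "
    "-DCMAKE_CXX_COMPILER='g++' "
    "-DCMAKE_VERBOSE_MAKEFILE=ON "
)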
| ULHPC/easybuild-easyblocks | easybuild/easyblocks/generic/cmakemake.py | Python | gpl-2.0 | 4,702 | 0.002552 |
# coding=utf-8
from __future__ import unicode_literals
"""
Name: MyArgparse
Author: Andy Liu
Email : [email protected]
Created: 3/26/2015
Copyright: All rights reserved.
Licence: This program is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import logging
def parse_command_line():
parser = argparse.ArgumentParser(prog='PROG', description='%(prog)s can ...')
parser.add_argument('NoPre', action="store", help='help information')
parser.add_argument('-t', action="store_true", dest='boolean_switch', default=False, help='Set a switch to true')
parser.add_argument('-f', action="store_false", dest='boolean_switch', default=True, help='Set a switch to false')
parser.add_argument('-s', action="store", dest='simple_value', help="Store a simple value")
parser.add_argument('-st', action="store", dest="simple_value", type=int,
help='Store a simple value and define type')
parser.add_argument('-c', action='store_const', dest='constant_value', const='value-to-store',
help='Store a constant value')
parser.add_argument('-a', action='append', dest='collection', default=[], help='Add repeated values to a list')
parser.add_argument('-A', action='append_const', dest='const_collection', const='value-1-to-append', default=[],
help='Add different values to list')
parser.add_argument('-B', action='append_const', dest='const_collection', const='value-2-to-append',
help='Add different values to list')
args = parser.parse_args()
logging.debug('NoPre = %r' % args.NoPre)
logging.debug('simple_value = %r' % args.simple_value)
logging.debug('constant_value = %r' % args.constant_value)
logging.debug('boolean_switch = %r' % args.boolean_switch)
logging.debug('collection = %r' % args.collection)
logging.debug('const_collection = %r' % args.const_collection)
return args
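def _parse_command_line_sketch():
    # --- Editor-added illustrative sketch (not part of the original module) ---
    # Exercises the parser with an assumed argument vector to show which
    # destinations each option populates.
    import sys
    sys.argv = ['PROG', 'positional', '-t', '-s', 'simple', '-a', 'one', '-a', 'two']
    args = parse_command_line()
    assert args.NoPre == 'positional'
    assert args.boolean_switch is True
    assert args.simple_value == 'simple'
    assert args.collection == ['one', 'two']
    return args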
if __name__ == '__main__':
from MyLog import init_logger
logger = init_logger()
parse_command_line()
| asiroliu/MyTools | MyArgparse.py | Python | gpl-2.0 | 2,738 | 0.004018 |
from django.contrib import admin
from trainer.models import Language, Word, Card, Set
admin.site.register(Language)
admin.site.register(Word)
admin.site.register(Card)
admin.site.register(Set)
| chrigu6/vocabulary | vocabulary/trainer/admin.py | Python | gpl-3.0 | 195 | 0 |
# -*- coding: utf-8 -*-
import logging
import random
from collections import (
namedtuple,
defaultdict,
)
from itertools import repeat
import cachetools
import gevent
from gevent.event import (
_AbstractLinkable,
AsyncResult,
Event,
)
from ethereum import slogging
from raiden.exceptions import (
InvalidAddress,
InvalidLocksRoot,
InvalidNonce,
TransferWhenClosed,
TransferUnwanted,
UnknownAddress,
UnknownTokenAddress,
)
from raiden.constants import (
UDP_MAX_MESSAGE_SIZE,
)
from raiden.settings import (
CACHE_TTL,
)
from raiden.messages import decode, Ack, Ping, SignedMessage
from raiden.utils import isaddress, sha3, pex
from raiden.utils.notifying_queue import NotifyingQueue
log = slogging.get_logger(__name__) # pylint: disable=invalid-name
ping_log = slogging.get_logger(__name__ + '.ping') # pylint: disable=invalid-name
# - async_result available for code that wants to block on message acknowledgment
# - receiver_address used to tie back the echohash to the receiver (mainly for
# logging purposes)
SentMessageState = namedtuple('SentMessageState', (
'async_result',
'receiver_address',
))
HealthEvents = namedtuple('HealthEvents', (
'event_healthy',
'event_unhealthy',
))
NODE_NETWORK_UNKNOWN = 'unknown'
NODE_NETWORK_UNREACHABLE = 'unreachable'
NODE_NETWORK_REACHABLE = 'reachable'
# GOALS:
# - Each netting channel must have the messages processed in-order, the
# protocol must detect unacknowledged messages and retry them.
# - A queue must not stall because of synchronization problems in other queues.
# - Assuming a queue can stall, the unhealthiness of a node must not be
# inferred from the lack of acknowledgement from a single queue, but healthiness
# may be safely inferred from it.
# - The state of the node must be synchronized among all tasks that are
# handling messages.
def event_first_of(*events):
""" Waits until one of `events` is set.
The event returned is /not/ cleared with any of the `events`, this value
must not be reused if the clearing behavior is used.
"""
first_finished = Event()
if not all(isinstance(e, _AbstractLinkable) for e in events):
raise ValueError('all events must be linkable')
for event in events:
event.rawlink(lambda _: first_finished.set())
return first_finished
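def _event_first_of_sketch():
    # --- Editor-added illustrative sketch (not part of the original module) ---
    # Setting either source event sets the combined event returned by
    # event_first_of, so a wait on it returns promptly.
    stop = Event()
    healthy = Event()
    first = event_first_of(stop, healthy)
    healthy.set()
    return first.wait(1.0)  # expected to return True well before the timeout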
def timeout_exponential_backoff(retries, timeout, maximum):
""" Timeouts generator with an exponential backoff strategy.
Timeouts start spaced by `timeout`, after `retries` exponentially increase
the retry delays until `maximum`, then maximum is returned indefinitely.
"""
yield timeout
tries = 1
while tries < retries:
tries += 1
yield timeout
while timeout < maximum:
timeout = min(timeout * 2, maximum)
yield timeout
while True:
yield maximum
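def _timeout_backoff_sketch():
    # --- Editor-added illustrative sketch (not part of the original module) ---
    # With retries=3, timeout=1 and maximum=8 the generator yields `retries`
    # flat timeouts, then doubles up to `maximum`, then repeats `maximum`.
    from itertools import islice
    values = list(islice(timeout_exponential_backoff(3, 1, 8), 8))
    assert values == [1, 1, 1, 2, 4, 8, 8, 8]
    return values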
def retry(protocol, data, receiver_address, event_stop, timeout_backoff):
""" Send data until it's acknowledged.
Exits when the first of the following happen:
- The packet is acknowledged.
- Event_stop is set.
- The iterator timeout_backoff runs out of values.
Returns:
bool: True if the message was acknowledged, False otherwise.
"""
async_result = protocol.send_raw_with_result(
data,
receiver_address,
)
event_quit = event_first_of(
async_result,
event_stop,
)
for timeout in timeout_backoff:
if event_quit.wait(timeout=timeout) is True:
break
protocol.send_raw_with_result(
data,
receiver_address,
)
return async_result.ready()
def wait_recovery(event_stop, event_healthy):
event_first_of(
event_stop,
event_healthy,
).wait()
if event_stop.is_set():
return
# There may be multiple threads waiting, do not restart them all at
# once to avoid message flood.
gevent.sleep(random.random())
def retry_with_recovery(
protocol,
data,
receiver_address,
event_stop,
event_healthy,
event_unhealthy,
backoff):
""" Send data while the node is healthy until it's acknowledged.
Note:
backoff must be an infinite iterator, otherwise this task will
become a hot loop.
"""
# The underlying unhealthy will be cleared, care must be taken to properly
# clear stop_or_unhealthy too.
stop_or_unhealthy = event_first_of(
event_stop,
event_unhealthy,
)
acknowledged = False
while not event_stop.is_set() and not acknowledged:
# Packets must not be sent to an unhealthy node, nor should the task
# wait for it to become available if the message has been acknowledged.
if event_unhealthy.is_set():
wait_recovery(
event_stop,
event_healthy,
)
# Assume wait_recovery returned because unhealthy was cleared and
# continue execution, this is safe to do because event_stop is
# checked below.
stop_or_unhealthy.clear()
if event_stop.is_set():
return
acknowledged = retry(
protocol,
data,
receiver_address,
# retry will stop when this event is set, allowing this task to
# wait for recovery when the node becomes unhealthy or to quit if
# the stop event is set.
stop_or_unhealthy,
# Intentionally reusing backoff to restart from the last
# timeout/number of iterations.
backoff,
)
return acknowledged
def single_queue_send(
protocol,
receiver_address,
queue,
event_stop,
event_healthy,
event_unhealthy,
message_retries,
message_retry_timeout,
message_retry_max_timeout):
""" Handles a single message queue for `receiver_address`.
Notes:
- This task must be the only consumer of queue.
- This task can be killed at any time, but the intended usage is to stop it
with the event_stop.
- If there are many queues for the same receiver_address, it is the
caller's responsibility to not start them together to avoid congestion.
- This task assumes the endpoint is never cleared after it's first known.
If this assumption changes the code must be updated to handle unknown
addresses.
"""
# A NotifyingQueue is required to implement cancelability, otherwise the
# task cannot be stopped while the greenlet waits for an element to be
# inserted in the queue.
if not isinstance(queue, NotifyingQueue):
raise ValueError('queue must be a NotifyingQueue.')
# Reusing the event, clear must be carefully done
data_or_stop = event_first_of(
queue,
event_stop,
)
# Wait for the endpoint registration or to quit
event_first_of(
event_healthy,
event_stop,
).wait()
while True:
data_or_stop.wait()
if event_stop.is_set():
return
# The queue is not empty at this point, so this won't raise Empty.
# This task being the only consumer is a requirement.
data = queue.peek(block=False)
backoff = timeout_exponential_backoff(
message_retries,
message_retry_timeout,
message_retry_max_timeout,
)
acknowledged = retry_with_recovery(
protocol,
data,
receiver_address,
event_stop,
event_healthy,
event_unhealthy,
backoff,
)
if acknowledged:
queue.get()
# Checking the length of the queue does not trigger a
# context-switch, so it's safe to assume the length of the queue
# won't change under our feet and when a new item will be added the
# event will be set again.
if not queue:
data_or_stop.clear()
if event_stop.is_set():
return
def healthcheck(
protocol,
receiver_address,
event_stop,
event_healthy,
event_unhealthy,
nat_keepalive_retries,
nat_keepalive_timeout,
nat_invitation_timeout,
ping_nonce):
""" Sends a periodical Ping to `receiver_address` to check its health. """
# The state of the node is unknown, the events are set to allow the tasks
# to do work.
protocol.set_node_network_state(
receiver_address,
NODE_NETWORK_UNKNOWN,
)
# Always call `clear` before `set`, since only `set` does context-switches
# it's easier to reason about tasks that are waiting on both events.
# Wait for the end-point registration or for the node to quit
try:
protocol.get_host_port(receiver_address)
except UnknownAddress:
event_healthy.clear()
event_unhealthy.set()
backoff = timeout_exponential_backoff(
nat_keepalive_retries,
nat_keepalive_timeout,
nat_invitation_timeout,
)
sleep = next(backoff)
while not event_stop.wait(sleep):
try:
protocol.get_host_port(receiver_address)
except UnknownAddress:
sleep = next(backoff)
else:
break
# Don't wait to send the first Ping and to start sending messages if the
# endpoint is known
sleep = 0
event_unhealthy.clear()
event_healthy.set()
while not event_stop.wait(sleep):
sleep = nat_keepalive_timeout
ping_nonce['nonce'] += 1
data = protocol.get_ping(
ping_nonce['nonce'],
)
# Send Ping a few times before setting the node as unreachable
acknowledged = retry(
protocol,
data,
receiver_address,
event_stop,
[nat_keepalive_timeout] * nat_keepalive_retries,
)
if event_stop.is_set():
return
if not acknowledged:
# The node is not healthy, clear the event to stop all queue
# tasks
protocol.set_node_network_state(
receiver_address,
NODE_NETWORK_UNREACHABLE,
)
event_healthy.clear()
event_unhealthy.set()
# Retry until recovery, used for:
# - Checking node status.
# - Nat punching.
acknowledged = retry(
protocol,
data,
receiver_address,
event_stop,
repeat(nat_invitation_timeout),
)
if acknowledged:
event_unhealthy.clear()
event_healthy.set()
protocol.set_node_network_state(
receiver_address,
NODE_NETWORK_REACHABLE,
)
class RaidenProtocol(object):
""" Encode the message into a packet and send it.
Each message received is stored by hash and if it is received twice the
previous answer is resent.
Repeat sending messages until an acknowledgment is received or the maximum
number of retries is hit.
"""
def __init__(
self,
transport,
discovery,
raiden,
retry_interval,
retries_before_backoff,
nat_keepalive_retries,
nat_keepalive_timeout,
nat_invitation_timeout):
self.transport = transport
self.discovery = discovery
self.raiden = raiden
self.retry_interval = retry_interval
self.retries_before_backoff = retries_before_backoff
self.nat_keepalive_retries = nat_keepalive_retries
self.nat_keepalive_timeout = nat_keepalive_timeout
self.nat_invitation_timeout = nat_invitation_timeout
self.event_stop = Event()
self.channel_queue = dict() # TODO: Change keys to the channel address
self.greenlets = list()
self.addresses_events = dict()
self.nodeaddresses_networkstatuses = defaultdict(lambda: NODE_NETWORK_UNKNOWN)
# Maps the echohash of received and *successfully* processed messages to
# its Ack, used to ignore duplicate messages and resend the Ack.
self.receivedhashes_to_acks = dict()
# Maps the echohash to a SentMessageState
self.senthashes_to_states = dict()
# Maps the addresses to a dict with the latest nonce (using a dict
# because python integers are immutable)
self.nodeaddresses_to_nonces = dict()
cache = cachetools.TTLCache(
maxsize=50,
ttl=CACHE_TTL,
)
cache_wrapper = cachetools.cached(cache=cache)
self.get_host_port = cache_wrapper(discovery.get)
def start(self):
self.transport.start()
def stop_and_wait(self):
# Stop handling incoming packets, but don't close the socket. The
# socket can only be safely closed after all outgoing tasks are stopped
self.transport.stop_accepting()
# Stop processing the outgoing queues
self.event_stop.set()
gevent.wait(self.greenlets)
# All outgoing tasks are stopped. Now it's safe to close the socket. At
# this point there might be some incoming message being processed,
# keeping the socket open is not useful for these.
self.transport.stop()
# Set all the pending results to False
for waitack in self.senthashes_to_states.itervalues():
waitack.async_result.set(False)
def get_health_events(self, receiver_address):
""" Starts a healthcheck taks for `receiver_address` and returns a
HealthEvents with locks to react on its current state.
"""
if receiver_address not in self.addresses_events:
self.start_health_check(receiver_address)
return self.addresses_events[receiver_address]
def start_health_check(self, receiver_address):
""" Starts a task for healthchecking `receiver_address` if there is not
one yet.
"""
if receiver_address not in self.addresses_events:
ping_nonce = self.nodeaddresses_to_nonces.setdefault(
receiver_address,
{'nonce': 0}, # HACK: Allows the task to mutate the object
)
events = HealthEvents(
event_healthy=Event(),
event_unhealthy=Event(),
)
self.addresses_events[receiver_address] = events
self.greenlets.append(gevent.spawn(
healthcheck,
self,
receiver_address,
self.event_stop,
events.event_healthy,
events.event_unhealthy,
self.nat_keepalive_retries,
self.nat_keepalive_timeout,
self.nat_invitation_timeout,
ping_nonce,
))
def get_channel_queue(self, receiver_address, token_address):
key = (
receiver_address,
token_address,
)
if key in self.channel_queue:
return self.channel_queue[key]
queue = NotifyingQueue()
self.channel_queue[key] = queue
events = self.get_health_events(receiver_address)
self.greenlets.append(gevent.spawn(
single_queue_send,
self,
receiver_address,
queue,
self.event_stop,
events.event_healthy,
events.event_unhealthy,
self.retries_before_backoff,
self.retry_interval,
self.retry_interval * 10,
))
if log.isEnabledFor(logging.DEBUG):
log.debug(
'new queue created for',
node=pex(self.raiden.address),
token=pex(token_address),
to=pex(receiver_address),
)
return queue
def send_async(self, receiver_address, message):
if not isaddress(receiver_address):
raise ValueError('Invalid address {}'.format(pex(receiver_address)))
if isinstance(message, (Ack, Ping)):
raise ValueError('Do not use send for Ack or Ping messages')
# Messages that are not unique per receiver can result in hash
# collision, e.g. Secret messages. The hash collision has the undesired
# effect of aborting message resubmission once /one/ of the nodes
# replied with an Ack, adding the receiver address into the echohash to
# avoid these collisions.
messagedata = message.encode()
echohash = sha3(messagedata + receiver_address)
if len(messagedata) > UDP_MAX_MESSAGE_SIZE:
raise ValueError(
'message size exceeds the maximum {}'.format(UDP_MAX_MESSAGE_SIZE)
)
# All messages must be ordered, but only on a per channel basis.
token_address = getattr(message, 'token', '')
# Ignore duplicated messages
if echohash not in self.senthashes_to_states:
async_result = AsyncResult()
self.senthashes_to_states[echohash] = SentMessageState(
async_result,
receiver_address,
)
queue = self.get_channel_queue(
receiver_address,
token_address,
)
if log.isEnabledFor(logging.DEBUG):
log.debug(
'SENDING MESSAGE',
to=pex(receiver_address),
node=pex(self.raiden.address),
message=message,
echohash=pex(echohash),
)
queue.put(messagedata)
else:
waitack = self.senthashes_to_states[echohash]
async_result = waitack.async_result
return async_result
def send_and_wait(self, receiver_address, message, timeout=None):
"""Sends a message and wait for the response ack."""
async_result = self.send_async(receiver_address, message)
return async_result.wait(timeout=timeout)
def maybe_send_ack(self, receiver_address, ack_message):
""" Send ack_message to receiver_address if the transport is running. """
if not isaddress(receiver_address):
raise ValueError('Invalid address {}'.format(pex(receiver_address)))
if not isinstance(ack_message, Ack):
raise ValueError('Use maybe_send_ack only for Ack messages')
messagedata = ack_message.encode()
self.receivedhashes_to_acks[ack_message.echo] = (receiver_address, messagedata)
self._maybe_send_ack(*self.receivedhashes_to_acks[ack_message.echo])
def _maybe_send_ack(self, receiver_address, messagedata):
""" ACK must not go into the queue, otherwise nodes will deadlock
waiting for the confirmation.
"""
host_port = self.get_host_port(receiver_address)
# ACKs are sent at the end of the receive method, after the message is
# successfully processed. It may be the case that the server is stopped
# after the message is received but before the ack is sent, under that
# circumstance the udp socket would be unavailable and then an exception
# is raised.
#
# This check verifies the udp socket is still available before trying
# to send the ack. There must be *no context-switches after this test*.
if self.transport.server.started:
self.transport.send(
self.raiden,
host_port,
messagedata,
)
def get_ping(self, nonce):
""" Returns a signed Ping message.
Note: Ping messages don't have an enforced ordering, so a Ping message
with a higher nonce may be acknowledged first.
"""
message = Ping(nonce)
self.raiden.sign(message)
message_data = message.encode()
return message_data
def send_raw_with_result(self, data, receiver_address):
""" Sends data to receiver_address and returns an AsyncResult that will
be set once the message is acknowledged.
Always returns same AsyncResult instance for equal input.
"""
host_port = self.get_host_port(receiver_address)
echohash = sha3(data + receiver_address)
if echohash not in self.senthashes_to_states:
async_result = AsyncResult()
self.senthashes_to_states[echohash] = SentMessageState(
async_result,
receiver_address,
)
else:
async_result = self.senthashes_to_states[echohash].async_result
if not async_result.ready():
self.transport.send(
self.raiden,
host_port,
data,
)
return async_result
def set_node_network_state(self, node_address, node_state):
self.nodeaddresses_networkstatuses[node_address] = node_state
def receive(self, data):
if len(data) > UDP_MAX_MESSAGE_SIZE:
log.error('receive packet larger than maximum size', length=len(data))
return
# Repeat the ACK if the message has been handled before
echohash = sha3(data + self.raiden.address)
if echohash in self.receivedhashes_to_acks:
return self._maybe_send_ack(*self.receivedhashes_to_acks[echohash])
message = decode(data)
if isinstance(message, Ack):
waitack = self.senthashes_to_states.get(message.echo)
if waitack is None:
if log.isEnabledFor(logging.DEBUG):
log.debug(
'ACK FOR UNKNOWN ECHO',
node=pex(self.raiden.address),
echohash=pex(message.echo),
)
else:
if log.isEnabledFor(logging.DEBUG):
log.debug(
'ACK RECEIVED',
node=pex(self.raiden.address),
receiver=pex(waitack.receiver_address),
echohash=pex(message.echo),
)
waitack.async_result.set(True)
elif isinstance(message, Ping):
if ping_log.isEnabledFor(logging.DEBUG):
ping_log.debug(
'PING RECEIVED',
node=pex(self.raiden.address),
echohash=pex(echohash),
message=message,
sender=pex(message.sender),
)
ack = Ack(
self.raiden.address,
echohash,
)
self.maybe_send_ack(
message.sender,
ack,
)
elif isinstance(message, SignedMessage):
if log.isEnabledFor(logging.INFO):
log.info(
'MESSAGE RECEIVED',
node=pex(self.raiden.address),
echohash=pex(echohash),
message=message,
message_sender=pex(message.sender)
)
try:
self.raiden.on_message(message, echohash)
# only send the Ack if the message was handled without exceptions
ack = Ack(
self.raiden.address,
echohash,
)
try:
if log.isEnabledFor(logging.DEBUG):
log.debug(
'SENDING ACK',
node=pex(self.raiden.address),
to=pex(message.sender),
echohash=pex(echohash),
)
self.maybe_send_ack(
message.sender,
ack,
)
except (InvalidAddress, UnknownAddress) as e:
log.debug("Couldn't send the ACK", e=e)
except (UnknownAddress, InvalidNonce, TransferWhenClosed, TransferUnwanted) as e:
log.DEV('maybe unwanted transfer', e=e)
except (UnknownTokenAddress, InvalidLocksRoot) as e:
if log.isEnabledFor(logging.WARN):
log.warn(str(e))
elif log.isEnabledFor(logging.ERROR):
log.error(
'Invalid message',
message=data.encode('hex'),
)
| tomashaber/raiden | raiden/network/protocol.py | Python | mit | 24,753 | 0.000485 |
# Copyright (C) 2012-2015 ASTRON (Netherlands Institute for Radio Astronomy)
# P.O. Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
'''default config for webservice'''
DEBUG = False
JSONIFY_PRETTYPRINT_REGULAR = False
print('default config loaded')
| kernsuite-debian/lofar | SAS/ResourceAssignment/ResourceAssignmentEditor/config/default.py | Python | gpl-3.0 | 967 | 0 |
# -*- coding: utf-8 -*-
import json
import logging
import vobject
from datetime import datetime
from contextlib import contextmanager
from radicale import ical
from yats.shortcuts import get_ticket_model, build_ticket_search_ext, touch_ticket, remember_changes, mail_ticket, jabber_ticket, check_references, add_history, mail_comment, jabber_comment
from yats.models import tickets_reports, UserProfile, get_flow_end, tickets_comments, ticket_resolution, get_default_resolution, convertPrio
from yats.forms import SimpleTickets
from django.contrib.auth.models import AnonymousUser, User
from django.http import QueryDict
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext as _
from djradicale.models import DBProperties
logger = logging.getLogger('djradicale')
ICAL_TYPES = (
ical.Event,
ical.Todo,
ical.Journal,
# ical.Card,
ical.Timezone,
)
class FakeRequest:
def __init__(self):
self.GET = {}
self.POST = {}
self.session = {}
self.user = AnonymousUser()
class Collection(ical.Collection):
@property
def headers(self):
return (
ical.Header('PRODID:-//YATS//NONSGML Radicale Server//EN'),
ical.Header('VERSION:%s' % self.version))
def delete(self):
repid = self._getReportFromUrl(self.path)
tickets_reports.objects.get(pk=repid).delete()
def append(self, name, text):
# import pydevd
# pydevd.settrace('192.168.33.1', 5678)  # leftover remote-debugging hook, disabled
new_items = self._parse(text, ICAL_TYPES, name)
timezones = list(filter(
lambda x: x.tag == ical.Timezone.tag, new_items.values()))
request = self._getRequestFromUrl(self.path)
for new_item in new_items.values():
if new_item.tag == ical.Timezone.tag:
continue
if new_item.name not in self.items:
self.items[new_item.name] = new_item
text = ical.serialize(self.tag, self.headers, [new_item] + timezones)
cal = vobject.readOne(text)
# close ticket
if hasattr(cal.vtodo, 'status') and cal.vtodo.status.value == 'COMPLETED':
ticket = get_ticket_model()
try:
flow_end = get_flow_end()
resolution = get_default_resolution()
close_comment = _('closed via CalDAV')
tic = ticket.objects.get(uuid=cal.vtodo.uid.value)
tic.resolution = resolution
tic.closed = True
tic.close_date = timezone.now()
tic.state = flow_end
tic.save(user=request.user)
com = tickets_comments()
com.comment = _('ticket closed - resolution: %(resolution)s\n\n%(comment)s') % {'resolution': resolution.name, 'comment': close_comment}
com.ticket = tic
com.action = 1
com.save(user=request.user)
check_references(request, com)
touch_ticket(request.user, tic.id)
add_history(request, tic, 1, close_comment)
mail_comment(request, com.pk)
jabber_comment(request, com.pk)
except Exception:
pass
# change or new
else:
params = {
'caption': cal.vtodo.summary.value,
'description': cal.vtodo.description.value if hasattr(cal.vtodo, 'description') else None,
'uuid': cal.vtodo.uid.value,
'show_start': cal.vtodo.due.value if hasattr(cal.vtodo, 'due') else None,
'priority': convertPrio(cal.vtodo.priority.value) if hasattr(cal.vtodo, 'priority') else None
}
fakePOST = QueryDict(mutable=True)
fakePOST.update(params)
form = SimpleTickets(fakePOST)
if form.is_valid():
cd = form.cleaned_data
ticket = get_ticket_model()
# change ticket
try:
tic = ticket.objects.get(uuid=cal.vtodo.uid.value)
tic.caption = cd['caption']
tic.description = cd['description']
tic.priority = cd['priority']
# tic.assigned = cd['assigned']
tic.show_start = cd['show_start']
tic.save(user=request.user)
# new ticket
except ticket.DoesNotExist:
tic = ticket()
tic.caption = cd['caption']
tic.description = cd['description']
if 'priority' not in cd or not cd['priority']:
if hasattr(settings, 'KEEP_IT_SIMPLE_DEFAULT_PRIORITY') and settings.KEEP_IT_SIMPLE_DEFAULT_PRIORITY:
tic.priority_id = settings.KEEP_IT_SIMPLE_DEFAULT_PRIORITY
else:
tic.priority = cd['priority']
tic.assigned = request.user
if hasattr(settings, 'KEEP_IT_SIMPLE_DEFAULT_CUSTOMER') and settings.KEEP_IT_SIMPLE_DEFAULT_CUSTOMER:
if settings.KEEP_IT_SIMPLE_DEFAULT_CUSTOMER == -1:
tic.customer = request.organisation
else:
tic.customer_id = settings.KEEP_IT_SIMPLE_DEFAULT_CUSTOMER
if hasattr(settings, 'KEEP_IT_SIMPLE_DEFAULT_COMPONENT') and settings.KEEP_IT_SIMPLE_DEFAULT_COMPONENT:
tic.component_id = settings.KEEP_IT_SIMPLE_DEFAULT_COMPONENT
tic.show_start = cd['show_start']
tic.uuid = cal.vtodo.uid.value
tic.save(user=request.user)
if tic.assigned:
touch_ticket(tic.assigned, tic.pk)
for ele in form.changed_data:
form.initial[ele] = ''
remember_changes(request, form, tic)
touch_ticket(request.user, tic.pk)
mail_ticket(request, tic.pk, form, rcpt=settings.TICKET_NEW_MAIL_RCPT, is_api=True)
jabber_ticket(request, tic.pk, form, rcpt=settings.TICKET_NEW_JABBER_RCPT, is_api=True)
else:
raise Exception(form.errors)
def remove(self, name):
pass
def replace(self, name, text):
self.append(name, text)
@property
def text(self):
return ical.serialize(self.tag, self.headers, self.items.values())
@classmethod
def children(cls, path):
"""Yield the children of the collection at local ``path``."""
request = cls._getRequestFromUrl(path)
children = list(tickets_reports.objects.filter(active_record=True, c_user=request.user).values_list('slug', flat=True))
children = ['%s/%s.ics' % (request.user.username, itm) for itm in children]
return map(cls, children)
@classmethod
def is_node(cls, path):
"""Return ``True`` if relative ``path`` is a node.
A node is a WebDAV collection whose members are other collections.
"""
request = cls._getRequestFromUrl(path)
if path == request.user.username:
return True
else:
return False
@classmethod
def is_leaf(cls, path):
"""Return ``True`` if relative ``path`` is a leaf.
A leaf is a WebDAV collection whose members are not collections.
"""
result = False
if '.ics' in path:
try:
request = cls._getRequestFromUrl(path)
rep = tickets_reports.objects.get(active_record=True, pk=cls._getReportFromUrl(path))
tic = get_ticket_model().objects.select_related('type', 'state', 'assigned', 'priority', 'customer').all()
search_params, tic = build_ticket_search_ext(request, tic, json.loads(rep.search))
result = (tic.exists())
except Exception:
import sys
a = sys.exc_info()
return result
@property
def last_modified(self):
try:
request = self._getRequestFromUrl(self.path)
rep = tickets_reports.objects.get(active_record=True, pk=self._getReportFromUrl(self.path))
tic = get_ticket_model().objects.select_related('type', 'state', 'assigned', 'priority', 'customer').all()
search_params, tic = build_ticket_search_ext(request, tic, json.loads(rep.search))
date = tic.latest('u_date')
return datetime.strftime(
date.last_action_date, '%a, %d %b %Y %H:%M:%S %z')
except Exception:
import sys
a = sys.exc_info()
@property
def tag(self):
with self.props as props:
if 'tag' not in props:
props['tag'] = 'VCALENDAR'
return props['tag']
@property
@contextmanager
def props(self):
# On enter
properties = {}
try:
props = DBProperties.objects.get(path=self.path)
except DBProperties.DoesNotExist:
pass
else:
properties.update(json.loads(props.text))
old_properties = properties.copy()
yield properties
# On exit
if old_properties != properties:
props, created = DBProperties.objects.get_or_create(path=self.path)
props.text = json.dumps(properties)
props.save()
@property
def items(self):
itms = {}
try:
request = self._getRequestFromUrl(self.path)
if self.path == request.user.username:
return itms
rep = tickets_reports.objects.get(active_record=True, pk=self._getReportFromUrl(self.path))
tic = get_ticket_model().objects.select_related('type', 'state', 'assigned', 'priority', 'customer').all()
search_params, tic = build_ticket_search_ext(request, tic, json.loads(rep.search))
for item in tic:
text = self._itemToICal(item)
itms.update(self._parse(text, ICAL_TYPES))
except Exception:
import sys
a = sys.exc_info()
return itms
@classmethod
def _getRequestFromUrl(cls, path):
user = path.split('/')[0]
request = FakeRequest()
request.user = User.objects.get(username=user)
request.organisation = UserProfile.objects.get(user=request.user).organisation
return request
@classmethod
def _getReportFromUrl(cls, path):
if '.ics' in path:
file = path.split('/')[-1]
file = file.replace('.ics', '')
repid = tickets_reports.objects.get(active_record=True, slug=file).pk
return repid
return 0
@classmethod
def _itemToICal(cls, item):
cal = vobject.iCalendar()
cal.add('vtodo')
cal.vtodo.add('summary').value = item.caption
cal.vtodo.add('uid').value = str(item.uuid)
cal.vtodo.add('created').value = item.c_date
if item.closed:
cal.vtodo.add('status').value = 'COMPLETED'
if item.priority:
cal.vtodo.add('priority').value = str(item.priority.caldav)
else:
cal.vtodo.add('priority').value = '0'
if item.description:
cal.vtodo.add('description').value = item.description
if item.show_start:
# cal.vtodo.add('dstart').value = item.show_start
cal.vtodo.add('due').value = item.show_start
cal.vtodo.add('valarm')
cal.vtodo.valarm.add('uuid').value = '%s-%s' % (str(item.uuid), item.pk)
cal.vtodo.valarm.add('x-wr-alarmuid').value = '%s-%s' % (str(item.uuid), item.pk)
cal.vtodo.valarm.add('action').value = 'DISPLAY'
# cal.vtodo.valarm.add('x-apple-proximity').value = 'DEPART'
cal.vtodo.valarm.add('description').value = 'Erinnerung an ein Ereignis'  # German: "Reminder for an event"
# cal.vtodo.valarm.add('trigger').value =
# TRIGGER;VALUE=DATE-TIME:20180821T200000Z
cal.vtodo.add('x-radicale-name').value = '%s.ics' % str(item.uuid)
return cal.serialize()
| mediafactory/yats | modules/yats/caldav/storage.py | Python | mit | 12,605 | 0.002697 |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from mcfw.properties import bool_property, unicode_list_property, unicode_property, typed_property
class BankDataTO(object):
bankCode = unicode_property('bankCode')
name = unicode_property('name')
bic = unicode_property('bic')
class OpenIbanResultTO(object):
valid = bool_property('valid')
messages = unicode_list_property('message')
iban = unicode_property('iban')
bankData = typed_property('bankData', BankDataTO) # type: BankDataTO
checkResults = typed_property('checkResults', dict)
| our-city-app/oca-backend | src/rogerthat/bizz/payment/to.py | Python | apache-2.0 | 1,164 | 0.000859 |
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .module import CmsoModule
__all__ = ['CmsoModule']
| laurent-george/weboob | modules/cmso/__init__.py | Python | agpl-3.0 | 788 | 0 |
import datetime
import itertools
import re
import urllib2
import mimetypes
import operator
import logging
import sys
import traceback
import warnings
import tagging
import tagging.models
import vidscraper
from bs4 import BeautifulSoup
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.comments.moderation import CommentModerator, moderator
from django.contrib.sites.models import Site
from django.contrib.contenttypes import generic
from django.core.exceptions import ValidationError
from django.core.mail import EmailMessage
from django.core.signals import request_finished
from django.core.validators import ipv4_re
from django.db import models
from django.template import Context, loader
from django.utils.html import escape as html_escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from haystack import connections, connection_router
from mptt.models import MPTTModel
from notification import models as notification
from slugify import slugify
from localtv import utils, settings as lsettings
from localtv.managers import SiteRelatedManager, VideoManager
from localtv.signals import post_video_from_vidscraper, submit_finished
from localtv.templatetags.filters import sanitize
VIDEO_SERVICE_REGEXES = (
('YouTube', r'http://gdata\.youtube\.com/feeds/'),
('YouTube', r'http://(www\.)?youtube\.com/'),
('blip.tv', r'http://(.+\.)?blip\.tv/'),
('Vimeo', r'http://(www\.)?vimeo\.com/'),
('Dailymotion', r'http://(www\.)?dailymotion\.com/rss'))
class Thumbnailable(models.Model):
"""
A type of Model that has thumbnails generated for it. Now that we're using
Daguerre for thumbnails, this is just for backwards compatibility.
"""
# we set this to "logo" for SiteSettings, 'icon' for WidgetSettings
thumbnail_attribute = 'thumbnail'
class Meta:
abstract = True
@property
def has_thumbnail(self):
warnings.warn("has_thumbnail is deprecated and will be removed in a "
"future version.", DeprecationWarning)
return bool(getattr(self, self.thumbnail_attribute))
@property
def thumbnail_path(self):
warnings.warn("thumbnail_path is deprecated and will be removed in a "
"future version.", DeprecationWarning)
thumb_file = getattr(self, self.thumbnail_attribute)
if thumb_file:
return thumb_file.name
else:
return ''
class SiteSettings(Thumbnailable):
"""
A model for storing Site-specific settings (feature switches, custom HTML
and CSS, etc) in the database rather than in settings files. Most of
these can thus be set by site admins rather than sysadmins. There are
also a few fields for storing site event state.
"""
thumbnail_attribute = 'logo'
#: Link to the Site these settings are for.
site = models.OneToOneField(Site)
## Site styles ##
#: Custom logo image for this site.
logo = models.ImageField(upload_to=utils.UploadTo('localtv/sitesettings/logo/%Y/%m/%d/'), blank=True)
#: Custom background image for this site.
background = models.ImageField(upload_to=utils.UploadTo('localtv/sitesettings/background/%Y/%m/%d/'),
blank=True)
#: Arbitrary custom css overrides.
css = models.TextField(blank=True)
## Custom HTML ##
#: Subheader for the site.
tagline = models.CharField(max_length=4096, blank=True)
#: Arbitrary custom HTML which (currently) is used as a site description
#: on the main page.
sidebar_html = models.TextField(blank=True)
#: Arbitrary custom HTML which displays in the footer of all non-admin pages.
footer_html = models.TextField(blank=True)
#: Arbitrary custom HTML which displays on the about page.
about_html = models.TextField(blank=True)
## Site permissions ##
#: A collection of Users who have administrative access to the site.
admins = models.ManyToManyField('auth.User', blank=True,
related_name='admin_for')
#: Whether or not the Submit Video button should display or not.
#: Doesn't affect whether videos can be submitted or not.
#: See http://bugzilla.pculture.org/show_bug.cgi?id=19809
display_submit_button = models.BooleanField(default=True)
#: Whether or not users need to log in to submit videos.
submission_requires_login = models.BooleanField(default=False)
#: Whether or not an email address needs to be given with an
#: unauthenticated video submission.
submission_requires_email = models.BooleanField(default=False)
## Feature switches ##
#: Whether playlist functionality is enabled.
playlists_enabled = models.IntegerField(default=1)
#: Whether the original publication date or date added to this site
#: should be used for sorting videos.
use_original_date = models.BooleanField(
default=True,
help_text="If set, use the original date the video was posted. "
"Otherwise, use the date the video was added to this site.")
#: Whether comments should be held for moderation.
screen_all_comments = models.BooleanField(
verbose_name='Hold comments for moderation',
default=True,
help_text="Hold all comments for moderation by default?")
#: Whether leaving a comment requires you to be logged in.
comments_required_login = models.BooleanField(
default=False,
verbose_name="Require Login",
help_text="If True, comments require the user to be logged in.")
## Tracking fields ##
#: Whether a user has elected to hide the "get started" section in
#: the admin interface.
hide_get_started = models.BooleanField(default=False)
objects = SiteRelatedManager()
def __unicode__(self):
return u'%s (%s)' % (self.site.name, self.site.domain)
def user_is_admin(self, user):
"""
Return True if the given User is an admin for this SiteSettings.
"""
if not user.is_authenticated() or not user.is_active:
return False
if user.is_superuser:
return True
return self.admins.filter(pk=user.pk).exists()
def should_show_dashboard(self):
"""Returns True for backwards-compatibility."""
warnings.warn("should_show_dashboard is deprecated and will be "
"removed in a future version.", DeprecationWarning)
return True
class WidgetSettingsManager(SiteRelatedManager):
def _new_entry(self, site, using):
ws = super(WidgetSettingsManager, self)._new_entry(site, using)
try:
site_settings = SiteSettings.objects.get_cached(site, using)
except SiteSettings.DoesNotExist:
pass
else:
if site_settings.logo:
site_settings.logo.open()
ws.icon = site_settings.logo
ws.save()
return ws
class WidgetSettings(Thumbnailable):
"""
A Model which represents the options for controlling the widget creator.
"""
thumbnail_attribute = 'icon'
site = models.OneToOneField(Site)
title = models.CharField(max_length=250, blank=True)
title_editable = models.BooleanField(default=True)
icon = models.ImageField(upload_to=utils.UploadTo('localtv/widgetsettings/icon/%Y/%m/%d/'), blank=True)
icon_editable = models.BooleanField(default=False)
css = models.FileField(upload_to=utils.UploadTo('localtv/widgetsettings/css/%Y/%m/%d/'), blank=True)
css_editable = models.BooleanField(default=False)
bg_color = models.CharField(max_length=20, blank=True)
bg_color_editable = models.BooleanField(default=False)
text_color = models.CharField(max_length=20, blank=True)
text_color_editable = models.BooleanField(default=False)
border_color = models.CharField(max_length=20, blank=True)
border_color_editable = models.BooleanField(default=False)
objects = WidgetSettingsManager()
def get_title_or_reasonable_default(self):
# Is the title worth using? If so, use that.
use_title = True
if self.title.endswith('example.com'):
use_title = False
if not self.title:
use_title = False
# Okay, so either we return the title, or a sensible default
if use_title:
return html_escape(self.title)
return self.generate_reasonable_default_title()
def generate_reasonable_default_title(self):
prefix = 'Watch Videos on %s'
# Now, work on calculating what goes at the end.
site = Site.objects.get_current()
# The default suffix is a self-link. If the site name and
# site domain are plausible, do that.
if ((site.name and site.name.lower() != 'example.com') and
(site.domain and site.domain.lower() != 'example.com')):
suffix = '<a href="http://%s/">%s</a>' % (
site.domain, html_escape(site.name))
# First, we try the site name, if that's a nice string.
elif site.name and site.name.lower() != 'example.com':
suffix = site.name
# Else, we try the site domain, if that's not example.com
elif site.domain.lower() != 'example.com':
suffix = site.domain
else:
suffix = 'our video site'
return prefix % suffix
class Source(Thumbnailable):
"""
An abstract base class to represent things which are sources of multiple
videos. Current subclasses are Feed and SavedSearch.
"""
id = models.AutoField(primary_key=True)
site = models.ForeignKey(Site)
thumbnail = models.ImageField(upload_to=utils.UploadTo('localtv/source/thumbnail/%Y/%m/%d/'),
blank=True)
auto_approve = models.BooleanField(default=False)
auto_update = models.BooleanField(default=True,
help_text=_("If selected, new videos will"
" automatically be imported "
"from this source."))
user = models.ForeignKey('auth.User', null=True, blank=True)
auto_categories = models.ManyToManyField("Category", blank=True)
auto_authors = models.ManyToManyField("auth.User", blank=True,
related_name='auto_%(class)s_set')
class Meta:
abstract = True
def update(self, video_iter, source_import, clear_rejected=False):
"""
Imports videos from a feed/search. ``video_iter`` is an iterable which
returns :class:`vidscraper.videos.Video` objects. We use
:meth:`.Video.from_vidscraper_video` to map the Vidscraper fields to
Video attributes.
If ``clear_rejected`` is ``True``, rejected versions of videos that are
found in the ``video_iter`` will be deleted and re-imported.
"""
author_pks = list(self.auto_authors.values_list('pk', flat=True))
category_pks = list(self.auto_categories.values_list('pk', flat=True))
import_opts = source_import.__class__._meta
from localtv.tasks import video_from_vidscraper_video, mark_import_pending
total_videos = 0
try:
for vidscraper_video in video_iter:
total_videos += 1
try:
video_from_vidscraper_video.delay(
vidscraper_video.serialize(),
site_pk=self.site_id,
import_app_label=import_opts.app_label,
import_model=import_opts.module_name,
import_pk=source_import.pk,
status=Video.PENDING,
author_pks=author_pks,
category_pks=category_pks,
clear_rejected=clear_rejected)
except Exception:
source_import.handle_error(
'Import task creation failed for %r' % (
vidscraper_video.url,),
is_skip=True,
with_exception=True)
except Exception:
source_import.fail(with_exception=True)
return
source_import.__class__._default_manager.filter(
pk=source_import.pk
).update(
total_videos=total_videos
)
mark_import_pending.delay(import_app_label=import_opts.app_label,
import_model=import_opts.module_name,
import_pk=source_import.pk)
class Feed(Source):
"""
Feed to pull videos in from.
If the same feed is used on two different sites, they will require two
separate entries here.
Fields:
- feed_url: The location of this field
- site: which site this feed belongs to
- name: human readable name for this feed
- webpage: webpage that this feed's content is associated with
- description: human readable description of this item
- last_updated: last time we ran self.update_items()
- when_submitted: when this feed was first registered on this site
- status: one of Feed.STATUS_CHOICES
- etag: used to see whether or not the feed has changed since our last
update.
- auto_approve: whether or not to set all videos in this feed to approved
during the import process
- user: a user that submitted this feed, if any
- auto_categories: categories that are automatically applied to videos on
import
- auto_authors: authors that are automatically applied to videos on
import
"""
INACTIVE = 0
ACTIVE = 1
STATUS_CHOICES = (
(INACTIVE, _(u'Inactive')),
(ACTIVE, _(u'Active')),
)
feed_url = models.URLField(verify_exists=False)
name = models.CharField(max_length=250)
webpage = models.URLField(verify_exists=False, blank=True)
description = models.TextField(blank=True)
last_updated = models.DateTimeField()
when_submitted = models.DateTimeField(auto_now_add=True)
etag = models.CharField(max_length=250, blank=True)
calculated_source_type = models.CharField(max_length=255, blank=True, default='')
status = models.IntegerField(choices=STATUS_CHOICES, default=INACTIVE)
class Meta:
unique_together = (
('feed_url', 'site'))
get_latest_by = 'last_updated'
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('localtv_list_feed', [self.pk])
def update(self, **kwargs):
"""
Fetch and import new videos from this feed.
"""
try:
FeedImport.objects.get(source=self,
status=FeedImport.STARTED)
except FeedImport.DoesNotExist:
pass
else:
logging.info('Skipping import of %s: already in progress' % self)
return
feed_import = FeedImport.objects.create(source=self,
auto_approve=self.auto_approve)
video_iter = vidscraper.auto_feed(
self.feed_url,
max_results=None if self.status == self.INACTIVE else 100,
api_keys=lsettings.API_KEYS,
)
try:
video_iter.load()
except Exception:
feed_import.fail("Data loading failed for {source}",
with_exception=True)
return
self.etag = getattr(video_iter, 'etag', None) or ''
self.last_updated = datetime.datetime.now()
if self.status == self.INACTIVE:
# If these fields have already been changed, don't
# override those changes. Don't unset the name field
# if no further data is available.
if self.name == self.feed_url:
self.name = video_iter.title or self.name
if not self.webpage:
self.webpage = video_iter.webpage or ''
if not self.description:
self.description = video_iter.description or ''
self.save()
super(Feed, self).update(video_iter, source_import=feed_import,
**kwargs)
def source_type(self):
return self.calculated_source_type
def _calculate_source_type(self):
video_service = self.video_service()
if video_service is None:
return u'Feed'
else:
return u'User: %s' % video_service
def video_service(self):
for service, regexp in VIDEO_SERVICE_REGEXES:
if re.search(regexp, self.feed_url, re.I):
return service
def pre_save_set_calculated_source_type(instance, **kwargs):
# Always save the calculated_source_type
instance.calculated_source_type = instance._calculate_source_type()
# Plus, if the name changed, we have to recalculate all the Videos that depend on us.
try:
v = Feed.objects.get(id=instance.id)
except Feed.DoesNotExist:
return instance
if v.name != instance.name:
# recalculate all the sad little videos' calculated_source_type
for vid in instance.video_set.all():
vid.save()
models.signals.pre_save.connect(pre_save_set_calculated_source_type,
sender=Feed)
class Category(MPTTModel):
"""
A category for videos to be contained in.
Categories and tags aren't too different functionally, but categories are
more strict as they can't be defined by visitors. Categories can also be
hierarchical.
Fields:
- site: A link to the django.contrib.sites.models.Site object this object
is bound to
- name: Name of this category
    - slug: a slugified version of the name, used to create more friendly URLs
- logo: An image to associate with this category
- description: human readable description of this item
    - parent: Reference to another Category. Allows you to have hierarchical
categories.
"""
site = models.ForeignKey(Site)
name = models.CharField(
max_length=80, verbose_name='Category Name',
help_text=_("The name is used to identify the category almost "
"everywhere; for example, under a video or in a "
"category widget."))
slug = models.SlugField(
verbose_name='Category Slug',
help_text=_("The \"slug\" is the URL-friendly version of the name. It "
"is usually lower-case and contains only letters, numbers "
"and hyphens."))
logo = models.ImageField(
upload_to=utils.UploadTo('localtv/category/logo/%Y/%m/%d/'),
blank=True,
verbose_name='Thumbnail/Logo',
help_text=_("Optional. For example: a leaf for 'environment' or the "
"logo of a university department."))
description = models.TextField(
blank=True, verbose_name='Description (HTML)',
help_text=_("Optional. The description is not prominent by default, but"
" some themes may show it."))
parent = models.ForeignKey(
'self', blank=True, null=True,
related_name='child_set',
verbose_name='Category Parent',
help_text=_("Categories, unlike tags, can have a hierarchy."))
class MPTTMeta:
order_insertion_by = ['name']
class Meta:
unique_together = (
('slug', 'site'),
('name', 'site'))
def __unicode__(self):
return self.name
def dashes(self):
"""
Returns a string of em dashes equal to the :class:`Category`\ 's
level. This is used to indent the category name in the admin
templates.
"""
return mark_safe('—' * self.level)
@models.permalink
def get_absolute_url(self):
return ('localtv_category', [self.slug])
def approved_set(self):
"""
Returns active videos for the category and its subcategories, ordered
by decreasing best date.
"""
opts = self._mptt_meta
lookups = {
'status': Video.ACTIVE,
'categories__left__gte': getattr(self, opts.left_attr),
'categories__left__lte': getattr(self, opts.right_attr),
'categories__tree_id': getattr(self, opts.tree_id_attr)
}
lookups = self._tree_manager._translate_lookups(**lookups)
return Video.objects.filter(**lookups).distinct()
approved_set = property(approved_set)
def unique_error_message(self, model_class, unique_check):
return 'Category with this %s already exists.' % (
unique_check[0],)
class SavedSearch(Source):
"""
A set of keywords to regularly pull in new videos from.
    There's an administrative interface for doing "live searches".
Fields:
- site: site this savedsearch applies to
- query_string: a whitespace-separated list of words to search for. Words
starting with a dash will be processed as negative query terms
- when_created: date and time that this search was saved.
"""
query_string = models.TextField()
when_created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.query_string
def update(self, **kwargs):
"""
Fetch and import new videos from this search.
"""
try:
SearchImport.objects.get(source=self,
status=SearchImport.STARTED)
except SearchImport.DoesNotExist:
pass
else:
logging.info('Skipping import of %s: already in progress' % self)
return
search_import = SearchImport.objects.create(
source=self,
auto_approve=self.auto_approve
)
searches = vidscraper.auto_search(
self.query_string,
max_results=100,
api_keys=lsettings.API_KEYS,
)
video_iters = []
for video_iter in searches:
try:
video_iter.load()
except Exception:
search_import.handle_error(u'Skipping import of search results '
u'from %s' % video_iter.__class__.__name__,
with_exception=True)
continue
video_iters.append(video_iter)
if video_iters:
super(SavedSearch, self).update(itertools.chain(*video_iters),
source_import=search_import,
**kwargs)
else:
# Mark the import as failed if none of the searches could load.
search_import.fail("All searches failed for {source}",
with_exception=False)
def source_type(self):
return u'Search'
class SourceImportIndex(models.Model):
video = models.OneToOneField('Video', unique=True)
index = models.PositiveIntegerField(blank=True, null=True)
class Meta:
abstract = True
class FeedImportIndex(SourceImportIndex):
source_import = models.ForeignKey('FeedImport', related_name='indexes')
class SearchImportIndex(SourceImportIndex):
source_import = models.ForeignKey('SearchImport', related_name='indexes')
class SourceImportError(models.Model):
message = models.TextField()
traceback = models.TextField(blank=True)
is_skip = models.BooleanField(help_text="Whether this error represents a "
"video that was skipped.")
datetime = models.DateTimeField(auto_now_add=True)
class Meta:
abstract = True
class FeedImportError(SourceImportError):
source_import = models.ForeignKey('FeedImport', related_name='errors')
class SearchImportError(SourceImportError):
source_import = models.ForeignKey('SearchImport', related_name='errors')
class SourceImport(models.Model):
STARTED = 'started'
PENDING = 'pending'
COMPLETE = 'complete'
FAILED = 'failed'
STATUS_CHOICES = (
(STARTED, _('Started')),
(PENDING, _('Pending haystack updates')),
(COMPLETE, _('Complete')),
(FAILED, _('Failed'))
)
start = models.DateTimeField(auto_now_add=True)
last_activity = models.DateTimeField(blank=True, null=True)
total_videos = models.PositiveIntegerField(blank=True, null=True)
videos_imported = models.PositiveIntegerField(default=0)
videos_skipped = models.PositiveIntegerField(default=0)
#: Caches the auto_approve of the search on the import, so that the imported
#: videos can be approved en masse at the end of the import based on the
#: settings at the beginning of the import.
auto_approve = models.BooleanField()
status = models.CharField(max_length=10, choices=STATUS_CHOICES,
default=STARTED)
class Meta:
get_latest_by = 'start'
ordering = ['-start']
abstract = True
def is_running(self):
"""
Returns True if the SourceImport is currently running.
"""
return self.status in (self.STARTED, self.PENDING)
def set_video_source(self, video):
"""
Sets the value of the correct field on the ``video`` to mark it as
having the same source as this import. Must be implemented by
subclasses.
"""
raise NotImplementedError
def get_videos(self):
raise NotImplementedError
def handle_error(self, message, is_skip=False, with_exception=False):
"""
Logs the error with the default logger and to the database.
:param message: A human-friendly description of the error that does
not contain sensitive information.
:param is_skip: ``True`` if the error results in a video being skipped.
Default: False.
:param with_exception: ``True`` if exception information should be
recorded. Default: False.
"""
if with_exception:
exc_info = sys.exc_info()
logging.warn(message, exc_info=exc_info)
tb = ''.join(traceback.format_exception(*exc_info))
else:
logging.warn(message)
tb = ''
self.errors.create(message=message,
source_import=self,
traceback=tb,
is_skip=is_skip)
if is_skip:
self.__class__._default_manager.filter(pk=self.pk
).update(videos_skipped=models.F('videos_skipped') + 1)
def get_index_creation_kwargs(self, video, vidscraper_video):
return {
'source_import': self,
'video': video,
'index': vidscraper_video.index
}
def handle_video(self, video, vidscraper_video):
"""
Creates an index instance connecting the video to this import.
:param video: The :class:`Video` instance which was imported.
:param vidscraper_video: The original video from :mod:`vidscraper`.
"""
self.indexes.create(
**self.get_index_creation_kwargs(video, vidscraper_video))
self.__class__._default_manager.filter(pk=self.pk
).update(videos_imported=models.F('videos_imported') + 1)
def fail(self, message="Import failed for {source}", with_exception=False):
"""
Mark an import as failed, along with some post-fail cleanup.
"""
self.status = self.FAILED
self.last_activity = datetime.datetime.now()
self.save()
self.handle_error(message.format(source=self.source),
with_exception=with_exception)
self.get_videos().delete()
class FeedImport(SourceImport):
source = models.ForeignKey(Feed, related_name='imports')
def set_video_source(self, video):
video.feed_id = self.source_id
def get_videos(self):
return Video.objects.filter(feedimportindex__source_import=self)
class SearchImport(SourceImport):
source = models.ForeignKey(SavedSearch, related_name='imports')
def set_video_source(self, video):
video.search_id = self.source_id
def get_videos(self):
return Video.objects.filter(searchimportindex__source_import=self)
class Video(Thumbnailable):
"""
Fields:
- name: Name of this video
- site: Site this video is attached to
- description: Video description
- tags: A list of Tag objects associated with this item
- categories: Similar to Tags
- authors: the person/people responsible for this video
- file_url: The file this object points to (if any) ... if not
provided, at minimum we need the embed_code for the item.
- file_url_length: size of the file, in bytes
- file_url_mimetype: mimetype of the file
- when_submitted: When this item was first entered into the
database
- when_approved: When this item was marked to appear publicly on
the site
- when_published: When this file was published at its original
source (if known)
- last_featured: last time this item was featured.
- status: one of Video.STATUS_CHOICES
- feed: which feed this item came from (if any)
- website_url: The page that this item is associated with.
- embed_code: code used to embed this item.
    - flash_enclosure_url: an enclosure link that doesn't actually point to a
      video file; the kind some Flash video sites give out when they don't
      want their enclosures to point to the real video files.
- guid: data used to identify this video
- thumbnail_url: url to the thumbnail, if such a thing exists
- user: if not None, the user who submitted this video
- search: if not None, the SavedSearch from which this video came
- video_service_user: if not blank, the username of the user on the video
service who owns this video. We can figure out the service from the
website_url.
- contact: a free-text field for anonymous users to specify some contact
info
- notes: a free-text field to add notes about the video
"""
UNAPPROVED = 0
ACTIVE = 1
REJECTED = 2
PENDING = 3
STATUS_CHOICES = (
(UNAPPROVED, _(u'Unapproved')),
(ACTIVE, _(u'Active')),
(REJECTED, _(u'Rejected')),
(PENDING, _(u'Waiting on import to finish')),
)
site = models.ForeignKey(Site)
name = models.CharField(verbose_name="Video Name", max_length=250)
description = models.TextField(verbose_name="Video Description (optional)",
blank=True)
thumbnail_url = models.URLField(verbose_name="Thumbnail URL (optional)",
verify_exists=False, blank=True,
max_length=400)
thumbnail = models.ImageField(upload_to=utils.UploadTo('localtv/video/thumbnail/%Y/%m/%d/'),
blank=True)
categories = models.ManyToManyField(Category, blank=True)
authors = models.ManyToManyField('auth.User', blank=True,
related_name='authored_set')
file_url = models.URLField(verify_exists=False, blank=True,
max_length=2048)
file_url_length = models.IntegerField(null=True, blank=True)
file_url_mimetype = models.CharField(max_length=60, blank=True)
when_modified = models.DateTimeField(auto_now=True,
db_index=True,
default=datetime.datetime.now)
when_submitted = models.DateTimeField(auto_now_add=True)
when_approved = models.DateTimeField(null=True, blank=True)
when_published = models.DateTimeField(null=True, blank=True)
last_featured = models.DateTimeField(null=True, blank=True)
status = models.IntegerField(choices=STATUS_CHOICES, default=UNAPPROVED)
feed = models.ForeignKey(Feed, null=True, blank=True)
website_url = models.URLField(
verbose_name='Original Video Page URL (optional)',
max_length=2048,
verify_exists=False,
blank=True)
embed_code = models.TextField(verbose_name="Video <embed> code", blank=True)
flash_enclosure_url = models.URLField(verify_exists=False, max_length=2048,
blank=True)
guid = models.CharField(max_length=250, blank=True)
user = models.ForeignKey('auth.User', null=True, blank=True)
search = models.ForeignKey(SavedSearch, null=True, blank=True)
video_service_user = models.CharField(max_length=250, blank=True)
video_service_url = models.URLField(verify_exists=False, blank=True)
contact = models.CharField(verbose_name='Email (optional)', max_length=250,
blank=True)
notes = models.TextField(verbose_name='Notes (optional)', blank=True)
calculated_source_type = models.CharField(max_length=255, blank=True, default='')
objects = VideoManager()
taggeditem_set = generic.GenericRelation(tagging.models.TaggedItem,
content_type_field='content_type',
object_id_field='object_id')
class Meta:
ordering = ['-when_submitted']
get_latest_by = 'when_modified'
def __unicode__(self):
return self.name
def clean(self):
# clean is always run during ModelForm cleaning. If a model form is in
# play, rejected videos don't matter; the submission of that form
# should be considered valid. During automated imports, rejected
# videos are not excluded.
self._check_for_duplicates(exclude_rejected=True)
def _check_for_duplicates(self, exclude_rejected=True):
if not self.embed_code and not self.file_url:
raise ValidationError("Video has no embed code or file url.")
qs = Video.objects.filter(site=self.site_id)
if exclude_rejected:
qs = qs.exclude(status=Video.REJECTED)
if self.pk is not None:
qs = qs.exclude(pk=self.pk)
if self.guid and qs.filter(guid=self.guid).exists():
raise ValidationError("Another video with the same guid "
"already exists.")
if (self.website_url and
qs.filter(website_url=self.website_url).exists()):
raise ValidationError("Another video with the same website url "
"already exists.")
if self.file_url and qs.filter(file_url=self.file_url).exists():
raise ValidationError("Another video with the same file url "
"already exists.")
def clear_rejected_duplicates(self):
"""
Deletes rejected copies of this video based on the file_url,
website_url, and guid fields.
"""
if not any((self.website_url, self.file_url, self.guid)):
return
q_filter = models.Q()
if self.website_url:
q_filter |= models.Q(website_url=self.website_url)
if self.file_url:
q_filter |= models.Q(file_url=self.file_url)
if self.guid:
q_filter |= models.Q(guid=self.guid)
qs = Video.objects.filter(
site=self.site_id,
status=Video.REJECTED).filter(q_filter)
qs.delete()
@models.permalink
def get_absolute_url(self):
return ('localtv_view_video', (),
{'video_id': self.id,
'slug': slugify(self.name)[:30]})
def save(self, **kwargs):
"""
        Adds support for an ``update_index`` kwarg, defaulting to ``True``.
If this kwarg is ``False``, then no index updates will be run by the
search index.
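        Illustrative usage (a sketch, not from the original docs)::
            video.save(update_index=False)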
"""
# This actually relies on logic in
# :meth:`QueuedSearchIndex._enqueue_instance`
self._update_index = kwargs.pop('update_index', True)
super(Video, self).save(**kwargs)
save.alters_data = True
@classmethod
def from_vidscraper_video(cls, video, status=None, commit=True,
source_import=None, site_pk=None, authors=None,
categories=None, update_index=True):
"""
Builds a :class:`Video` instance from a
        :class:`vidscraper.videos.Video` instance. If ``commit`` is False,
        the :class:`Video` will not be saved, and the created instance will have
        a ``save_m2m()`` method that must be called after you call ``save()``.
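        Illustrative sketch (not from the original docs; ``vs_video`` stands in
        for a :class:`vidscraper.videos.Video`)::
            video = Video.from_vidscraper_video(vs_video, commit=False)
            video.save()
            video.save_m2m()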
"""
video_file = video.get_file()
if video_file and video_file.expires is None:
file_url = video_file.url
else:
file_url = None
if status is None:
status = cls.UNAPPROVED
if site_pk is None:
site_pk = settings.SITE_ID
now = datetime.datetime.now()
instance = cls(
guid=video.guid or '',
name=video.title or '',
description=video.description or '',
website_url=video.link or '',
when_published=video.publish_datetime,
file_url=file_url or '',
file_url_mimetype=getattr(video_file, 'mime_type', '') or '',
file_url_length=getattr(video_file, 'length', None),
when_submitted=now,
when_approved=now if status == cls.ACTIVE else None,
status=status,
thumbnail_url=video.thumbnail_url or '',
embed_code=video.embed_code or '',
flash_enclosure_url=video.flash_enclosure_url or '',
video_service_user=video.user or '',
video_service_url=video.user_url or '',
site_id=site_pk
)
if instance.description:
soup = BeautifulSoup(video.description)
for tag in soup.find_all(
'div', {'class': "miro-community-description"}):
instance.description = unicode(tag)
break
instance.description = sanitize(instance.description,
extra_filters=['img'])
instance._vidscraper_video = video
if source_import is not None:
source_import.set_video_source(instance)
def save_m2m():
if authors:
instance.authors = authors
if video.user:
name = video.user
if ' ' in name:
first, last = name.split(' ', 1)
else:
first, last = name, ''
author, created = User.objects.get_or_create(
username=name[:30],
defaults={'first_name': first[:30],
'last_name': last[:30]})
if created:
author.set_unusable_password()
author.save()
utils.get_profile_model()._default_manager.create(
user=author, website=video.user_url or '')
instance.authors.add(author)
if categories:
instance.categories = categories
if video.tags:
if settings.FORCE_LOWERCASE_TAGS:
fix = lambda t: t.lower().strip()
else:
fix = lambda t: t.strip()
tags = set(fix(tag) for tag in video.tags if tag.strip())
for tag_name in tags:
tag, created = \
tagging.models.Tag._default_manager.get_or_create(name=tag_name)
tagging.models.TaggedItem._default_manager.create(
tag=tag, object=instance)
if source_import is not None:
source_import.handle_video(instance, video)
post_video_from_vidscraper.send(sender=cls, instance=instance,
vidscraper_video=video)
if update_index:
using = connection_router.for_write()
index = connections[using].get_unified_index().get_index(cls)
index._enqueue_update(instance)
if commit:
instance.save(update_index=False)
save_m2m()
else:
instance.save_m2m = save_m2m
return instance
def get_tags(self):
if self.pk is None:
vidscraper_video = getattr(self, '_vidscraper_video', None)
return getattr(vidscraper_video, 'tags', None) or []
if (hasattr(self, '_prefetched_objects_cache') and
'taggeditem_set' in self._prefetched_objects_cache):
return [item.tag for item in
self._prefetched_objects_cache['taggeditem_set']]
return self.tags
def try_to_get_file_url_data(self):
"""
Do a HEAD request on self.file_url to find information about
        self.file_url_length and self.file_url_mimetype.
Note that while this method fills in those attributes, it does *NOT*
run self.save() ... so be sure to do so after calling this method!
"""
if not self.file_url:
return
request = urllib2.Request(utils.quote_unicode_url(self.file_url))
request.get_method = lambda: 'HEAD'
try:
http_file = urllib2.urlopen(request, timeout=5)
except Exception:
pass
else:
self.file_url_length = http_file.headers.get('content-length')
self.file_url_mimetype = http_file.headers.get('content-type', '')
if self.file_url_mimetype in ('application/octet-stream', ''):
# We got a not-useful MIME type; guess!
guess = mimetypes.guess_type(self.file_url)
if guess[0] is not None:
self.file_url_mimetype = guess[0]
def submitter(self):
"""
Return the user that submitted this video. If necessary, use the
submitter from the originating feed or savedsearch.
"""
if self.user is not None:
return self.user
elif self.feed is not None:
return self.feed.user
elif self.search is not None:
return self.search.user
else:
# XXX warning?
return None
def when(self):
"""
Simple method for getting the when_published date if the video came
from a feed or a search, otherwise the when_approved date.
"""
site_settings = SiteSettings.objects.get_cached(self.site_id,
self._state.db)
if site_settings.use_original_date and self.when_published:
return self.when_published
return self.when_approved or self.when_submitted
def source_type(self):
if self.id and self.search_id:
try:
return u'Search: %s' % self.search
except SavedSearch.DoesNotExist:
return u''
if self.id and self.feed_id:
try:
if self.feed.video_service():
return u'User: %s: %s' % (
self.feed.video_service(),
self.feed.name)
else:
return 'Feed: %s' % self.feed.name
except Feed.DoesNotExist:
return ''
if self.video_service_user:
return u'User: %s: %s' % (self.video_service(),
self.video_service_user)
return ''
def video_service(self):
if not self.website_url:
return
url = self.website_url
for service, regexp in VIDEO_SERVICE_REGEXES:
if re.search(regexp, url, re.I):
return service
def when_prefix(self):
"""
When videos are bulk imported (from a feed or a search), we list the
date as "published", otherwise we show 'posted'.
"""
site_settings = SiteSettings.objects.get_cached(site=self.site_id,
using=self._state.db)
if self.when_published and site_settings.use_original_date:
return 'published'
else:
return 'posted'
@property
def all_categories(self):
"""
Returns a set of all the categories to which this video belongs.
"""
categories = self.categories.all()
if not categories:
return categories
q_list = []
opts = Category._mptt_meta
for category in categories:
l = {
'left__lte': getattr(category, opts.left_attr),
'right__gte': getattr(category, opts.right_attr),
'tree_id': getattr(category, opts.tree_id_attr)
}
l = Category._tree_manager._translate_lookups(**l)
q_list.append(models.Q(**l))
q = reduce(operator.or_, q_list)
return Category.objects.filter(q)
def pre_save_video_set_calculated_source_type(instance, **kwargs):
# Always recalculate the source_type field.
instance.calculated_source_type = instance.source_type()
models.signals.pre_save.connect(pre_save_video_set_calculated_source_type,
sender=Video)
class Watch(models.Model):
"""
Record of a video being watched.
fields:
- video: Video that was watched
- timestamp: when watched
- user: user that watched it, if any
- ip_address: IP address of the user
"""
video = models.ForeignKey(Video)
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
user = models.ForeignKey('auth.User', blank=True, null=True)
ip_address = models.IPAddressField()
@classmethod
def add(Class, request, video):
"""
Adds a record of a watched video to the database. If the request came
from localhost, check to see if it was forwarded to (hopefully) get the
right IP address.
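        Illustrative usage (a sketch), typically called from a view::
            Watch.add(request, video)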
"""
ignored_bots = getattr(settings, 'LOCALTV_WATCH_IGNORED_USER_AGENTS',
('bot', 'spider', 'crawler'))
user_agent = request.META.get('HTTP_USER_AGENT', '').lower()
if user_agent and ignored_bots:
for bot in ignored_bots:
if bot in user_agent:
return
ip = request.META.get('REMOTE_ADDR', '0.0.0.0')
if not ipv4_re.match(ip):
ip = '0.0.0.0'
if hasattr(request, 'user') and request.user.is_authenticated():
user = request.user
else:
user = None
try:
Class(video=video, user=user, ip_address=ip).save()
except Exception:
pass
class VideoModerator(CommentModerator):
def allow(self, comment, video, request):
site_settings = SiteSettings.objects.get_cached(site=video.site_id,
using=video._state.db)
if site_settings.comments_required_login:
return request.user and request.user.is_authenticated()
else:
return True
def email(self, comment, video, request):
# we do the import in the function because otherwise there's a circular
# dependency
from localtv.utils import send_notice
site_settings = SiteSettings.objects.get_cached(site=video.site_id,
using=video._state.db)
t = loader.get_template('comments/comment_notification_email.txt')
c = Context({'comment': comment,
'content_object': video,
'user_is_admin': True})
subject = '[%s] New comment posted on "%s"' % (video.site.name,
video)
message = t.render(c)
send_notice('admin_new_comment', subject, message,
site_settings=site_settings)
admin_new_comment = notification.NoticeType.objects.get(
label="admin_new_comment")
if video.user and video.user.email:
video_comment = notification.NoticeType.objects.get(
label="video_comment")
if notification.should_send(video.user, video_comment, "1") and \
not notification.should_send(video.user,
admin_new_comment, "1"):
c = Context({'comment': comment,
'content_object': video,
'user_is_admin': False})
message = t.render(c)
EmailMessage(subject, message, settings.DEFAULT_FROM_EMAIL,
[video.user.email]).send(fail_silently=True)
comment_post_comment = notification.NoticeType.objects.get(
label="comment_post_comment")
previous_users = set()
for previous_comment in comment.__class__.objects.filter(
content_type=comment.content_type,
object_pk=video.pk,
is_public=True,
is_removed=False,
submit_date__lte=comment.submit_date,
user__email__isnull=False).exclude(
user__email='').exclude(pk=comment.pk):
if (previous_comment.user not in previous_users and
notification.should_send(previous_comment.user,
comment_post_comment, "1") and
not notification.should_send(previous_comment.user,
admin_new_comment, "1")):
previous_users.add(previous_comment.user)
c = Context({'comment': comment,
'content_object': video,
'user_is_admin': False})
message = t.render(c)
EmailMessage(subject, message, settings.DEFAULT_FROM_EMAIL,
[previous_comment.user.email]).send(fail_silently=True)
def moderate(self, comment, video, request):
site_settings = SiteSettings.objects.get_cached(site=video.site_id,
using=video._state.db)
if site_settings.screen_all_comments:
            if not getattr(request, 'user', None):
return True
else:
return not site_settings.user_is_admin(request.user)
else:
return False
moderator.register(Video, VideoModerator)
tagging.register(Video)
def finished(sender, **kwargs):
SiteSettings.objects.clear_cache()
request_finished.connect(finished)
def tag_unicode(self):
# hack to make sure that Unicode data gets returned for all tags
if isinstance(self.name, str):
self.name = self.name.decode('utf8')
return self.name
tagging.models.Tag.__unicode__ = tag_unicode
def send_new_video_email(sender, **kwargs):
site_settings = SiteSettings.objects.get_cached(site=sender.site_id,
using=sender._state.db)
if sender.status == Video.ACTIVE:
# don't send the e-mail for videos that are already active
return
t = loader.get_template('localtv/submit_video/new_video_email.txt')
c = Context({'video': sender})
message = t.render(c)
subject = '[%s] New Video in Review Queue: %s' % (sender.site.name,
sender)
utils.send_notice('admin_new_submission',
subject, message,
site_settings=site_settings)
submit_finished.connect(send_new_video_email, weak=False)
def create_email_notices(app, created_models, verbosity, **kwargs):
notification.create_notice_type('video_comment',
'New comment on your video',
'Someone commented on your video',
default=2,
verbosity=verbosity)
notification.create_notice_type('comment_post_comment',
'New comment after your comment',
'Someone commented on a video after you',
default=2,
verbosity=verbosity)
notification.create_notice_type('video_approved',
'Your video was approved',
'An admin approved your video',
default=2,
verbosity=verbosity)
notification.create_notice_type('admin_new_comment',
'New comment',
'A comment was submitted to the site',
default=1,
verbosity=verbosity)
notification.create_notice_type('admin_new_submission',
'New Submission',
'A new video was submitted',
default=1,
verbosity=verbosity)
notification.create_notice_type('admin_queue_weekly',
'Weekly Queue Update',
'A weekly e-mail of the queue status',
default=1,
verbosity=verbosity)
notification.create_notice_type('admin_queue_daily',
'Daily Queue Update',
'A daily e-mail of the queue status',
default=1,
verbosity=verbosity)
notification.create_notice_type('admin_video_updated',
'Video Updated',
'A video from a service was updated',
default=1,
verbosity=verbosity)
notification.create_notice_type('admin_new_playlist',
'Request for Playlist Moderation',
'A new playlist asked to be public',
default=2,
verbosity=verbosity)
models.signals.post_syncdb.connect(create_email_notices)
def delete_comments(sender, instance, **kwargs):
from django.contrib.comments import get_model
get_model().objects.filter(
object_pk=instance.pk,
content_type__app_label='localtv',
content_type__model='video'
).delete()
models.signals.pre_delete.connect(delete_comments,
sender=Video)
| pculture/mirocommunity | localtv/models.py | Python | agpl-3.0 | 55,425 | 0.000902 |
from rest_framework import exceptions as drf_exceptions
from rest_framework import versioning as drf_versioning
from rest_framework.compat import unicode_http_header
from rest_framework.utils.mediatypes import _MediaType
from api.base import exceptions
from api.base import utils
from api.base.renderers import BrowsableAPIRendererNoForms
from api.base.settings import LATEST_VERSIONS
def get_major_version(version):
return int(version.split('.')[0])
def url_path_version_to_decimal(url_path_version):
# 'v2' --> '2.0'
return str(float(url_path_version.split('v')[1]))
def decimal_version_to_url_path(decimal_version):
# '2.0' --> 'v2'
return 'v{}'.format(get_major_version(decimal_version))
def get_latest_sub_version(major_version):
# '2' --> '2.6'
return LATEST_VERSIONS.get(major_version, None)
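# Illustrative examples of the helpers above (a sketch, not from the original
# source):
#   url_path_version_to_decimal('v2')  -> '2.0'
#   decimal_version_to_url_path('2.6') -> 'v2'
#   get_latest_sub_version('2')        -> the latest '2.x' in LATEST_VERSIONS
#                                         (e.g. '2.6'), or None if unknown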
class BaseVersioning(drf_versioning.BaseVersioning):
def __init__(self):
super(BaseVersioning, self).__init__()
def get_url_path_version(self, kwargs):
invalid_version_message = 'Invalid version in URL path.'
version = kwargs.get(self.version_param)
if version is None:
return self.default_version
version = url_path_version_to_decimal(version)
if not self.is_allowed_version(version):
raise drf_exceptions.NotFound(invalid_version_message)
if get_major_version(version) == get_major_version(self.default_version):
return self.default_version
return version
def get_header_version(self, request, major_version):
invalid_version_message = 'Invalid version in "Accept" header.'
media_type = _MediaType(request.accepted_media_type)
version = media_type.params.get(self.version_param)
if not version:
return None
if version == 'latest':
return get_latest_sub_version(major_version)
version = unicode_http_header(version)
if not self.is_allowed_version(version):
raise drf_exceptions.NotAcceptable(invalid_version_message)
return version
def get_default_version(self, request, major_version):
"""Returns the latest available version for the browsable api, otherwise REST_FRAMEWORK default version"""
if request.accepted_renderer.__class__ == BrowsableAPIRendererNoForms:
return get_latest_sub_version(major_version)
return self.default_version
def get_query_param_version(self, request, major_version):
invalid_version_message = 'Invalid version in query parameter.'
version = request.query_params.get(self.version_param)
if not version:
return None
if version == 'latest':
return get_latest_sub_version(major_version)
if not self.is_allowed_version(version):
raise drf_exceptions.NotFound(invalid_version_message)
return version
def validate_pinned_versions(self, url_path_version, header_version, query_parameter_version):
url_path_major_version = get_major_version(url_path_version)
header_major_version = get_major_version(header_version) if header_version else None
query_major_version = get_major_version(query_parameter_version) if query_parameter_version else None
if header_version and header_major_version != url_path_major_version:
raise exceptions.Conflict(
detail='Version {} specified in "Accept" header does not fall within URL path version {}'.format(
header_version,
url_path_version,
),
)
if query_parameter_version and query_major_version != url_path_major_version:
raise exceptions.Conflict(
detail='Version {} specified in query parameter does not fall within URL path version {}'.format(
query_parameter_version,
url_path_version,
),
)
if header_version and query_parameter_version and (header_version != query_parameter_version):
raise exceptions.Conflict(
detail='Version {} specified in "Accept" header does not match version {} specified in query parameter'.format(
header_version,
query_parameter_version,
),
)
def determine_version(self, request, *args, **kwargs):
url_path_version = self.get_url_path_version(kwargs)
major_version = get_major_version(url_path_version)
header_version = self.get_header_version(request, major_version)
query_parameter_version = self.get_query_param_version(request, major_version)
version = url_path_version
if header_version or query_parameter_version:
self.validate_pinned_versions(url_path_version, header_version, query_parameter_version)
version = header_version if header_version else query_parameter_version
else:
version = self.get_default_version(request, major_version)
return version
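    # Illustrative sketch of the precedence above (not from the original
    # source): a request to /v2/ whose Accept header pins version=2.3
    # resolves to '2.3' (assuming 2.3 is an allowed version), while pinning
    # version=3.0 in the query string of a /v2/ URL raises Conflict because
    # the major versions differ.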
def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra):
url_path_version = self.get_url_path_version(kwargs)
major_version = get_major_version(url_path_version)
query_parameter_version = self.get_query_param_version(request, major_version)
kwargs = {} if (kwargs is None) else kwargs
kwargs[self.version_param] = decimal_version_to_url_path(url_path_version)
query_kwargs = {'version': query_parameter_version} if query_parameter_version else None
return utils.absolute_reverse(
viewname, query_kwargs=query_kwargs, args=args, kwargs=kwargs,
)
| caseyrollins/osf.io | api/base/versioning.py | Python | apache-2.0 | 5,741 | 0.003135 |
# -*- coding: utf-8 -*-
from module.plugins.internal.XFSAccount import XFSAccount
class FilerioCom(XFSAccount):
__name__ = "FilerioCom"
__type__ = "account"
__version__ = "0.07"
__status__ = "testing"
__description__ = """FileRio.in account plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "[email protected]")]
PLUGIN_DOMAIN = "filerio.in"
| Guidobelix/pyload | module/plugins/accounts/FilerioCom.py | Python | gpl-3.0 | 407 | 0.014742 |
import pytest
from ray.train.callbacks.results_preprocessors import (
ExcludedKeysResultsPreprocessor,
IndexedResultsPreprocessor,
SequentialResultsPreprocessor,
AverageResultsPreprocessor,
MaxResultsPreprocessor,
WeightedAverageResultsPreprocessor,
)
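# Illustrative sketch of the behaviour exercised below (not from the original
# source): an aggregation preprocessor appends its aggregate to every result,
# e.g. AverageResultsPreprocessor(["a"]).preprocess([{"a": 1}, {"a": 3}])
# is expected to yield [{"a": 1, "avg(a)": 2.0}, {"a": 3, "avg(a)": 2.0}].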
def test_excluded_keys_results_preprocessor():
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}]
expected = [{"b": 2}, {"b": 4}]
preprocessor = ExcludedKeysResultsPreprocessor("a")
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
def test_indexed_results_preprocessor():
results = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
expected = [{"a": 1}, {"a": 3}]
preprocessor = IndexedResultsPreprocessor([0, 2])
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
def test_sequential_results_preprocessor():
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
expected = [{"b": 2}, {"b": 6}]
preprocessor_1 = ExcludedKeysResultsPreprocessor("a")
# [{"b": 2}, {"b": 4}, {"b": 6}, {"b": 8}]
preprocessor_2 = IndexedResultsPreprocessor([0, 2])
preprocessor = SequentialResultsPreprocessor([preprocessor_1, preprocessor_2])
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
def test_average_results_preprocessor():
from copy import deepcopy
import numpy as np
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
expected = deepcopy(results)
for res in expected:
res.update(
{
"avg(a)": np.mean([result["a"] for result in results]),
"avg(b)": np.mean([result["b"] for result in results]),
}
)
preprocessor = AverageResultsPreprocessor(["a", "b"])
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
def test_max_results_preprocessor():
from copy import deepcopy
import numpy as np
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
expected = deepcopy(results)
for res in expected:
res.update(
{
"max(a)": np.max([result["a"] for result in results]),
"max(b)": np.max([result["b"] for result in results]),
}
)
preprocessor = MaxResultsPreprocessor(["a", "b"])
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
def test_weighted_average_results_preprocessor():
from copy import deepcopy
import numpy as np
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
expected = deepcopy(results)
total_weight = np.sum([result["b"] for result in results])
for res in expected:
res.update(
{
"weight_avg_b(a)": np.sum(
[result["a"] * result["b"] / total_weight for result in results]
)
}
)
preprocessor = WeightedAverageResultsPreprocessor(["a"], "b")
preprocessed_results = preprocessor.preprocess(results)
assert preprocessed_results == expected
@pytest.mark.parametrize(
("results_preprocessor", "expected_value"),
[(AverageResultsPreprocessor, 2.0), (MaxResultsPreprocessor, 3.0)],
)
def test_warning_in_aggregate_results_preprocessors(
caplog, results_preprocessor, expected_value
):
import logging
from copy import deepcopy
from ray.util import debug
    caplog.set_level(logging.WARNING)
results1 = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
results2 = [{"a": 1}, {"a": "invalid"}, {"a": 3}, {"a": "invalid"}]
results3 = [{"a": "invalid"}, {"a": "invalid"}, {"a": "invalid"}, {"a": "invalid"}]
results4 = [{"a": 1}, {"a": 2}, {"a": 3}, {"c": 4}]
# test case 1: metric key `b` is missing from all workers
results_preprocessor1 = results_preprocessor(["b"])
results_preprocessor1.preprocess(results1)
assert "`b` is not reported from workers, so it is ignored." in caplog.text
# test case 2: some values of key `a` have invalid data type
results_preprocessor2 = results_preprocessor(["a"])
expected2 = deepcopy(results2)
aggregation_key = results_preprocessor2.aggregate_fn.wrap_key("a")
for res in expected2:
res.update({aggregation_key: expected_value})
assert results_preprocessor2.preprocess(results2) == expected2
# test case 3: all key `a` values are invalid
results_preprocessor2.preprocess(results3)
assert "`a` value type is not valid, so it is ignored." in caplog.text
# test case 4: some workers don't report key `a`
expected4 = deepcopy(results4)
aggregation_key = results_preprocessor2.aggregate_fn.wrap_key("a")
for res in expected4:
res.update({aggregation_key: expected_value})
assert results_preprocessor2.preprocess(results4) == expected4
for record in caplog.records:
assert record.levelname == "WARNING"
debug.reset_log_once("b")
debug.reset_log_once("a")
def test_warning_in_weighted_average_results_preprocessors(caplog):
import logging
from copy import deepcopy
    caplog.set_level(logging.WARNING)
results1 = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
results2 = [{"b": 1}, {"b": 2}, {"b": 3}, {"b": 4}]
results3 = [
{"a": 1, "c": 3},
{"a": 2, "c": "invalid"},
{"a": "invalid", "c": 1},
{"a": 4, "c": "invalid"},
]
results4 = [
{"a": 1, "c": "invalid"},
{"a": 2, "c": "invalid"},
{"a": 3, "c": "invalid"},
{"a": 4, "c": "invalid"},
]
# test case 1: weight key `b` is not reported from all workers
results_preprocessor1 = WeightedAverageResultsPreprocessor(["a"], "b")
expected1 = deepcopy(results1)
for res in expected1:
res.update({"weight_avg_b(a)": 2.5})
assert results_preprocessor1.preprocess(results1) == expected1
assert (
"Averaging weight `b` is not reported by all workers in `train.report()`."
in caplog.text
)
assert "Use equal weight instead." in caplog.text
# test case 2: metric key `a` (to be averaged) is not reported from all workers
results_preprocessor1.preprocess(results2)
assert "`a` is not reported from workers, so it is ignored." in caplog.text
# test case 3: both metric and weight keys have invalid data type
results_preprocessor2 = WeightedAverageResultsPreprocessor(["a"], "c")
expected3 = deepcopy(results3)
for res in expected3:
res.update({"weight_avg_c(a)": 1.0})
assert results_preprocessor2.preprocess(results3) == expected3
# test case 4: all weight values are invalid
expected4 = deepcopy(results4)
for res in expected4:
res.update({"weight_avg_c(a)": 2.5})
assert results_preprocessor2.preprocess(results4) == expected4
assert "Averaging weight `c` value type is not valid." in caplog.text
for record in caplog.records:
assert record.levelname == "WARNING"
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
| ray-project/ray | python/ray/train/tests/test_results_preprocessors.py | Python | apache-2.0 | 7,269 | 0.001238 |
import re
from django.core.exceptions import ImproperlyConfigured
from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import sessionmaker
from tranquil.models import Importer
__all__ = ( 'engine', 'meta', 'Session', )
class EngineCache(object):
__shared_state = dict(
engine = None,
meta = None,
Session = None,
)
_mappings = {
'sqlite3': 'sqlite',
'mysql': 'mysql',
'postgresql': 'postgresql',
'postgresql_psycopg2': 'postgresql+psycopg2',
'oracle': 'oracle',
}
def __init__(self):
from django.conf import settings
self.__dict__ = self.__shared_state
if self.engine is not None:
return
if settings.DATABASE_ENGINE == 'django_sqlalchemy.backend':
from django_sqlalchemy import backend
self.engine = backend.engine
else:
options = {
'protocol': self._mappings.get( settings.DATABASE_ENGINE ),
'name': settings.DATABASE_NAME,
'user': settings.DATABASE_USER,
'pass': settings.DATABASE_PASSWORD,
'host': settings.DATABASE_HOST,
'port': settings.DATABASE_PORT,
}
if options['protocol'] is None:
raise ImproperlyConfigured( 'Unknown database engine: %s' % settings.DATABASE_ENGINE )
url = '{protocol}://{user}:{pass}@{host}{port}/{name}'
for p in options:
if p == 'port' and len( options[p] ) > 0:
url = re.sub( '{%s}' % p, ':%s' % options[p], url )
else:
url = re.sub( '{%s}' % p, options[p], url )
self.engine = create_engine( url )
self.meta = MetaData(bind=self.engine,reflect=True)
self.Session = sessionmaker( bind=self.engine, autoflush=True, autocommit=False )
self.importer = Importer(self.meta)
cache = EngineCache()
engine = cache.engine
meta = cache.meta
Session = cache.Session
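# Illustrative usage sketch (not part of the original module): ``Session`` is
# bound to the shared, reflected engine, so callers can do, e.g.,
#     session = Session()
#     rows = session.execute(meta.tables['some_table'].select()).fetchall()
# where 'some_table' is a hypothetical table name present in the database.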
| g2p/tranquil | tranquil/__init__.py | Python | bsd-3-clause | 1,717 | 0.041351 |
"""
Tests outgoing calls created with InitialAudio and/or InitialVideo, and
exposing the initial contents of incoming calls as values of InitialAudio and
InitialVideo
"""
import operator
from servicetest import (
assertContains, assertEquals, assertLength,
wrap_channel, EventPattern, call_async, make_channel_proxy)
from jingletest2 import JingleTest2, test_all_dialects
import constants as cs
def outgoing(jp, q, bus, conn, stream):
remote_jid = '[email protected]/beyond'
jt = JingleTest2(jp, conn, q, stream, 'test@localhost', remote_jid)
jt.prepare()
self_handle = conn.GetSelfHandle()
remote_handle = conn.RequestHandles(cs.HT_CONTACT, [remote_jid])[0]
rccs = conn.Properties.Get(cs.CONN_IFACE_REQUESTS, 'RequestableChannelClasses')
media_classes = [ rcc for rcc in rccs
if rcc[0][cs.CHANNEL_TYPE] == cs.CHANNEL_TYPE_STREAMED_MEDIA ]
assertLength(1, media_classes)
fixed, allowed = media_classes[0]
assertContains(cs.INITIAL_AUDIO, allowed)
assertContains(cs.INITIAL_VIDEO, allowed)
check_neither(q, conn, bus, stream, remote_handle)
check_iav(jt, q, conn, bus, stream, remote_handle, True, False)
check_iav(jt, q, conn, bus, stream, remote_handle, False, True)
check_iav(jt, q, conn, bus, stream, remote_handle, True, True)
def check_neither(q, conn, bus, stream, remote_handle):
"""
Make a channel without specifying InitialAudio or InitialVideo; check
that it's announced with both False, and that they're both present and
false in GetAll().
"""
path, props = conn.Requests.CreateChannel({
cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_STREAMED_MEDIA,
cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT,
cs.TARGET_HANDLE: remote_handle})
assertContains((cs.INITIAL_AUDIO, False), props.items())
assertContains((cs.INITIAL_VIDEO, False), props.items())
chan = wrap_channel(bus.get_object(conn.bus_name, path),
cs.CHANNEL_TYPE_STREAMED_MEDIA, ['MediaSignalling'])
props = chan.Properties.GetAll(cs.CHANNEL_TYPE_STREAMED_MEDIA)
assertContains(('InitialAudio', False), props.items())
assertContains(('InitialVideo', False), props.items())
# We shouldn't have started a session yet, so there shouldn't be any
# session handlers. Strictly speaking, there could be a session handler
# with no stream handlers, but...
session_handlers = chan.MediaSignalling.GetSessionHandlers()
assertLength(0, session_handlers)
def check_iav(jt, q, conn, bus, stream, remote_handle, initial_audio,
initial_video):
"""
Make a channel and check that its InitialAudio and InitialVideo properties
come out correctly.
"""
call_async(q, conn.Requests, 'CreateChannel', {
cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_STREAMED_MEDIA,
cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT,
cs.TARGET_HANDLE: remote_handle,
cs.INITIAL_AUDIO: initial_audio,
cs.INITIAL_VIDEO: initial_video,
})
if initial_video and (not jt.jp.can_do_video()
or (not initial_audio and not jt.jp.can_do_video_only ())):
# Some protocols can't do video
event = q.expect('dbus-error', method='CreateChannel')
assertEquals(cs.NOT_CAPABLE, event.error.get_dbus_name())
else:
path, props = q.expect('dbus-return', method='CreateChannel').value
assertContains((cs.INITIAL_AUDIO, initial_audio), props.items())
assertContains((cs.INITIAL_VIDEO, initial_video), props.items())
chan = wrap_channel(bus.get_object(conn.bus_name, path),
cs.CHANNEL_TYPE_STREAMED_MEDIA, ['MediaSignalling'])
props = chan.Properties.GetAll(cs.CHANNEL_TYPE_STREAMED_MEDIA)
assertContains(('InitialAudio', initial_audio), props.items())
assertContains(('InitialVideo', initial_video), props.items())
session_handlers = chan.MediaSignalling.GetSessionHandlers()
assertLength(1, session_handlers)
path, type = session_handlers[0]
assertEquals('rtp', type)
session_handler = make_channel_proxy(conn, path, 'Media.SessionHandler')
session_handler.Ready()
stream_handler_paths = []
stream_handler_types = []
for x in [initial_audio, initial_video]:
if x:
e = q.expect('dbus-signal', signal='NewStreamHandler')
stream_handler_paths.append(e.args[0])
stream_handler_types.append(e.args[2])
if initial_audio:
assertContains(cs.MEDIA_STREAM_TYPE_AUDIO, stream_handler_types)
if initial_video:
assertContains(cs.MEDIA_STREAM_TYPE_VIDEO, stream_handler_types)
for x in xrange (0, len(stream_handler_paths)):
p = stream_handler_paths[x]
t = stream_handler_types[x]
sh = make_channel_proxy(conn, p, 'Media.StreamHandler')
sh.NewNativeCandidate("fake", jt.get_remote_transports_dbus())
if t == cs.MEDIA_STREAM_TYPE_AUDIO:
sh.Ready(jt.get_audio_codecs_dbus())
else:
sh.Ready(jt.get_video_codecs_dbus())
sh.StreamState(cs.MEDIA_STREAM_STATE_CONNECTED)
e = q.expect('stream-iq',
predicate=jt.jp.action_predicate('session-initiate'))
jt.parse_session_initiate (e.query)
jt.accept()
events = reduce(operator.concat,
[ [ EventPattern('dbus-signal', signal='SetRemoteCodecs', path=p),
EventPattern('dbus-signal', signal='SetStreamPlaying', path=p),
] for p in stream_handler_paths
], [])
q.expect_many(*events)
chan.Close()
def incoming(jp, q, bus, conn, stream):
remote_jid = 'skinny.fists@heaven/antennas'
jt = JingleTest2(jp, conn, q, stream, 'test@localhost', remote_jid)
jt.prepare()
self_handle = conn.GetSelfHandle()
remote_handle = conn.RequestHandles(cs.HT_CONTACT, [remote_jid])[0]
for a, v in [("audio1", None), (None, "video1"), ("audio1", "video1")]:
        if v != None and not jp.can_do_video():
continue
if a == None and v != None and not jp.can_do_video_only():
continue
jt.incoming_call(audio=a, video=v)
e = q.expect('dbus-signal', signal='NewChannels',
predicate=lambda e:
cs.CHANNEL_TYPE_CONTACT_LIST not in e.args[0][0][1].values())
chans = e.args[0]
assertLength(1, chans)
path, props = chans[0]
assertEquals(cs.CHANNEL_TYPE_STREAMED_MEDIA, props[cs.CHANNEL_TYPE])
assertEquals(a != None, props[cs.INITIAL_AUDIO])
assertEquals(v != None, props[cs.INITIAL_VIDEO])
# FIXME: This doesn't check non-Google contacts that can only do one
# media type, as such contacts as simulated by JingleTest2 can always
# do both.
assertEquals(not jp.can_do_video() or not jp.can_do_video_only(),
props[cs.IMMUTABLE_STREAMS])
chan = wrap_channel(bus.get_object(conn.bus_name, path),
cs.CHANNEL_TYPE_STREAMED_MEDIA)
chan.Close()
if __name__ == '__main__':
test_all_dialects(outgoing)
test_all_dialects(incoming)
| jku/telepathy-gabble | tests/twisted/jingle/initial-audio-video.py | Python | lgpl-2.1 | 7,213 | 0.004298 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for textio module."""
from __future__ import absolute_import
from __future__ import division
import bz2
import datetime
import glob
import gzip
import logging
import os
import shutil
import sys
import tempfile
import unittest
import zlib
from builtins import range
import apache_beam as beam
import apache_beam.io.source_test_utils as source_test_utils
from apache_beam import coders
from apache_beam.io import ReadAllFromText
from apache_beam.io import iobase
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.textio import _TextSink as TextSink
from apache_beam.io.textio import _TextSource as TextSource
# Importing following private classes for testing.
from apache_beam.io.textio import ReadFromText
from apache_beam.io.textio import ReadFromTextWithFilename
from apache_beam.io.textio import WriteToText
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.test_utils import TempDir
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms.core import Create
class EOL(object):
LF = 1
CRLF = 2
MIXED = 3
LF_WITH_NOTHING_AT_LAST_LINE = 4
def write_data(
num_lines, no_data=False, directory=None, prefix=tempfile.template,
eol=EOL.LF):
"""Writes test data to a temporary file.
Args:
num_lines (int): The number of lines to write.
no_data (bool): If :data:`True`, empty lines will be written, otherwise
each line will contain a concatenation of b'line' and the line number.
directory (str): The name of the directory to create the temporary file in.
prefix (str): The prefix to use for the temporary file.
eol (int): The line ending to use when writing.
:class:`~apache_beam.io.textio_test.EOL` exposes attributes that can be
used here to define the eol.
Returns:
Tuple[str, List[str]]: A tuple of the filename and a list of the
utf-8 decoded written data.
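  Example (an illustrative sketch)::
    file_name, lines = write_data(2)
    # lines == ['line0', 'line1']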
"""
all_data = []
with tempfile.NamedTemporaryFile(
delete=False, dir=directory, prefix=prefix) as f:
sep_values = [b'\n', b'\r\n']
for i in range(num_lines):
data = b'' if no_data else b'line' + str(i).encode()
all_data.append(data)
if eol == EOL.LF:
sep = sep_values[0]
elif eol == EOL.CRLF:
sep = sep_values[1]
elif eol == EOL.MIXED:
sep = sep_values[i % len(sep_values)]
elif eol == EOL.LF_WITH_NOTHING_AT_LAST_LINE:
sep = b'' if i == (num_lines - 1) else sep_values[0]
else:
raise ValueError('Received unknown value %s for eol.' % eol)
f.write(data + sep)
return f.name, [line.decode('utf-8') for line in all_data]
def write_pattern(lines_per_file, no_data=False):
"""Writes a pattern of temporary files.
Args:
lines_per_file (List[int]): The number of lines to write per file.
no_data (bool): If :data:`True`, empty lines will be written, otherwise
each line will contain a concatenation of b'line' and the line number.
Returns:
Tuple[str, List[str]]: A tuple of the filename pattern and a list of the
utf-8 decoded written data.
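  Example (an illustrative sketch)::
    pattern, lines = write_pattern([2, 1])
    # lines == ['line0', 'line1', 'line0']  (numbering restarts per file)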
"""
temp_dir = tempfile.mkdtemp()
all_data = []
file_name = None
start_index = 0
for i in range(len(lines_per_file)):
file_name, data = write_data(lines_per_file[i], no_data=no_data,
directory=temp_dir, prefix='mytemp')
all_data.extend(data)
start_index += lines_per_file[i]
assert file_name
return (
file_name[:file_name.rfind(os.path.sep)] + os.path.sep + 'mytemp*',
all_data)
class TextSourceTest(unittest.TestCase):
# Number of records that will be written by most tests.
DEFAULT_NUM_RECORDS = 100
@classmethod
def setUpClass(cls):
# Method has been renamed in Python 3
if sys.version_info[0] < 3:
cls.assertCountEqual = cls.assertItemsEqual
def _run_read_test(self, file_or_pattern, expected_data,
buffer_size=DEFAULT_NUM_RECORDS,
compression=CompressionTypes.UNCOMPRESSED):
# Since each record usually takes more than 1 byte, default buffer size is
# smaller than the total size of the file. This is done to
# increase test coverage for cases that hit the buffer boundary.
source = TextSource(file_or_pattern, 0, compression,
True, coders.StrUtf8Coder(), buffer_size)
range_tracker = source.get_range_tracker(None, None)
read_data = list(source.read(range_tracker))
self.assertCountEqual(expected_data, read_data)
def test_read_single_file(self):
file_name, expected_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS)
assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
self._run_read_test(file_name, expected_data)
def test_read_single_file_smaller_than_default_buffer(self):
file_name, expected_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS)
self._run_read_test(file_name, expected_data,
buffer_size=TextSource.DEFAULT_READ_BUFFER_SIZE)
def test_read_single_file_larger_than_default_buffer(self):
file_name, expected_data = write_data(TextSource.DEFAULT_READ_BUFFER_SIZE)
self._run_read_test(file_name, expected_data,
buffer_size=TextSource.DEFAULT_READ_BUFFER_SIZE)
def test_read_file_pattern(self):
pattern, expected_data = write_pattern(
[TextSourceTest.DEFAULT_NUM_RECORDS * 5,
TextSourceTest.DEFAULT_NUM_RECORDS * 3,
TextSourceTest.DEFAULT_NUM_RECORDS * 12,
TextSourceTest.DEFAULT_NUM_RECORDS * 8,
TextSourceTest.DEFAULT_NUM_RECORDS * 8,
TextSourceTest.DEFAULT_NUM_RECORDS * 4])
assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS * 40
self._run_read_test(pattern, expected_data)
def test_read_single_file_windows_eol(self):
file_name, expected_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS,
eol=EOL.CRLF)
assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
self._run_read_test(file_name, expected_data)
def test_read_single_file_mixed_eol(self):
file_name, expected_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS,
eol=EOL.MIXED)
assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
self._run_read_test(file_name, expected_data)
def test_read_single_file_last_line_no_eol(self):
file_name, expected_data = write_data(
TextSourceTest.DEFAULT_NUM_RECORDS,
eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)
assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
self._run_read_test(file_name, expected_data)
def test_read_single_file_single_line_no_eol(self):
file_name, expected_data = write_data(
1, eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)
assert len(expected_data) == 1
self._run_read_test(file_name, expected_data)
def test_read_empty_single_file(self):
file_name, written_data = write_data(
1, no_data=True, eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)
assert len(written_data) == 1
# written data has a single entry with an empty string. Reading the source
# should not produce anything since we only wrote a single empty string
# without an end of line character.
self._run_read_test(file_name, [])
def test_read_single_file_last_line_no_eol_gzip(self):
file_name, expected_data = write_data(
TextSourceTest.DEFAULT_NUM_RECORDS,
eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)
gzip_file_name = file_name + '.gz'
with open(file_name, 'rb') as src, gzip.open(gzip_file_name, 'wb') as dst:
dst.writelines(src)
assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
self._run_read_test(gzip_file_name, expected_data,
compression=CompressionTypes.GZIP)
def test_read_single_file_single_line_no_eol_gzip(self):
file_name, expected_data = write_data(
1, eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)
gzip_file_name = file_name + '.gz'
with open(file_name, 'rb') as src, gzip.open(gzip_file_name, 'wb') as dst:
dst.writelines(src)
assert len(expected_data) == 1
self._run_read_test(gzip_file_name, expected_data,
compression=CompressionTypes.GZIP)
def test_read_empty_single_file_no_eol_gzip(self):
file_name, written_data = write_data(
1, no_data=True, eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)
gzip_file_name = file_name + '.gz'
with open(file_name, 'rb') as src, gzip.open(gzip_file_name, 'wb') as dst:
dst.writelines(src)
assert len(written_data) == 1
# written data has a single entry with an empty string. Reading the source
# should not produce anything since we only wrote a single empty string
# without an end of line character.
self._run_read_test(gzip_file_name, [], compression=CompressionTypes.GZIP)
def test_read_single_file_with_empty_lines(self):
file_name, expected_data = write_data(
TextSourceTest.DEFAULT_NUM_RECORDS, no_data=True, eol=EOL.LF)
assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
assert not expected_data[0]
self._run_read_test(file_name, expected_data)
  def test_read_single_file_without_stripping_eol_lf(self):
file_name, written_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS,
eol=EOL.LF)
assert len(written_data) == TextSourceTest.DEFAULT_NUM_RECORDS
source = TextSource(file_name, 0,
CompressionTypes.UNCOMPRESSED,
False, coders.StrUtf8Coder())
range_tracker = source.get_range_tracker(None, None)
read_data = list(source.read(range_tracker))
self.assertCountEqual([line + '\n' for line in written_data], read_data)
  def test_read_single_file_without_stripping_eol_crlf(self):
file_name, written_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS,
eol=EOL.CRLF)
assert len(written_data) == TextSourceTest.DEFAULT_NUM_RECORDS
source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED,
False, coders.StrUtf8Coder())
range_tracker = source.get_range_tracker(None, None)
read_data = list(source.read(range_tracker))
self.assertCountEqual([line + '\r\n' for line in written_data], read_data)
def test_read_file_pattern_with_empty_files(self):
pattern, expected_data = write_pattern(
[5 * TextSourceTest.DEFAULT_NUM_RECORDS,
3 * TextSourceTest.DEFAULT_NUM_RECORDS,
12 * TextSourceTest.DEFAULT_NUM_RECORDS,
8 * TextSourceTest.DEFAULT_NUM_RECORDS,
8 * TextSourceTest.DEFAULT_NUM_RECORDS,
4 * TextSourceTest.DEFAULT_NUM_RECORDS],
no_data=True)
assert len(expected_data) == 40 * TextSourceTest.DEFAULT_NUM_RECORDS
assert not expected_data[0]
self._run_read_test(pattern, expected_data)
def test_read_after_splitting(self):
file_name, expected_data = write_data(10)
assert len(expected_data) == 10
source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
coders.StrUtf8Coder())
splits = list(source.split(desired_bundle_size=33))
reference_source_info = (source, None, None)
sources_info = ([
(split.source, split.start_position, split.stop_position) for
split in splits])
source_test_utils.assert_sources_equal_reference_source(
reference_source_info, sources_info)
def test_header_processing(self):
file_name, expected_data = write_data(10)
assert len(expected_data) == 10
def header_matcher(line):
return line in expected_data[:5]
header_lines = []
def store_header(lines):
for line in lines:
header_lines.append(line)
source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
coders.StrUtf8Coder(),
header_processor_fns=(header_matcher, store_header))
splits = list(source.split(desired_bundle_size=100000))
assert len(splits) == 1
range_tracker = splits[0].source.get_range_tracker(
splits[0].start_position, splits[0].stop_position)
read_data = list(source.read_records(file_name, range_tracker))
self.assertCountEqual(expected_data[:5], header_lines)
self.assertCountEqual(expected_data[5:], read_data)
def test_progress(self):
file_name, expected_data = write_data(10)
assert len(expected_data) == 10
source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
coders.StrUtf8Coder())
splits = list(source.split(desired_bundle_size=100000))
assert len(splits) == 1
fraction_consumed_report = []
split_points_report = []
range_tracker = splits[0].source.get_range_tracker(
splits[0].start_position, splits[0].stop_position)
for _ in splits[0].source.read(range_tracker):
fraction_consumed_report.append(range_tracker.fraction_consumed())
split_points_report.append(range_tracker.split_points())
self.assertEqual(
[float(i) / 10 for i in range(0, 10)], fraction_consumed_report)
expected_split_points_report = [
((i - 1), iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
for i in range(1, 10)]
    # At the last split point, the remaining split points callback returns 1
    # since the expected position of the next record equals the stop position.
expected_split_points_report.append((9, 1))
self.assertEqual(
expected_split_points_report, split_points_report)
def test_read_reentrant_without_splitting(self):
file_name, expected_data = write_data(10)
assert len(expected_data) == 10
source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
coders.StrUtf8Coder())
source_test_utils.assert_reentrant_reads_succeed((source, None, None))
def test_read_reentrant_after_splitting(self):
file_name, expected_data = write_data(10)
assert len(expected_data) == 10
source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
coders.StrUtf8Coder())
splits = list(source.split(desired_bundle_size=100000))
assert len(splits) == 1
source_test_utils.assert_reentrant_reads_succeed(
(splits[0].source, splits[0].start_position, splits[0].stop_position))
def test_dynamic_work_rebalancing(self):
file_name, expected_data = write_data(5)
assert len(expected_data) == 5
source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
coders.StrUtf8Coder())
splits = list(source.split(desired_bundle_size=100000))
assert len(splits) == 1
source_test_utils.assert_split_at_fraction_exhaustive(
splits[0].source, splits[0].start_position, splits[0].stop_position)
def test_dynamic_work_rebalancing_windows_eol(self):
file_name, expected_data = write_data(15, eol=EOL.CRLF)
assert len(expected_data) == 15
source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
coders.StrUtf8Coder())
splits = list(source.split(desired_bundle_size=100000))
assert len(splits) == 1
source_test_utils.assert_split_at_fraction_exhaustive(
splits[0].source, splits[0].start_position, splits[0].stop_position,
perform_multi_threaded_test=False)
def test_dynamic_work_rebalancing_mixed_eol(self):
file_name, expected_data = write_data(5, eol=EOL.MIXED)
assert len(expected_data) == 5
source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
coders.StrUtf8Coder())
splits = list(source.split(desired_bundle_size=100000))
assert len(splits) == 1
source_test_utils.assert_split_at_fraction_exhaustive(
splits[0].source, splits[0].start_position, splits[0].stop_position,
perform_multi_threaded_test=False)
def test_read_from_text_single_file(self):
file_name, expected_data = write_data(5)
assert len(expected_data) == 5
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(file_name)
assert_that(pcoll, equal_to(expected_data))
pipeline.run()
def test_read_from_text_with_file_name_single_file(self):
file_name, data = write_data(5)
expected_data = [(file_name, el) for el in data]
assert len(expected_data) == 5
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromTextWithFilename(file_name)
assert_that(pcoll, equal_to(expected_data))
pipeline.run()
def test_read_all_single_file(self):
file_name, expected_data = write_data(5)
assert len(expected_data) == 5
pipeline = TestPipeline()
pcoll = pipeline | 'Create' >> Create(
        [file_name]) | 'ReadAll' >> ReadAllFromText()
assert_that(pcoll, equal_to(expected_data))
pipeline.run()
def test_read_all_many_single_files(self):
file_name1, expected_data1 = write_data(5)
assert len(expected_data1) == 5
file_name2, expected_data2 = write_data(10)
assert len(expected_data2) == 10
file_name3, expected_data3 = write_data(15)
assert len(expected_data3) == 15
expected_data = []
expected_data.extend(expected_data1)
expected_data.extend(expected_data2)
expected_data.extend(expected_data3)
pipeline = TestPipeline()
pcoll = pipeline | 'Create' >> Create(
        [file_name1, file_name2, file_name3]) | 'ReadAll' >> ReadAllFromText()
assert_that(pcoll, equal_to(expected_data))
pipeline.run()
def test_read_all_unavailable_files_ignored(self):
file_name1, expected_data1 = write_data(5)
assert len(expected_data1) == 5
file_name2, expected_data2 = write_data(10)
assert len(expected_data2) == 10
file_name3, expected_data3 = write_data(15)
assert len(expected_data3) == 15
file_name4 = "/unavailable_file"
expected_data = []
expected_data.extend(expected_data1)
expected_data.extend(expected_data2)
expected_data.extend(expected_data3)
pipeline = TestPipeline()
pcoll = (pipeline
| 'Create' >> Create(
[file_name1, file_name2, file_name3, file_name4])
             | 'ReadAll' >> ReadAllFromText())
assert_that(pcoll, equal_to(expected_data))
pipeline.run()
def test_read_from_text_single_file_with_coder(self):
class DummyCoder(coders.Coder):
def encode(self, x):
raise ValueError
def decode(self, x):
return (x * 2).decode('utf-8')
file_name, expected_data = write_data(5)
assert len(expected_data) == 5
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(file_name, coder=DummyCoder())
assert_that(pcoll, equal_to([record * 2 for record in expected_data]))
pipeline.run()
def test_read_from_text_file_pattern(self):
pattern, expected_data = write_pattern([5, 3, 12, 8, 8, 4])
assert len(expected_data) == 40
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(pattern)
assert_that(pcoll, equal_to(expected_data))
pipeline.run()
def test_read_from_text_with_file_name_file_pattern(self):
prefix = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
file_name_1, data_1 = write_data(5, prefix=prefix)
file_name_2, data_2 = write_data(5, prefix=prefix)
expected_data = []
expected_data.extend([(file_name_1, el) for el in data_1])
expected_data.extend([(file_name_2, el) for el in data_2])
folder = file_name_1[:file_name_1.rfind(os.path.sep)]
pattern = folder + os.path.sep + prefix + '*'
assert len(expected_data) == 10
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromTextWithFilename(pattern)
assert_that(pcoll, equal_to(expected_data))
pipeline.run()
def test_read_all_file_pattern(self):
pattern, expected_data = write_pattern([5, 3, 12, 8, 8, 4])
assert len(expected_data) == 40
pipeline = TestPipeline()
pcoll = (pipeline
| 'Create' >> Create([pattern])
             | 'ReadAll' >> ReadAllFromText())
assert_that(pcoll, equal_to(expected_data))
pipeline.run()
def test_read_all_many_file_patterns(self):
pattern1, expected_data1 = write_pattern([5, 3, 12, 8, 8, 4])
assert len(expected_data1) == 40
pattern2, expected_data2 = write_pattern([3, 7, 9])
assert len(expected_data2) == 19
pattern3, expected_data3 = write_pattern([11, 20, 5, 5])
assert len(expected_data3) == 41
expected_data = []
expected_data.extend(expected_data1)
expected_data.extend(expected_data2)
expected_data.extend(expected_data3)
pipeline = TestPipeline()
pcoll = pipeline | 'Create' >> Create(
        [pattern1, pattern2, pattern3]) | 'ReadAll' >> ReadAllFromText()
assert_that(pcoll, equal_to(expected_data))
pipeline.run()
def test_read_auto_bzip2(self):
_, lines = write_data(15)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file(suffix='.bz2')
with bz2.BZ2File(file_name, 'wb') as f:
f.write('\n'.join(lines).encode('utf-8'))
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(file_name)
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_auto_deflate(self):
_, lines = write_data(15)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file(suffix='.deflate')
with open(file_name, 'wb') as f:
f.write(zlib.compress('\n'.join(lines).encode('utf-8')))
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(file_name)
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_auto_gzip(self):
_, lines = write_data(15)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file(suffix='.gz')
with gzip.GzipFile(file_name, 'wb') as f:
f.write('\n'.join(lines).encode('utf-8'))
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(file_name)
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_bzip2(self):
_, lines = write_data(15)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
with bz2.BZ2File(file_name, 'wb') as f:
f.write('\n'.join(lines).encode('utf-8'))
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(
file_name,
compression_type=CompressionTypes.BZIP2)
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_corrupted_bzip2_fails(self):
_, lines = write_data(15)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
with bz2.BZ2File(file_name, 'wb') as f:
f.write('\n'.join(lines).encode('utf-8'))
with open(file_name, 'wb') as f:
f.write(b'corrupt')
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(
file_name,
compression_type=CompressionTypes.BZIP2)
assert_that(pcoll, equal_to(lines))
with self.assertRaises(Exception):
pipeline.run()
def test_read_bzip2_concat(self):
with TempDir() as tempdir:
bzip2_file_name1 = tempdir.create_temp_file()
lines = ['a', 'b', 'c']
with bz2.BZ2File(bzip2_file_name1, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(data.encode('utf-8'))
bzip2_file_name2 = tempdir.create_temp_file()
lines = ['p', 'q', 'r']
with bz2.BZ2File(bzip2_file_name2, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(data.encode('utf-8'))
bzip2_file_name3 = tempdir.create_temp_file()
lines = ['x', 'y', 'z']
with bz2.BZ2File(bzip2_file_name3, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(data.encode('utf-8'))
final_bzip2_file = tempdir.create_temp_file()
with open(bzip2_file_name1, 'rb') as src, open(
final_bzip2_file, 'wb') as dst:
dst.writelines(src.readlines())
with open(bzip2_file_name2, 'rb') as src, open(
final_bzip2_file, 'ab') as dst:
dst.writelines(src.readlines())
with open(bzip2_file_name3, 'rb') as src, open(
final_bzip2_file, 'ab') as dst:
dst.writelines(src.readlines())
pipeline = TestPipeline()
lines = pipeline | 'ReadFromText' >> beam.io.ReadFromText(
final_bzip2_file,
compression_type=beam.io.filesystem.CompressionTypes.BZIP2)
expected = ['a', 'b', 'c', 'p', 'q', 'r', 'x', 'y', 'z']
assert_that(lines, equal_to(expected))
pipeline.run()
def test_read_deflate(self):
_, lines = write_data(15)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
with open(file_name, 'wb') as f:
f.write(zlib.compress('\n'.join(lines).encode('utf-8')))
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(
file_name,
0, CompressionTypes.DEFLATE,
True, coders.StrUtf8Coder())
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_corrupted_deflate_fails(self):
_, lines = write_data(15)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
with open(file_name, 'wb') as f:
f.write(zlib.compress('\n'.join(lines).encode('utf-8')))
with open(file_name, 'wb') as f:
f.write(b'corrupt')
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(
file_name,
0, CompressionTypes.DEFLATE,
True, coders.StrUtf8Coder())
assert_that(pcoll, equal_to(lines))
with self.assertRaises(Exception):
pipeline.run()
def test_read_deflate_concat(self):
with TempDir() as tempdir:
deflate_file_name1 = tempdir.create_temp_file()
lines = ['a', 'b', 'c']
with open(deflate_file_name1, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(zlib.compress(data.encode('utf-8')))
deflate_file_name2 = tempdir.create_temp_file()
lines = ['p', 'q', 'r']
with open(deflate_file_name2, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(zlib.compress(data.encode('utf-8')))
deflate_file_name3 = tempdir.create_temp_file()
lines = ['x', 'y', 'z']
with open(deflate_file_name3, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(zlib.compress(data.encode('utf-8')))
final_deflate_file = tempdir.create_temp_file()
with open(deflate_file_name1, 'rb') as src, \
open(final_deflate_file, 'wb') as dst:
dst.writelines(src.readlines())
with open(deflate_file_name2, 'rb') as src, \
open(final_deflate_file, 'ab') as dst:
dst.writelines(src.readlines())
with open(deflate_file_name3, 'rb') as src, \
open(final_deflate_file, 'ab') as dst:
dst.writelines(src.readlines())
pipeline = TestPipeline()
lines = pipeline | 'ReadFromText' >> beam.io.ReadFromText(
final_deflate_file,
compression_type=beam.io.filesystem.CompressionTypes.DEFLATE)
expected = ['a', 'b', 'c', 'p', 'q', 'r', 'x', 'y', 'z']
      assert_that(lines, equal_to(expected))
      pipeline.run()
def test_read_gzip(self):
_, lines = write_data(15)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
with gzip.GzipFile(file_name, 'wb') as f:
f.write('\n'.join(lines).encode('utf-8'))
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(
file_name,
0, CompressionTypes.GZIP,
True, coders.StrUtf8Coder())
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_corrupted_gzip_fails(self):
_, lines = write_data(15)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
with gzip.GzipFile(file_name, 'wb') as f:
f.write('\n'.join(lines).encode('utf-8'))
with open(file_name, 'wb') as f:
f.write(b'corrupt')
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(
file_name,
0, CompressionTypes.GZIP,
True, coders.StrUtf8Coder())
assert_that(pcoll, equal_to(lines))
with self.assertRaises(Exception):
pipeline.run()
def test_read_gzip_concat(self):
with TempDir() as tempdir:
gzip_file_name1 = tempdir.create_temp_file()
lines = ['a', 'b', 'c']
with gzip.open(gzip_file_name1, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(data.encode('utf-8'))
gzip_file_name2 = tempdir.create_temp_file()
lines = ['p', 'q', 'r']
with gzip.open(gzip_file_name2, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(data.encode('utf-8'))
gzip_file_name3 = tempdir.create_temp_file()
lines = ['x', 'y', 'z']
with gzip.open(gzip_file_name3, 'wb') as dst:
data = '\n'.join(lines) + '\n'
dst.write(data.encode('utf-8'))
final_gzip_file = tempdir.create_temp_file()
with open(gzip_file_name1, 'rb') as src, \
open(final_gzip_file, 'wb') as dst:
dst.writelines(src.readlines())
with open(gzip_file_name2, 'rb') as src, \
open(final_gzip_file, 'ab') as dst:
dst.writelines(src.readlines())
with open(gzip_file_name3, 'rb') as src, \
open(final_gzip_file, 'ab') as dst:
dst.writelines(src.readlines())
pipeline = TestPipeline()
lines = pipeline | 'ReadFromText' >> beam.io.ReadFromText(
final_gzip_file,
compression_type=beam.io.filesystem.CompressionTypes.GZIP)
expected = ['a', 'b', 'c', 'p', 'q', 'r', 'x', 'y', 'z']
      assert_that(lines, equal_to(expected))
      pipeline.run()
def test_read_all_gzip(self):
_, lines = write_data(100)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
with gzip.GzipFile(file_name, 'wb') as f:
f.write('\n'.join(lines).encode('utf-8'))
pipeline = TestPipeline()
pcoll = (pipeline
| Create([file_name])
| 'ReadAll' >> ReadAllFromText(
compression_type=CompressionTypes.GZIP))
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_gzip_large(self):
_, lines = write_data(10000)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
with gzip.GzipFile(file_name, 'wb') as f:
f.write('\n'.join(lines).encode('utf-8'))
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(
file_name,
0, CompressionTypes.GZIP,
True, coders.StrUtf8Coder())
assert_that(pcoll, equal_to(lines))
pipeline.run()
def test_read_gzip_large_after_splitting(self):
_, lines = write_data(10000)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
with gzip.GzipFile(file_name, 'wb') as f:
f.write('\n'.join(lines).encode('utf-8'))
source = TextSource(file_name, 0, CompressionTypes.GZIP, True,
coders.StrUtf8Coder())
splits = list(source.split(desired_bundle_size=1000))
if len(splits) > 1:
raise ValueError('FileBasedSource generated more than one initial '
'split for a compressed file.')
reference_source_info = (source, None, None)
sources_info = ([
(split.source, split.start_position, split.stop_position) for
split in splits])
source_test_utils.assert_sources_equal_reference_source(
reference_source_info, sources_info)
def test_read_gzip_empty_file(self):
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(
file_name,
0, CompressionTypes.GZIP,
True, coders.StrUtf8Coder())
assert_that(pcoll, equal_to([]))
pipeline.run()
def _remove_lines(self, lines, sublist_lengths, num_to_remove):
"""Utility function to remove num_to_remove lines from each sublist.
Args:
lines: list of items.
sublist_lengths: list of integers representing length of sublist
corresponding to each source file.
num_to_remove: number of lines to remove from each sublist.
Returns:
remaining lines.
"""
curr = 0
result = []
for offset in sublist_lengths:
end = curr + offset
start = min(curr + num_to_remove, end)
result += lines[start:end]
curr += offset
return result
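  # Worked example (illustrative only): with lines=['a', 'b', 'c', 'd', 'e'],
  # sublist_lengths=[2, 3] and num_to_remove=1, the first sublist ['a', 'b']
  # loses 'a' and the second ['c', 'd', 'e'] loses 'c', giving ['b', 'd', 'e'].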
def _read_skip_header_lines(self, file_or_pattern, skip_header_lines):
"""Simple wrapper function for instantiating TextSource."""
source = TextSource(
file_or_pattern,
0,
CompressionTypes.UNCOMPRESSED,
True,
coders.StrUtf8Coder(),
skip_header_lines=skip_header_lines)
range_tracker = source.get_range_tracker(None, None)
return list(source.read(range_tracker))
def test_read_skip_header_single(self):
file_name, expected_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS)
assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
skip_header_lines = 1
expected_data = self._remove_lines(expected_data,
[TextSourceTest.DEFAULT_NUM_RECORDS],
skip_header_lines)
read_data = self._read_skip_header_lines(file_name, skip_header_lines)
self.assertEqual(len(expected_data), len(read_data))
self.assertCountEqual(expected_data, read_data)
def test_read_skip_header_pattern(self):
line_counts = [
TextSourceTest.DEFAULT_NUM_RECORDS * 5,
TextSourceTest.DEFAULT_NUM_RECORDS * 3,
TextSourceTest.DEFAULT_NUM_RECORDS * 12,
TextSourceTest.DEFAULT_NUM_RECORDS * 8,
TextSourceTest.DEFAULT_NUM_RECORDS * 8,
TextSourceTest.DEFAULT_NUM_RECORDS * 4
]
skip_header_lines = 2
pattern, data = write_pattern(line_counts)
expected_data = self._remove_lines(data, line_counts, skip_header_lines)
read_data = self._read_skip_header_lines(pattern, skip_header_lines)
self.assertEqual(len(expected_data), len(read_data))
self.assertCountEqual(expected_data, read_data)
def test_read_skip_header_pattern_insufficient_lines(self):
line_counts = [
5, 3, # Fewer lines in file than we want to skip
12, 8, 8, 4
]
skip_header_lines = 4
pattern, data = write_pattern(line_counts)
data = self._remove_lines(data, line_counts, skip_header_lines)
read_data = self._read_skip_header_lines(pattern, skip_header_lines)
self.assertEqual(len(data), len(read_data))
self.assertCountEqual(data, read_data)
def test_read_gzip_with_skip_lines(self):
_, lines = write_data(15)
with TempDir() as tempdir:
file_name = tempdir.create_temp_file()
with gzip.GzipFile(file_name, 'wb') as f:
f.write('\n'.join(lines).encode('utf-8'))
pipeline = TestPipeline()
pcoll = pipeline | 'Read' >> ReadFromText(
file_name, 0, CompressionTypes.GZIP,
True, coders.StrUtf8Coder(), skip_header_lines=2)
assert_that(pcoll, equal_to(lines[2:]))
pipeline.run()
def test_read_after_splitting_skip_header(self):
file_name, expected_data = write_data(100)
assert len(expected_data) == 100
source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
coders.StrUtf8Coder(), skip_header_lines=2)
splits = list(source.split(desired_bundle_size=33))
reference_source_info = (source, None, None)
sources_info = ([
(split.source, split.start_position, split.stop_position) for
split in splits])
self.assertGreater(len(sources_info), 1)
reference_lines = source_test_utils.read_from_source(*reference_source_info)
split_lines = []
for source_info in sources_info:
split_lines.extend(source_test_utils.read_from_source(*source_info))
self.assertEqual(expected_data[2:], reference_lines)
self.assertEqual(reference_lines, split_lines)
class TextSinkTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Method has been renamed in Python 3
if sys.version_info[0] < 3:
cls.assertCountEqual = cls.assertItemsEqual
def setUp(self):
super(TextSinkTest, self).setUp()
self.lines = [b'Line %d' % d for d in range(100)]
self.tempdir = tempfile.mkdtemp()
self.path = self._create_temp_file()
def tearDown(self):
if os.path.exists(self.tempdir):
shutil.rmtree(self.tempdir)
def _create_temp_file(self, name='', suffix=''):
if not name:
name = tempfile.template
file_name = tempfile.NamedTemporaryFile(
delete=False, prefix=name,
dir=self.tempdir, suffix=suffix).name
return file_name
def _write_lines(self, sink, lines):
f = sink.open(self.path)
for line in lines:
sink.write_record(f, line)
sink.close(f)
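  # Note: the sink is expected to write each record followed by a line
  # delimiter (a newline by default), which is why the assertions below can
  # recover self.lines via f.read().splitlines().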
def test_write_text_file(self):
sink = TextSink(self.path)
self._write_lines(sink, self.lines)
with open(self.path, 'rb') as f:
self.assertEqual(f.read().splitlines(), self.lines)
def test_write_text_file_empty(self):
sink = TextSink(self.path)
self._write_lines(sink, [])
with open(self.path, 'rb') as f:
self.assertEqual(f.read().splitlines(), [])
def test_write_bzip2_file(self):
sink = TextSink(
self.path, compression_type=CompressionTypes.BZIP2)
self._write_lines(sink, self.lines)
with bz2.BZ2File(self.path, 'rb') as f:
self.assertEqual(f.read().splitlines(), self.lines)
def test_write_bzip2_file_auto(self):
self.path = self._create_temp_file(suffix='.bz2')
sink = TextSink(self.path)
self._write_lines(sink, self.lines)
with bz2.BZ2File(self.path, 'rb') as f:
self.assertEqual(f.read().splitlines(), self.lines)
def test_write_gzip_file(self):
sink = TextSink(
self.path, compression_type=CompressionTypes.GZIP)
self._write_lines(sink, self.lines)
with gzip.GzipFile(self.path, 'rb') as f:
self.assertEqual(f.read().splitlines(), self.lines)
def test_write_gzip_file_auto(self):
self.path = self._create_temp_file(suffix='.gz')
sink = TextSink(self.path)
self._write_lines(sink, self.lines)
with gzip.GzipFile(self.path, 'rb') as f:
self.assertEqual(f.read().splitlines(), self.lines)
def test_write_gzip_file_empty(self):
sink = TextSink(
self.path, compression_type=CompressionTypes.GZIP)
self._write_lines(sink, [])
with gzip.GzipFile(self.path, 'rb') as f:
self.assertEqual(f.read().splitlines(), [])
def test_write_deflate_file(self):
sink = TextSink(
self.path, compression_type=CompressionTypes.DEFLATE)
self._write_lines(sink, self.lines)
with open(self.path, 'rb') as f:
self.assertEqual(zlib.decompress(f.read()).splitlines(), self.lines)
def test_write_deflate_file_auto(self):
self.path = self._create_temp_file(suffix='.deflate')
sink = TextSink(self.path)
self._write_lines(sink, self.lines)
with open(self.path, 'rb') as f:
self.assertEqual(zlib.decompress(f.read()).splitlines(), self.lines)
def test_write_deflate_file_empty(self):
sink = TextSink(
self.path, compression_type=CompressionTypes.DEFLATE)
self._write_lines(sink, [])
with open(self.path, 'rb') as f:
self.assertEqual(zlib.decompress(f.read()).splitlines(), [])
def test_write_text_file_with_header(self):
header = b'header1\nheader2'
sink = TextSink(self.path, header=header)
self._write_lines(sink, self.lines)
with open(self.path, 'rb') as f:
self.assertEqual(f.read().splitlines(), header.splitlines() + self.lines)
def test_write_text_file_empty_with_header(self):
header = b'header1\nheader2'
sink = TextSink(self.path, header=header)
self._write_lines(sink, [])
with open(self.path, 'rb') as f:
self.assertEqual(f.read().splitlines(), header.splitlines())
def test_write_dataflow(self):
pipeline = TestPipeline()
pcoll = pipeline | beam.core.Create(self.lines)
pcoll | 'Write' >> WriteToText(self.path) # pylint: disable=expression-not-assigned
pipeline.run()
read_result = []
for file_name in glob.glob(self.path + '*'):
with open(file_name, 'rb') as f:
read_result.extend(f.read().splitlines())
self.assertEqual(read_result, self.lines)
def test_write_dataflow_auto_compression(self):
pipeline = TestPipeline()
pcoll = pipeline | beam.core.Create(self.lines)
pcoll | 'Write' >> WriteToText(self.path, file_name_suffix='.gz') # pylint: disable=expression-not-assigned
pipeline.run()
read_result = []
for file_name in glob.glob(self.path + '*'):
with gzip.GzipFile(file_name, 'rb') as f:
read_result.extend(f.read().splitlines())
self.assertEqual(read_result, self.lines)
def test_write_dataflow_auto_compression_unsharded(self):
pipeline = TestPipeline()
pcoll = pipeline | 'Create' >> beam.core.Create(self.lines)
pcoll | 'Write' >> WriteToText( # pylint: disable=expression-not-assigned
self.path + '.gz',
shard_name_template='')
pipeline.run()
read_result = []
for file_name in glob.glob(self.path + '*'):
with gzip.GzipFile(file_name, 'rb') as f:
read_result.extend(f.read().splitlines())
self.assertEqual(read_result, self.lines)
def test_write_dataflow_header(self):
pipeline = TestPipeline()
pcoll = pipeline | 'Create' >> beam.core.Create(self.lines)
header_text = 'foo'
pcoll | 'Write' >> WriteToText( # pylint: disable=expression-not-assigned
self.path + '.gz',
shard_name_template='',
header=header_text)
pipeline.run()
read_result = []
for file_name in glob.glob(self.path + '*'):
with gzip.GzipFile(file_name, 'rb') as f:
read_result.extend(f.read().splitlines())
# header_text is automatically encoded in WriteToText
self.assertEqual(read_result, [header_text.encode('utf-8')] + self.lines)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| markflyhigh/incubator-beam | sdks/python/apache_beam/io/textio_test.py | Python | apache-2.0 | 43,198 | 0.006898 |
from django.dispatch import receiver
from pretix.base.signals import register_payment_providers
@receiver(register_payment_providers, dispatch_uid="payment_paypal")
def register_payment_provider(sender, **kwargs):
from .payment import Paypal
return Paypal
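# Sketch of the mechanism (inferred from the signal name, not verified against
# pretix internals): pretix core sends register_payment_providers while
# collecting available providers, and each receiver returns a provider class
# (or a list of classes) to be offered for configuration.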
| awg24/pretix | src/pretix/plugins/paypal/signals.py | Python | apache-2.0 | 267 | 0 |
from django.db import models
class AdjacencyListModel(models.Model):
title = models.CharField(max_length=100)
parent = models.ForeignKey(
'self', related_name='%(class)s_parent', on_delete=models.CASCADE, db_index=True, null=True, blank=True)
def __str__(self):
return 'adjacencylistmodel_%s' % self.title
class NestedSetModel(models.Model):
title = models.CharField(max_length=100)
lft = models.IntegerField(db_index=True)
rgt = models.IntegerField(db_index=True)
level = models.IntegerField(db_index=True)
def __str__(self):
return 'nestedsetmodel_%s' % self.title
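# Editorial sketch (example values, not enforced by the models): both classes
# describe trees. AdjacencyListModel stores a parent pointer per node, while
# NestedSetModel stores interval bounds; e.g. a root with two children could be
# stored as root(lft=1, rgt=6, level=0), child_a(lft=2, rgt=3, level=1) and
# child_b(lft=4, rgt=5, level=1), so a whole subtree can be fetched with a
# single range query on (lft, rgt).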
| idlesign/django-admirarchy | admirarchy/tests/testapp/models.py | Python | bsd-3-clause | 633 | 0.00158 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-04-10 03:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('netdevice', '0006_auto_20190409_0325'),
]
operations = [
migrations.RenameField(
model_name='vrf',
old_name='vrf_name',
new_name='name',
),
migrations.RenameField(
model_name='vrf',
old_name='vrf_target',
new_name='target',
),
]
| lkmhaqer/gtools-python | netdevice/migrations/0007_auto_20190410_0358.py | Python | mit | 567 | 0 |
from functools import reduce
class ScopedString (object):
def __init__ (self):
self._stack = []
def push (self, frame):
self._stack.append (frame)
def pop (self):
frame = self._stack.pop()
return frame
def __str__ (self):
return '.'.join (self._stack)
class ScopedList (object):
def __init__ (self, stack=None):
if stack:
self._stack = stack
else:
self._stack = []
self.push()
def push (self):
self._stack.append ([])
def pop (self):
if (len (self._stack) <= 1):
raise IndexError ("Attempt to pop global scope")
self._stack.pop()
def append (self, val):
self._stack[-1].append (val)
def _normalize (self):
return reduce (lambda x, y: x + y, self._stack, [])
def __str__ (self):
return str (self._normalize())
def __repr__ (self):
return "ScopedDict(" + repr(self._stack) + ")"
def __iter__ (self):
return self._normalize().__iter__()
class ScopedDict (object):
def __init__ (self, stack=None):
if stack:
self._stack = stack
else:
self._stack = []
self.push ()
def push (self):
self._stack.insert (0, {})
def pop (self):
if (len (self._stack) <= 1):
raise IndexError ("Attempt to pop global scope")
temp = self._stack[0]
del (self._stack[0])
return temp
def _normalize (self):
normal = {}
for frame in self._stack:
for key, value in frame.items():
if key not in normal:
normal[key] = value
return normal
def __getitem__ (self, key):
for frame in self._stack:
if key in frame:
return frame[key]
raise KeyError (key)
def __setitem__ (self, key, value):
self._stack[0][key] = value
def __contains__ (self, key):
for frame in self._stack:
if key in frame:
return True
return False
def __str__ (self):
return str (self._normalize())
def __repr__ (self):
return "ScopedDict(" + repr(self._stack) + ")"
def __iter__ (self):
return self._normalize().__iter__()
def items (self):
return self._normalize().items()
def keys (self):
return self._normalize().keys()
def values (self):
return self._normalize().values()
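# Minimal usage sketch (illustrative, not from the original module): pushing a
# scope shadows outer bindings and popping restores them.
if __name__ == '__main__':
    scopes = ScopedDict()
    scopes['x'] = 1            # binding in the outermost scope
    scopes.push()
    scopes['x'] = 2            # shadows the outer binding
    assert scopes['x'] == 2
    scopes.pop()
    assert scopes['x'] == 1    # outer binding is visible again
    names = ScopedList()
    names.append('a')
    names.push()
    names.append('b')
    assert list(names) == ['a', 'b']
    names.pop()
    assert list(names) == ['a']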
| doffm/dbuf | src/dbuf/util.py | Python | bsd-3-clause | 3,108 | 0.026705 |
"""A likelihood function representing a Student-t distribution.
Author:
Ilias Bilionis
Date:
1/21/2013
"""
__all__ = ['StudentTLikelihoodFunction']
import numpy as np
import scipy.linalg
import math
from . import GaussianLikelihoodFunction
class StudentTLikelihoodFunction(GaussianLikelihoodFunction):
"""An object representing a Student-t likelihood function."""
# The degrees of freedom
_nu = None
@property
def nu(self):
"""Get the degrees of freedom."""
return self._nu
@nu.setter
def nu(self, value):
"""Set the degrees of freedom."""
if not isinstance(value, float):
raise TypeError('nu must be a float.')
self._nu = value
def __init__(self, nu, num_input=None, data=None, mean_function=None, cov=None,
name='Student-t Likelihood Function'):
"""Initialize the object.
Arguments:
nu --- The degrees of freedom of the distribution.
        Keyword Arguments:
num_input --- The number of inputs. Optional, if
mean_function is a proper Function.
data --- The observed data. A vector. Optional,
if mean_function is a proper Function.
It can be set later.
mean_function --- The mean function. See the super class
for the description.
cov --- The covariance matrix. It can either be
a positive definite matrix, or a number.
                            The data or a proper mean_function is
                            assumed to have been set already.
name --- A name for the likelihood function.
"""
self.nu = nu
super(StudentTLikelihoodFunction, self).__init__(num_input=num_input,
data=data,
mean_function=mean_function,
cov=cov,
name=name)
def __call__(self, x):
"""Evaluate the function at x."""
mu = self.mean_function(x)
y = scipy.linalg.solve_triangular(self.L_cov, self.data - mu)
return (
- 0.5 * (self.nu + self.num_data) * math.log(1. + np.dot(y, y) / self.nu))
| ebilionis/py-best | best/random/_student_t_likelihood_function.py | Python | lgpl-3.0 | 2,586 | 0.001933 |