content
stringlengths 5
1.05M
|
---|
import numpy as np
import pytest
from structure_factor.hyperuniformity import Hyperuniformity
from structure_factor.point_processes import (
GinibrePointProcess,
HomogeneousPoissonPointProcess,
)
@pytest.mark.parametrize(
    "sf, expected",
    [
        (HomogeneousPoissonPointProcess.structure_factor, False),
        (GinibrePointProcess.structure_factor, True),
    ],
)
def test_effective_hyperuniformity(sf, expected):
    """The H index is below 1e-3 for the Ginibre ensemble but not for Poisson."""
    k_norms = np.linspace(0, 10, 100)
    hu = Hyperuniformity(k_norms, sf(k_norms))
    index_H, _ = hu.effective_hyperuniformity(k_norm_stop=4)
    assert (index_H < 1e-3) == expected
def f(c, alpha, x):
    """Evaluate the power law ``c * x**alpha`` (fixture generator for the tests)."""
    scaled = x ** alpha
    return c * scaled
# Sample evaluation grids used by the parametrized polynomial-fit test below.
x_1 = np.linspace(0, 3, 100)
x_2 = np.linspace(0.5, 2, 50)
@pytest.mark.parametrize(
    "x, fx, c, alpha",
    [
        (x_1, f(8, 2, x_1), 8, 2),
        (x_2, f(6, 0.5, x_2), 6, 0.5),
    ],
)
def test_hyperuniformity_class_on_polynomial(x, fx, c, alpha):
    """Fitting S(k) = c * k**alpha should recover the generating parameters.

    Bug fix: the original line ``assert alpha, c == test.hyperuniformity_class()``
    asserted only the truthiness of ``alpha`` and used the comparison as the
    assert *message*, so the test could never fail.
    """
    test = Hyperuniformity(x, fx)
    # hyperuniformity_class() is unpacked as (alpha, c) elsewhere in this file;
    # TODO confirm the second return value is the fitted prefactor c.
    fitted_alpha, fitted_c = test.hyperuniformity_class()
    np.testing.assert_almost_equal(fitted_alpha, alpha)
    np.testing.assert_almost_equal(fitted_c, c)
@pytest.mark.parametrize(
    "sf, expected_alpha",
    [
        (GinibrePointProcess.structure_factor, 2),
    ],
)
def test_hyperuniformity_class_ginibre(sf, expected_alpha):
    # Verify that the power-law decay exponent alpha fitted near k=0 for the
    # Ginibre ensemble matches the theoretical value alpha=2.
    # (The original comment was copy-pasted from the H-index test above.)
    k = np.linspace(0, 1, 3000)
    sf_k = sf(k)
    hyperuniformity_test = Hyperuniformity(k, sf_k)
    # Restrict the fit to very small wavenumbers where the power law holds.
    alpha, _ = hyperuniformity_test.hyperuniformity_class(k_norm_stop=0.001)
    diff_alpha = alpha - expected_alpha
    np.testing.assert_almost_equal(diff_alpha, 0, decimal=3)
|
import asyncio
import os
import re
# Matches the numeric id between two slashes in a URL, e.g. ".../12345/...".
# Raw string fixes the invalid "\d" escape (DeprecationWarning on CPython 3.6+).
id_matcher = re.compile(r"(?<=/)\d+(?=/)")
async def main():
    """Download every link listed in links/<name>.txt into images/<name>/.

    Each line of a link file is expected to contain a URL with a numeric id
    between two slashes; the download is saved as images/<name>/<id>.png.
    """
    for file in os.listdir("links"):
        folder = file.replace(".txt", "")
        # Ensure the destination folder exists before downloading into it
        # (hoisted out of the per-link loop; exist_ok replaces the exists check).
        os.makedirs(f"images/{folder}", exist_ok=True)
        with open(f"links/{file}", encoding="utf-8") as link_file:
            links = link_file.readlines()
        for link in links:
            nlink = link.replace("\n", "")
            if not nlink:
                continue  # skip blank lines
            match = id_matcher.search(nlink)
            if match is None:
                # Bug fix: the original crashed with AttributeError on any
                # line that did not contain a /<digits>/ segment.
                print(f"Skipping unrecognised link: {nlink}")
                continue
            link_id = match.group(0)
            print(f"Downloading {link_id} to folder {folder}", end="\r")
            await ugetter(folder, f"{link_id}.png", nlink)
async def ugetter(folder, filename, url):
    """Quietly download ``url`` to images/<folder>/<filename> using wget.

    Returns wget's exit status (0 on success).
    """
    cmd = [
        "wget",
        url,
        "-O",
        # Bug fix: the destination previously ignored ``filename`` and wrote
        # every download to the same literal path.
        f"images/{folder}/{filename}",
        "-q",
    ]
    process = await asyncio.create_subprocess_exec(*cmd)
    return await process.wait()
if __name__ == '__main__':
    # asyncio.run() supersedes the deprecated get_event_loop()/run_until_complete
    # pattern (deprecated since Python 3.10) and also closes the loop cleanly.
    asyncio.run(main())
|
#!/usr/bin/env python
"""
Entry point for bin/* scripts
"""
__authors__ = "James Bergstra"
__license__ = "3-clause BSD License"
__contact__ = "github.com/hyperopt/hyperopt"
import cPickle
import logging
import os
import base
import utils
logger = logging.getLogger(__name__)
from .base import SerialExperiment
import sys
import logging
logger = logging.getLogger(__name__)
def main_search():
    """CLI entry point: build (or unpickle) a SerialExperiment and run it.

    Expects exactly two positional arguments (<bandit> <bandit_algo>); the
    experiment is pickled to --save on exit, even on failure.
    Returns -1 on usage error, otherwise exits normally.
    """
    from optparse import OptionParser
    parser = OptionParser(
        usage="%prog [options] [<bandit> <bandit_algo>]")
    parser.add_option('--load',
                      default='',
                      dest="load",
                      metavar='FILE',
                      help="unpickle experiment from here on startup")
    parser.add_option('--save',
                      default='experiment.pkl',
                      dest="save",
                      metavar='FILE',
                      help="pickle experiment to here on exit")
    parser.add_option("--steps",
                      dest='steps',
                      default='100',
                      metavar='N',
                      help="exit after queuing this many jobs (default: 100)")
    parser.add_option("--workdir",
                      dest="workdir",
                      default=os.path.expanduser('~/.hyperopt.workdir'),
                      help="create workdirs here",
                      metavar="DIR")
    parser.add_option("--bandit-argfile",
                      dest="bandit_argfile",
                      default=None,
                      help="path to file containing arguments bandit constructor \
                      file format: pickle of dictionary containing two keys,\
                      {'args' : tuple of positional arguments, \
                      'kwargs' : dictionary of keyword arguments}")
    parser.add_option("--bandit-algo-argfile",
                      dest="bandit_algo_argfile",
                      default=None,
                      help="path to file containing arguments for bandit_algo "
                      "constructor. File format is pickled dictionary containing "
                      "two keys: 'args', a tuple of positional arguments, and "
                      "'kwargs', a dictionary of keyword arguments. "
                      "NOTE: bandit is pre-pended as first element of arg tuple.")
    (options, args) = parser.parse_args()
    try:
        bandit_json, bandit_algo_json = args
    except ValueError:
        # Bug fix: was a bare `except:` -- only the two-argument unpacking
        # failure should trigger the usage message.
        parser.print_help()
        return -1
    try:
        if not options.load:
            raise IOError()
        # Resume a previously saved experiment.
        with open(options.load, 'rb') as handle:
            self = cPickle.load(handle)
    except IOError:
        # No saved state: construct bandit and algo from the CLI arguments.
        bandit = utils.get_obj(bandit_json, argfile=options.bandit_argfile)
        bandit_algo = utils.get_obj(bandit_algo_json,
                                    argfile=options.bandit_algo_argfile,
                                    args=(bandit,))
        self = SerialExperiment(bandit_algo)
    try:
        self.run(int(options.steps))
    finally:
        # Persist progress even if run() raised (file now closed reliably).
        if options.save:
            with open(options.save, 'wb') as handle:
                cPickle.dump(self, handle)
def main(cmd, fn_pos = 1):
    """
    Entry point for bin/* scripts
    XXX
    """
    logging.basicConfig(
        stream=sys.stderr,
        level=logging.INFO)
    try:
        # Map the sub-command name to the module-level runner function name.
        runner = dict(
            search='main_search',
            dryrun='main_dryrun',
            plot_history='main_plot_history',
            )[cmd]
    except KeyError:
        logger.error("Command not recognized: %s" % cmd)
        # XXX: Usage message
        sys.exit(1)
    try:
        # argv1 is unused afterwards; the lookup doubles as an existence check.
        argv1 = sys.argv[fn_pos]
    except IndexError:
        logger.error('Module name required (XXX: print Usage)')
        return 1
    # NOTE(review): `datasets` is not imported anywhere in this file, so this
    # line raises NameError at runtime -- confirm the intended module.
    fn = datasets.main.load_tokens(sys.argv[fn_pos].split('.') + [runner])
    sys.exit(fn(sys.argv[fn_pos+1:]))
if __name__ == '__main__':
    # First CLI argument selects the sub-command; its arguments start at index 2.
    sys.exit(main(sys.argv[1], 2))
|
#notecard specific ATTN requests and processing
from command import Commands
# Short module-local alias for the command helper.
CM = Commands
# Inbound Notefile on the Notecard that the host polls for queued commands.
remoteCommandQueue = "commands.qi"
def isEndOfQueueErr(e):
    """Return True if error string ``e`` marks the end of the note queue."""
    # `in` is the idiomatic membership test (was str.__contains__(e, ...)).
    return "{note-noexist}" in e
def _extractAndEnqueueCommands(body):
    """Enqueue every command found in a note body (dict of name -> arg(s))."""
    for command, raw_args in body.items():
        # Normalise the arguments to a tuple: lists are expanded, anything
        # else becomes a single-element tuple.
        args = tuple(raw_args) if isinstance(raw_args, list) else (raw_args,)
        CM.Enqueue(command, args)
def ReadCommands(card):
    # Drain the inbound command queue: each note.get with delete=True pops
    # one note; loop until the queue reports empty.
    req = {"req":"note.get","file":remoteCommandQueue,"delete":True}
    while True:
        rsp = card.Transaction(req)
        if "err" in rsp:
            # {note-noexist} means the queue is empty -- normal termination.
            if isEndOfQueueErr(rsp["err"]): return
            raise Exception(rsp["err"])
        if "body" not in rsp:
            # Note without a body: nothing to enqueue, keep draining.
            continue
        body = rsp["body"]
        _extractAndEnqueueCommands(body)
def Arm(card) -> None:
    """Re-arm the Notecard ATTN interrupt so future triggers fire."""
    card.Transaction({"req": "card.attn", "mode": "rearm"})
def Disarm(card) -> None:
    """Disable the Notecard ATTN interrupt."""
    card.Transaction({"req": "card.attn", "mode": "disarm"})
def Initialize(card) -> None:
    """Point ATTN at the command-queue notefile, then arm the interrupt.

    Disarms first so reconfiguration cannot race a pending trigger.
    """
    Disarm(card)
    card.Transaction(
        {"req": "card.attn", "mode": "files", "files": [remoteCommandQueue]})
    Arm(card)
def QueryTriggerSource(card) -> dict:
    """Ask the Notecard what caused the ATTN pin to fire."""
    return card.Transaction({"req": "card.attn"})
def ProcessAttnInfo(card, info=None) -> None:
    """Handle an ATTN trigger; queries the card when no info dict is given.

    Reads the command queue only if it is listed among the triggering files.
    """
    info = info or QueryTriggerSource(card)
    if remoteCommandQueue in info.get("files", ()):
        ReadCommands(card)
#!/usr/bin/env python
from setuptools import setup, find_packages
# Package metadata and build configuration for katsdpdisp.
setup (
    name = "katsdpdisp",
    description = "Karoo Array Telescope Online Signal Displays",
    author = "MeerKAT SDP team",
    author_email = "[email protected]",
    packages = find_packages(),
    # Ship the bundled web front-end files with every package.
    package_data={'': ['html/*']},
    include_package_data = True,
    scripts = [
        "scripts/time_plot.py",
    ],
    # NOTE(review): presumably the package reads its data files from disk,
    # so it cannot run from a zipped egg -- confirm before changing.
    zip_safe = False,
    python_requires=">=3.5",
    install_requires=[
        "h5py",
        "manhole",
        "matplotlib",
        "netifaces",
        "numpy",
        "psutil",
        "six",
        "spead2>=3.0.0",
        "katsdpservices[argparse]",
        "katsdptelstate",
        "katdal",
        "katpoint"],
    # Derive the package version from git via katversion at build time.
    use_katversion=True
)
|
import numpy
# scipy.special for the sigmoid function expit()
import scipy.special
# neural network class definition
class neuralNetwork:
    """A small multi-layer perceptron with sigmoid activations.

    Weights are stored per hidden layer:
      * ``wih[i]``: weights into hidden layer i
        (shape (hnodes, inodes) for i == 0, else (hnodes, hnodes))
      * ``who[i]``: weights from hidden layer i to the output layer,
        shape (onodes, hnodes); only the last entry feeds the forward pass.
    """

    def __init__(self, inputnodes, hiddennodes, hiddenlayers, outputnodes, learningrate):
        # set number of nodes in each input, hidden, output layer
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        self.hiddenlayers = hiddenlayers
        # Link weight matrices; weights are w_i_j, from node i to node j in
        # the next layer, sampled from N(0, fan_in**-0.5).
        self.wih = []
        self.who = []
        for i in range(0, hiddenlayers):
            # First hidden layer receives the raw inputs, later layers the
            # previous hidden layer's outputs.
            fan_in = self.inodes if i == 0 else self.hnodes
            self.wih.append(numpy.random.normal(0.0, pow(self.inodes, -0.5), (self.hnodes, fan_in)))
            # Bug fix: output weights must map hidden -> output, i.e. shape
            # (onodes, hnodes). The original used (onodes, fan_in), which only
            # worked when inputnodes == hiddennodes.
            self.who.append(numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes)))
        # learning rate
        self.lr = learningrate
        # activation function is the sigmoid function
        self.activation_function = lambda x: scipy.special.expit(x)

    def train(self, inputs_list, targets_list):
        """Run one forward/backward pass on a single training example."""
        # convert inputs list to 2d column vectors
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        # forward pass: hidden layers, then the (last) output layer
        hidden_outputs = self.queryHiddenLayers(inputs)
        final_inputs = numpy.dot(self.who[self.hiddenlayers - 1], hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        # output layer error is the (target - actual)
        output_errors = targets - final_outputs
        # NOTE(review): this backward pass is only correct for a single hidden
        # layer; for hiddenlayers > 1 it reuses output_errors, hidden_outputs
        # and the raw inputs at every layer instead of propagating through the
        # intermediate activations -- TODO confirm intended behaviour.
        for layerIndex in range(self.hiddenlayers - 1, -1, -1):
            # hidden layer error: output errors split back through the weights
            hidden_errors = numpy.dot(self.who[layerIndex].T, output_errors)
            self.who[layerIndex] += self.lr * numpy.dot(
                (output_errors * final_outputs * (1.0 - final_outputs)),
                numpy.transpose(hidden_outputs))
            self.wih[layerIndex] += self.lr * numpy.dot(
                (hidden_errors * hidden_outputs * (1.0 - hidden_outputs)),
                numpy.transpose(inputs))

    def query(self, inputs_list):
        """Forward-propagate ``inputs_list`` and return the output activations
        as an (onodes, 1) column vector."""
        inputs = numpy.array(inputs_list, ndmin=2).T
        hidden_outputs = self.queryHiddenLayers(inputs)
        # Bug fix: use the last layer's output weights; the original dotted
        # the whole *list* of matrices (numpy.dot(self.who, ...)), which
        # produced a 3-D result and broke for multiple layers.
        final_inputs = numpy.dot(self.who[self.hiddenlayers - 1], hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        return final_outputs

    def queryHiddenLayers(self, inputs):
        """Propagate ``inputs`` through all hidden layers and return the last
        layer's activated outputs."""
        for layerWeights in self.wih:
            hidden_inputs = numpy.dot(layerWeights, inputs)
            hidden_outputs = self.activation_function(hidden_inputs)
            # Bug fix: the next layer must receive the *activated* outputs;
            # the original fed it the pre-activation values (and printed
            # debug vector lengths to stdout).
            inputs = hidden_outputs
        return hidden_outputs
|
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import app, db, models
from app.models import User, Bucket, BucketItem
import unittest
import coverage
import os
import forgery_py as faker
from random import randint
from sqlalchemy.exc import IntegrityError
# Initialize the Flask-Script command manager for the application.
manager = Manager(app)
# Wire Flask-Migrate to the app and its SQLAlchemy database.
migrate = Migrate(app, db)
# Expose the migration commands as `python manage.py db <command>`.
manager.add_command('db', MigrateCommand)
|
__all__ = ('animate', )
from functools import partial
from kivy.clock import Clock
from kivy.animation import AnimationTransition
from asynckivy import sleep_forever
async def animate(target, **kwargs): # noqa:C901
    '''
    animate
    =======

    An async version of ``kivy.animation.Animation``.

    Usage
    -----

    .. code-block:: python

        import asynckivy as ak

        async def some_async_func(widget):
            # case #1: start an animation and wait for its completion
            await ak.animate(widget, x=100, d=2, s=.2, t='in_cubic')

            # case #2: start an animation but not wait for its completion
            ak.start(ak.animate(widget, ...))

    Difference from kivy.animation.Animation
    ----------------------------------------

    ``kivy.animation.Animation`` requires the object you wanna animate to
    have an attribute named ``uid`` but ``asynckivy`` does not. When you have
    an object like this:

    .. code-block:: python

        class MyClass: pass
        obj = MyClass()
        obj.value = 100

    you already can animate it by ``asynckivy.animate(obj, value=200)``.
    Therefore, ``asynckivy.animate()`` is more broadly applicable than
    ``kivy.animation.Animation``.

    Sequence and Parallel
    ---------------------

    Kivy has two compound animations: ``Sequence`` and ``Parallel``.
    You can achieve the same functionality in asynckivy as follows:

    .. code-block:: python

        def kivy_Sequence(widget):
            anim = Animation(x=100) + Animation(x=0)
            anim.repeat = True
            anim.start(widget)

        async def asynckivy_Sequence(widget):
            while True:
                await ak.animate(widget, x=100)
                await ak.animate(widget, x=0)

        def kivy_Parallel(widget):
            anim = Animation(x=100) & Animation(y=100, d=2)
            anim.start(widget)
            anim.bind(on_complete=lambda *args: print("completed"))

        async def asynckivy_Parallel(widget):
            await ak.and_(
                ak.animate(widget, x=100),
                ak.animate(widget, y=100, d=2),
            )
            print("completed")
    '''
    from asyncgui import get_step_coro
    # Both the short and long spellings are accepted; note the inner pop()
    # always runs, so passing both keys consumes both and the short form wins.
    duration = kwargs.pop('d', kwargs.pop('duration', 1.))
    transition = kwargs.pop('t', kwargs.pop('transition', 'linear'))
    step = kwargs.pop('s', kwargs.pop('step', 0))
    # Whatever keyword arguments remain are the properties to animate.
    animated_properties = kwargs
    if not duration:
        # Zero duration: jump straight to the final values, no scheduling.
        for key, value in animated_properties.items():
            setattr(target, key, value)
        return
    if isinstance(transition, str):
        # Resolve a transition name to its easing function.
        transition = getattr(AnimationTransition, transition)
    # get current values
    # Map each property to a (start, goal) pair; sequences and dicts are
    # copied so later mutation of the target's value cannot corrupt 'start'.
    properties = {}
    for key, value in animated_properties.items():
        original_value = getattr(target, key)
        if isinstance(original_value, (tuple, list)):
            original_value = original_value[:]
        elif isinstance(original_value, dict):
            original_value = original_value.copy()
        properties[key] = (original_value, value)
    try:
        ctx = {
            'target': target,
            'time': 0.,
            'duration': duration,
            'transition': transition,
            'properties': properties,
            'step_coro': await get_step_coro(),
        }
        # step == 0 means "run every frame"; _update returns False when the
        # animation completes, which also unschedules the event.
        clock_event = Clock.schedule_interval(partial(_update, ctx), step)
        await sleep_forever()
    finally:
        # Always cancel the clock event, even when this coroutine is closed
        # or cancelled before the animation finishes.
        clock_event.cancel()
def _update(ctx, dt):
    """Clock callback: advance the animation in ``ctx`` by ``dt`` seconds.

    Applies the eased interpolation to every animated property, wakes the
    waiting coroutine and returns False (unscheduling itself) when done.
    """
    elapsed = ctx['time'] + dt
    ctx['time'] = elapsed
    # Progression is clamped to 1.0, then shaped by the easing function.
    progress = min(1., elapsed / ctx['duration'])
    eased = ctx['transition'](progress)
    obj = ctx['target']
    for name, (start, goal) in ctx['properties'].items():
        setattr(obj, name, _calculate(start, goal, eased))
    if progress >= 1.:
        # Animation finished: resume the awaiting coroutine, stop the clock.
        ctx['step_coro']()
        return False
def _calculate(a, b, t):
if isinstance(a, list) or isinstance(a, tuple):
if isinstance(a, list):
tp = list
else:
tp = tuple
return tp([_calculate(a[x], b[x], t) for x in range(len(a))])
elif isinstance(a, dict):
d = {}
for x in a:
if x not in b:
# User requested to animate only part of the dict.
# Copy the rest
d[x] = a[x]
else:
d[x] = _calculate(a[x], b[x], t)
return d
else:
return (a * (1. - t)) + (b * t)
|
from logging import getLogger
def settings_handler():
    """Handle the bot's settings command (currently just logs; not used yet)."""
    getLogger().info("settings_handler started")
|
"""
gfa_reduce.xmatch
================
Cross-matching utilities for the DESI GFA
off-line reduction pipeline.
"""
|
"""Second-Generation p-values and delta-gaps."""
def sgpvalue(*, null_lo, null_hi, est_lo, est_hi, inf_correction: float = 1e-5,
warnings: bool = True):
"""
Second-Generation p-values and delta-gaps.
#TODO: Output is still not pretty-> need to remove numpy type information
Parameters
----------
null_lo : array_like
Lower bounds of the null interval(s). Values may be finite or -Inf or Inf.
Must be of same length as null_hi.
null_hi : array_like
Upper bounds of the null interval(s). Values may be finite or -Inf or Inf.
Must be of same length as null_hi.
est_lo : array_like
Lower bounds of interval estimates. Values may be finite or -Inf or Inf.
Must be of same length as est_hi.
est_hi : array_like
Upper bounds of interval estimates. Values may be finite or -Inf or Inf.
Must be of same length as est_lo.
infcorrection : TYPE, optional
A small number to denote a positive but infinitesimally small SGPV.
Default is 1e-5. SGPVs that are infinitesimally close to 1 are assigned 1-infcorrection.
This option can only be invoked when one of the intervals has infinite length.
warnings : bool, optional
Warnings toggle. Showing the warnings about potentially problematic intervals.
Warnings are on by default.
Raises
------
ValueError
Indicates that some value was outside of the expected range or
outside of the accepted options.
Returns
-------
pdelta : numpy_array
Second-generation p-values.
deltagap : numpy_array
The delta gaps, Reported as None when the corresponding
second-generation p-value is not zero.
Examples
# TODO : add references to original R-code and further comments
--------
# Simple example for three estimated log odds ratios but the same null interval
>>> import numpy as np
>>> from sgpvalue import sgpvalue
>>> lb = (np.log(1.05), np.log(1.3), np.log(0.97))
>>> ub = (np.log(1.8), np.log(1.8), np.log(1.02))
>>> sgpvalue(est_lo = lb, est_hi = ub,
null_lo = np.log(1/1.1), null_hi = np.log(1.1))
sgpv(pdelta=array([0.1220227, 0. , 1. ]),
deltagap=array([None, 1.7527413, None], dtype=object))
>>> sgpvalue(est_lo = np.log(1.3), est_hi = np.inf,
null_lo = np.NINF, null_hi = np.log(1.1))
At least one interval has infinite length
sgpv(pdelta=array([0.]), deltagap=array([0.1670541], dtype=object))
>>> sgpvalue(est_lo = np.log(1.05), est_hi = np.inf,
null_lo = np.NINF, null_hi = np.log(1.1))
At least one interval has infinite length
sgpv(pdelta=array([0.]), deltagap=array([-0.04652], dtype=object))
# Example t-test with simulated data
>>> from scipy.stats import ttest_ind
>>> from scipy.stats import norm
>>> from scipy.stats import t
>>> np.random.seed(1776)
>>> x1 = norm.rvs(size=15, loc=0, scale=2)
>>> x2 = norm.rvs(size=15, loc=3, scale=2)
>>> se = (x1-x2).std()/np.sqrt(15)
>>> ci1 = (x1.mean()-x2.mean()) - se*t.ppf(df=13, q=0.975)
>>> ci2 = (x1.mean()-x2.mean()) + se*t.ppf(df=13 ,q=0.975)
>>> sgpvalue(est_lo = ci1, est_hi = ci2, null_lo = -1, null_hi = 1)
sgpv(pdelta=array([0.]), deltagap=array([0.3000322], dtype=object))
# Simulated two-group dichotomous data for different parameters
>>> from scipy.stats import binom
>>> from statsmodels.stats.proportion import proportions_ztest
>>> np.random.seed(1492)
>>> n = 30
>>> p1, p2 = 0.15, 0.50
>>> x1 = binom.rvs(1, p=p1, size=n).sum()
>>> x2 = binom.rvs(1, p=p2, size=n).sum()
>>> prop1 = x1.sum()/n # Proportion of successes
>>> prop2 = x2.sum()/n
>>> ci1 = (prop1 - prop2) - 1.96*np.sqrt((prop1 *(1-prop1)/n) + (prop2*(1- prop2)/n))
>>> ci2 = (prop1 - prop2) + 1.96*np.sqrt((prop1 *(1-prop1)/n) + (prop2*(1- prop2)/n))
>>> sgpvalue(est_lo=ci1, est_hi=ci2, null_lo=-0.2, null_hi=0.2)
sgpv(pdelta=array([0.2756205]), deltagap=array([None], dtype=object))
#On the log odds ratio scale
>>> a = x1
>>> b = x2
>>> c = 30-x1
>>> d = 30-x2
>>> cior1 = np.log(a*d/(b*c)) - 1.96*np.sqrt(1/a+1/b+1/c+1/d) # Delta-method SE for log odds ratio
>>> cior2 = np.log(a*d/(b*c)) + 1.96*np.sqrt(1/a+1/b+1/c+1/d)
>>> sgpvalue(est_lo=cior1, est_hi=cior2,
null_lo=np.log(1/1.5), null_hi=np.log(1.5))
sgpv(pdelta=array([0.]), deltagap=array([0.65691], dtype=object))
"""
import numpy as np
from termcolor import colored
from collections import namedtuple
# Convert inputs into np.array to emulate R behaviour
null_lo = np.asarray(null_lo, dtype=np.float64)
null_hi = np.asarray(null_hi, dtype=np.float64)
est_lo = np.asarray(est_lo, dtype=np.float64)
est_hi = np.asarray(est_hi, dtype=np.float64)
if null_hi.size != null_lo.size:
raise ValueError('null_lo and null_hi are of different lengths.')
if est_lo.size != est_hi.size:
raise ValueError('est_lo and est_hi are of different lengths.')
if null_lo.size != est_lo.size & null_lo.size > 1:
raise ValueError("'null_lo' and 'null_hi' must only have one argument\
or exactly as many arguments as 'est_hi' and 'est_lo'.")
if null_lo.size == 1:
null_lo = np.repeat(null_lo, est_lo.size)
null_hi = np.repeat(null_hi, est_hi.size)
# Compute Interval Lengths
est_len = np.array(est_hi) - np.array(est_lo)
null_len = np.array(null_hi) - np.array(null_lo)
# Warnings -> to be added once I know how to check for these
# -> might not be 100% correct yet
na_any = (np.any(est_lo is None) or np.any(est_hi is None) or
np.any(null_lo is None) or np.any(null_hi is None))
if (na_any is True) and warnings:
print(colored('At least one input is NA', 'red'))
if (na_any is not True) and np.any(est_len < 0) and np.any(null_len < 0) and warnings:
print('At least one interval length is negative')
if (na_any is not True) and np.any(np.isinf(abs(est_len) + abs(null_len))) and warnings:
print('At least one interval has infinite length')
if (na_any is not True) and (np.any(est_len == 0) or np.any(null_len == 0)) and warnings:
print('At least one interval has zero length')
# SGPV computation
overlap = np.minimum(est_hi, null_hi) - np.maximum(est_lo, null_lo)
overlap = np.maximum(overlap, 0)
bottom = np.minimum(2 * null_len, est_len)
pdelta = np.round(overlap / bottom, 7)
# Zero-length & Infinite-length intervals
np.where((overlap == 0), 0, pdelta)
# Overlap finite & non-zero but bottom = Inf
np.where(overlap != 0 & np.isfinite(overlap) &
np.isinf(bottom), inf_correction, pdelta)
# Interval estimate is a point (overlap=zero) but can be in null or equal null pt
pdelta[(est_len == 0) & (null_len >= 0) & (
est_lo >= null_lo) & (est_hi <= null_hi)] = 1
# Null interval is a point (overlap=zero) but is in interval estimate
pdelta[(est_len > 0) & (null_len == 0) & (
est_lo <= null_lo) & (est_hi >= null_hi)] = 1/2
# One-sided intervals with overlap; overlap == Inf & bottom==Inf
pdelta[np.isinf(overlap) & np.isinf(bottom) & (
(est_hi <= null_hi) | (est_lo >= null_lo))] = 1
pdelta[np.isinf(overlap) & np.isinf(bottom) &
((est_hi > null_hi) | (est_lo < null_lo))] = 1-inf_correction
# ## Interval estimate is entire real line and null interval is NOT entire real line
pdelta[np.isneginf(est_lo) & np.isposinf(est_hi)] = 1/2
# ## Null interval is entire real line
pdelta[np.isneginf(null_lo) & np.isposinf(null_hi)] = None
if np.any(null_lo == np.NINF) & np.any(null_hi == np.inf) and warnings:
print('At least one null interval is entire real line.')
# Return NA for nonsense intervals -> not working correctly yet
pdelta[(est_lo > est_hi) | (null_lo > null_hi)] = None
if (np.any(est_lo > est_hi) or np.any(null_lo > null_hi)) and warnings:
print('Some interval limits likely reversed')
# Calculate delta gap
deltagap = np.repeat(None, len(pdelta))
deltagap[(pdelta is not None) & (pdelta == 0)] = 0
gap = np.maximum(est_lo, null_lo) - np.minimum(null_hi, est_hi)
delta = null_len / 2
# Report unscaled delta gap if null has infinite length
delta[null_len == np.inf] = 1
# Report unscaled delta gap if null has length zero
delta[null_len == 0] = 1
dg = np.round(gap / delta, 7)
deltagap[pdelta is not None and (pdelta == 0)] = dg[
pdelta is not None and (pdelta == 0)]
sgpv = namedtuple('sgpv', 'pdelta, deltagap')
return sgpv(pdelta, deltagap)
|
#!/usr/bin/env python3
# Requires Python 3.6 and above
from os import chdir, makedirs, path
from sys import platform
from math import pi, sin, cos, inf
import webbrowser
import tkinter as tk
from tkinter import ttk, filedialog, messagebox
from ezdxf.r12writer import r12writer
from polylabel import polylabel
# Use Windows high DPI scaling
if platform == 'win32':
    try:
        from ctypes import OleDLL
        OleDLL('shcore').SetProcessDpiAwareness(1)
    except (ImportError, AttributeError, OSError):
        # Best effort: on older Windows versions (no shcore) the app simply
        # runs with the default scaling.
        pass
class MenuBar(tk.Menu):
    """Application menu bar: a File menu and a Help menu attached to root."""

    def __init__(self, root):
        super().__init__()
        # Disable legacy Tk tear-off menus globally.
        self.option_add("*tearOff", False)
        menu_file = tk.Menu(self)
        menu_file.add_command(label="Exit", command=root.quit)
        menu_help = tk.Menu(self)
        menu_help.add_command(label="Support", command=lambda: webbrowser.open(r"https://github.com/Archer4499/Maximum-Inscribed-Circle"))
        about_text = ("Reads data files containing polygons and outputs the co-ordinates and diameter "
                      "(and optionally points of the circle) of maximum inscribed circles to be "
                      "contained within the digitized polygons.\n\n"
                      "Read more at https://github.com/Archer4499/Maximum-Inscribed-Circle\n\n"
                      "This project is licensed under the MIT License.")
        menu_help.add_command(label="About",
                              command=lambda: messagebox.showinfo("About", about_text))
        self.add_cascade(menu=menu_file, label="File")
        self.add_cascade(menu=menu_help, label="Help")
        root.config(menu=self)
class NumEntry(ttk.Spinbox):
    """A Spinbox that only accepts integers within [min_val, max_val].

    Keystrokes must form an integer (deletions and blanks are allowed
    mid-edit); on focus-out the value is clamped to the allowed range, or
    reset to the initial value if it is not a number at all.
    """

    def __init__(self, length, min_val, max_val, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.length = length
        self.min_val = min_val
        self.max_val = max_val
        self.default_val = self.get()
        self.configure(from_=self.min_val, to=self.max_val, width=self.length + 1, validate="all",
                       validatecommand=(self.register(self.on_validate), "%P", "%d", "%V"))

    def on_validate(self, new_value, action_type, validate_type):
        if validate_type == "key":
            if action_type == "0" or not new_value.strip():
                # Deletions and blank values are always allowed mid-edit.
                return True
            try:
                int(new_value)
            except ValueError:
                self.bell()
                return False
            return True
        if validate_type == "focusout":
            try:
                value = int(new_value)
            except ValueError:
                # Not a number at all: restore the initial value.
                self.bell()
                self.set(self.default_val)
                return False
            if value < self.min_val:
                self.bell()
                self.set(self.min_val)
                return False
            if value > self.max_val:
                self.bell()
                self.set(self.max_val)
                return False
        return True
class Gui(tk.Tk):
def __init__(self):
super().__init__()
self.polygons = []
self.numPolygons = tk.IntVar()
self.numPolygons.set(0)
self.circles = []
# Settings
self.outputDXF = tk.IntVar()
self.outputDXF.set(1)
self.outputDXFCircle = tk.IntVar()
self.outputDXFCircle.set(0)
self.outputDXFDiameter = tk.IntVar()
self.outputDXFDiameter.set(1)
self.outputDXFLabel = tk.IntVar()
self.outputDXFLabel.set(0)
self.outputDXFPoints = tk.IntVar()
self.outputDXFPoints.set(0)
self.outputDXFPolyLines = tk.IntVar()
self.outputDXFPolyLines.set(1)
self.outputCircles = tk.IntVar()
self.outputCircles.set(0)
self.outputPoints = tk.IntVar()
self.outputPoints.set(1)
self.outputPointsNum = tk.StringVar()
self.outputPointsNum.set("16")
self.outputFolder = tk.StringVar()
self.outputFolder.set("./")
self.title("Maximum Inscribed Circle")
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
MenuBar(self)
mainframe = ttk.Frame(self)
mainframe.grid(column=0, row=0, sticky="NESW")
# Clear focus from text boxes on click
mainframe.bind("<1>", lambda event: mainframe.focus_set())
# TODO(Derek): Not sure how to set correct minsizes
# Uses 3 columns
self.initLoad(mainframe, 1)
mainframe.columnconfigure(1, weight=1)
mainframe.columnconfigure(2, weight=0)
mainframe.columnconfigure(3, weight=1)
ttk.Separator(mainframe, orient="vertical")\
.grid(column=4, row=0, rowspan=30, padx=5, pady=0, sticky="NS")
mainframe.rowconfigure(29, weight=1)
# Uses 2 columns
self.initSave(mainframe, 5)
mainframe.columnconfigure(5, weight=0, minsize=15)
mainframe.columnconfigure(6, weight=2)
def initLoad(self, parentFrame, column):
self.loadButton = ttk.Button(parentFrame, text="Open csv file/s", command=self.load)
self.loadButton.grid(column=column, row=0, padx=5, pady=5)
self.loadButton.focus_set()
ttk.Label(parentFrame, text="Number of polygons found:")\
.grid(column=column+1, row=0, sticky="E", padx=(5, 0), pady=0)
ttk.Label(parentFrame, textvariable=self.numPolygons)\
.grid(column=column+2, row=0, sticky="W", padx=(0, 5), pady=0)
ttk.Label(parentFrame, text="Preview of polygons and output circles:", anchor="center")\
.grid(column=column, columnspan=3, row=2, sticky="EW", padx=5, pady=0)
self.canvas = tk.Canvas(parentFrame, background="white")
self.canvas.grid(column=column, columnspan=3, row=3, rowspan=27, sticky="NESW", padx=(10, 5), pady=(0, 10))
self.canvas.bind("<Configure>", self.drawShapes)
def initSave(self, parentFrame, column):
ttk.Checkbutton(parentFrame, text="Output to DXF", variable=self.outputDXF, command=self.disableDXF)\
.grid(column=column, row=0, columnspan=2, sticky="W", padx=5, pady=(5, 0))
self.dxfCheckButtons = []
self.dxfCheckButtons.append(ttk.Checkbutton(parentFrame, text="Output Circle in DXF", variable=self.outputDXFCircle))
self.dxfCheckButtons.append(ttk.Checkbutton(parentFrame, text="Output Diameter Line in DXF", variable=self.outputDXFDiameter))
self.dxfCheckButtons.append(ttk.Checkbutton(parentFrame, text="Output Diameter Label in DXF", variable=self.outputDXFLabel))
self.dxfCheckButtons.append(ttk.Checkbutton(parentFrame, text="Output Points in DXF", variable=self.outputDXFPoints, command=self.disablePointsNum))
self.dxfCheckButtons.append(ttk.Checkbutton(parentFrame, text="Output PolyLine in DXF", variable=self.outputDXFPolyLines, command=self.disablePointsNum))
for i, button in enumerate(self.dxfCheckButtons):
button.grid(column=column+1, row=i+1, sticky="W", padx=5, pady=0)
ttk.Checkbutton(parentFrame, text="Output to Circles csv", variable=self.outputCircles)\
.grid(column=column, row=6, columnspan=2, sticky="W", padx=5, pady=5)
ttk.Checkbutton(parentFrame, text="Output to Points csv", variable=self.outputPoints, command=self.disablePointsNum)\
.grid(column=column, row=7, columnspan=2, sticky="W", padx=5, pady=5)
ttk.Label(parentFrame, text="Number of points on circle:")\
.grid(column=column, row=8, columnspan=2, sticky="W", padx=5, pady=(5, 0))
self.pointsNumCheckButton = NumEntry(4, 3, 9999, parentFrame, textvariable=self.outputPointsNum)
self.pointsNumCheckButton.grid(column=column, row=9, columnspan=2, sticky="W", padx=5, pady=0)
ttk.Label(parentFrame, text="Output Folder:")\
.grid(column=column, row=10, columnspan=2, sticky="W", padx=5, pady=(5, 0))
ttk.Entry(parentFrame, textvariable=self.outputFolder)\
.grid(column=column, row=11, columnspan=2, sticky="EW", padx=5, pady=0)
self.browseButton = ttk.Button(parentFrame, text="Browse", command=self.browse)
self.browseButton.grid(column=column, row=14, columnspan=2, padx=5, pady=(5, 0))
self.saveButton = ttk.Button(parentFrame, text="Save", command=self.save)
self.saveButton.grid(column=column, row=15, columnspan=2, padx=5, pady=(0, 5))
self.saveButton.state(["disabled"])
def disableDXF(self):
# Bound to dxf CheckButton
if self.outputDXF.get():
for button in self.dxfCheckButtons:
button.state(["!disabled"])
else:
for button in self.dxfCheckButtons:
button.state(["disabled"])
self.disablePointsNum()
def disablePointsNum(self):
# Bound to CheckButtons related to pointsNumCheckButton
if self.outputPoints.get() or self.outputDXF.get() and (self.outputDXFPoints.get() or self.outputDXFPolyLines.get()):
self.pointsNumCheckButton.state(["!disabled"])
else:
self.pointsNumCheckButton.state(["disabled"])
def load(self):
# Bound to loadButton
fileNames = filedialog.askopenfilenames(filetypes=[("All Data Files", ".csv .str .txt .arch_d"),
("CSV", ".csv"),
("STR", ".str"),
("Text", ".txt"),
("Vulcan Data", ".arch_d")])
if not fileNames:
return
polygons = []
for fileName in fileNames:
polygons.extend(parseData(fileName))
if not polygons:
return
circles = []
for polygon in polygons:
# TODO(Derek): polylabel sometimes infinite loops if bad data is given
# contained multiple polygons in one, with 0, 0 in between.
# circle is formatted as [[x,y,z],radius]
circle = list(polylabel(polygon[0], precision=0.001, with_distance=True))
if not circle[1]:
prettyPolygon = [[polygon[0][i][0], polygon[0][i][1], polygon[1][i]] for i in range(len(polygon[0]))]
messagebox.showerror(title="Error", message=f"Could not create circle from polygon:\n{prettyPolygon}")
return
circle[0].append(sum(polygon[1])/len(polygon[1]))
circles.append(circle)
self.polygons = polygons
self.circles = circles
self.numPolygons.set(len(polygons))
self.saveButton.state(["!disabled"])
self.drawShapes()
    def drawShapes(self, _=None):
        """Redraw all loaded polygons and their inscribed circles on the canvas.

        Bound to self.canvas resize event.
        _ argument to allow being used as resize callback.
        All shapes are uniformly scaled to fit the canvas with a 10px margin
        and centred along whichever axis has spare room.
        """
        if self.polygons:
            # Clear the canvas before drawing new shapes
            self.canvas.delete("all")
            # Colour cycle so adjacent polygons/circles are distinguishable
            colours = ["#e6194B", "#3cb44b", "#ffe119", "#4363d8", "#f58231",
                       "#42d4f4", "#f032e6", "#fabebe", "#469990", "#e6beff",
                       "#9A6324", "#fffac8", "#800000", "#aaffc3", "#000075",
                       "#a9a9a9", "#000000"]
            xMin = inf
            xMax = 0
            yMin = inf
            yMax = 0
            # Polygon max and mins (bounding box over every polygon's points)
            for polygon in self.polygons:
                for point in polygon[0]:
                    if point[0] < xMin:
                        xMin = point[0]
                    if point[0] > xMax:
                        xMax = point[0]
                    if point[1] < yMin:
                        yMin = point[1]
                    if point[1] > yMax:
                        yMax = point[1]
            canvasWidth = self.canvas.winfo_width()
            canvasHeight = self.canvas.winfo_height()
            # Flip y-axis because origin of canvas is top left
            xCanvasMin = 10
            xCanvasMax = canvasWidth - 10
            yCanvasMin = canvasHeight - 10
            yCanvasMax = 10
            xScale = (xCanvasMax-xCanvasMin)/(xMax-xMin)
            yScale = (yCanvasMin-yCanvasMax)/(yMax-yMin)
            # Use the smaller scale so the drawing fits in both dimensions
            if xScale < yScale:
                scale = xScale
                # Centre vertically
                yCanvasMin -= (canvasHeight - scale*(yMax-yMin)) / 2.0
            else:
                scale = yScale
                # Centre horizontally
                xCanvasMin += (canvasWidth - scale*(xMax-xMin)) / 2.0
            for i, polygon in enumerate(self.polygons):
                scaledPoints = []
                for point in polygon[0]:
                    # x then y interleaved, as create_polygon expects; -scale flips y
                    scaledPoints.append((point[0]-xMin)*scale + xCanvasMin)
                    scaledPoints.append((point[1]-yMin)*-scale + yCanvasMin)
                self.canvas.create_polygon(scaledPoints, fill="", outline=colours[i%len(colours)], width=1)
            for i, circle in enumerate(self.circles):
                radius = circle[1]
                x = (circle[0][0]-xMin)*scale + xCanvasMin
                y = (circle[0][1]-yMin)*-scale + yCanvasMin
                x1 = (circle[0][0]-radius-xMin)*scale + xCanvasMin
                x2 = (circle[0][0]+radius-xMin)*scale + xCanvasMin
                y1 = (circle[0][1]-radius-yMin)*-scale + yCanvasMin
                y2 = (circle[0][1]+radius-yMin)*-scale + yCanvasMin
                # Degenerate oval marks the centre point; the second oval is the circle outline
                self.canvas.create_oval(x, y, x, y, outline=colours[i%len(colours)])
                self.canvas.create_oval(x1, y1, x2, y2, outline=colours[i%len(colours)])
def browse(self):
# Bound to browse_button
directory = filedialog.askdirectory(mustexist=True)
if not directory:
return
try:
chdir(directory)
except OSError as e:
messagebox.showerror(title="Error", message=repr(e))
return
self.outputFolder.set(directory)
def save(self):
# Bound to saveButton
dxfFileName = "circles.dxf"
circlesFileName = "circles.csv"
pointsFileName = "points.csv"
if not self.outputFolder.get():
messagebox.showerror(title="Error", message="Output Folder not set.")
return
try:
if self.outputFolder.get()[-1] != "/":
makedirs(self.outputFolder.get(), exist_ok=True)
self.outputFolder.set(self.outputFolder.get()+"/")
else:
makedirs(self.outputFolder.get()[:-1], exist_ok=True)
except OSError:
messagebox.showerror(title="Error", message=f"Output Folder: {self.outputFolder.get()} is not able to be created.")
return
if self.outputPoints.get() or self.outputDXFPoints.get() or self.outputDXFPolyLines.get():
if int(self.outputPointsNum.get()) < 3:
messagebox.showerror(title="Error", message="Number of points on circle should be greater than 2.")
return
if self.outputDXF.get():
if self.outputDXFCircle.get() or self.outputDXFDiameter.get() or self.outputDXFLabel.get() or self.outputDXFPoints.get() or self.outputDXFPolyLines.get():
self.saveDXF(self.outputFolder.get()+dxfFileName)
else:
messagebox.showerror(title="Error", message="Output to DXF is selected, at least one of the sub options needs to also be selected.")
return
if self.outputCircles.get():
self.saveCircles(self.outputFolder.get()+circlesFileName)
if self.outputPoints.get():
self.savePoints(self.outputFolder.get()+pointsFileName)
messagebox.showinfo(title="Success", message="Saved File/s")
    def saveDXF(self, outFileNameDXF):
        """Write the circles to a DXF R12 file using the selected DXF sub-options.

        Each circle's entities are placed on their own layer "Circle<i>".
        Returns 0 on success, 1 if the file could not be written (an error
        dialog is shown in that case).
        """
        try:
            with r12writer(outFileNameDXF) as dxf:
                for i, circle in enumerate(self.circles):
                    pointsNum = int(self.outputPointsNum.get())
                    centre = circle[0]
                    radius = circle[1]
                    x = centre[0]
                    x1 = x + radius
                    x2 = x - radius
                    y = centre[1]
                    z = centre[2]
                    # Angular step between consecutive approximation points
                    arc = 2 * pi / pointsNum
                    # Draw the circle
                    if self.outputDXFCircle.get():
                        dxf.add_circle(centre, radius=radius, layer="Circle"+str(i))
                    # Draw the diameter line
                    if self.outputDXFDiameter.get():
                        dxf.add_line((x1, y, z), (x2, y, z), layer="Circle"+str(i))
                    # Draw the diameter label
                    if self.outputDXFLabel.get():
                        diameter = radius * 2.0 # polylabel gives the radius of the circle, we want the diameter
                        lineCentre = [(x2-x1)/2.0 + x1, y + 0.2, z] # Centre of the line with a slight offset
                        dxf.add_text(f"{diameter:.2f}", lineCentre, align="CENTER", layer="Circle"+str(i))
                    # Draw the points approximating circle
                    if self.outputDXFPoints.get():
                        # For each circle calculate outputPointsNum number of points around it
                        for j in range(pointsNum):
                            angle = arc * j
                            currX = x + radius*cos(angle)
                            currY = y + radius*sin(angle)
                            dxf.add_point((currX, currY, z), layer="Circle"+str(i))
                    # Draw the polylines approximating circle
                    if self.outputDXFPolyLines.get():
                        # For each circle calculate outputPointsNum number of points around it
                        points = [(x+radius*cos(arc*j), y+radius*sin(arc*j), z) for j in range(pointsNum)]
                        # Repeat the first point to close the loop
                        points.append(points[0])
                        dxf.add_polyline(points, layer="Circle"+str(i))
        except OSError:
            messagebox.showerror(title="Error", message=f"Could not write to output file: {outFileNameDXF}")
            return 1
        return 0
def saveCircles(self, outFileNameCircles):
try:
with open(outFileNameCircles, "w") as f:
for circle in self.circles:
diameter = circle[1] * 2.0 # polylabel gives the radius of the circle, we want to print the diameter
# Output to 2 decimal places
output = f"{circle[0][0]:.2f},{circle[0][1]:.2f},{circle[0][2]:.2f},{diameter:.2f}\n"
f.write(output)
except OSError:
messagebox.showerror(title="Error", message=f"Could not write to output file: {outFileNameCircles}")
return 1
return 0
def savePoints(self, outFileNamePoints):
pointsNum = int(self.outputPointsNum.get())
try:
with open(outFileNamePoints, "w") as f:
for circle in self.circles:
# For each circle calculate outputPointsNum number of points around it
arc = 2 * pi / pointsNum
for i in range(pointsNum):
angle = arc * i
x = circle[0][0] + circle[1]*cos(angle)
y = circle[0][1] + circle[1]*sin(angle)
# Output to 2 decimal places
output = f"{x:.2f},{y:.2f},{circle[0][2]:.2f}\n"
f.write(output)
f.write("\n")
except OSError:
messagebox.showerror(title="Error", message=f"Could not write to output file: {outFileNamePoints}")
return 1
return 0
class AskColumns(tk.Toplevel):
    """Modal dialog letting the user map file columns to X/Y/Z/ID.

    Shows a scrollable preview (first ~100 lines) of the file split by the
    chosen delimiter, with a Combobox per column. After the dialog closes,
    self.result is [x, y, z, id] column indices (-1 for unassigned slots) or
    None if cancelled; self.currSeparator holds the chosen delimiter.
    """
    def __init__(self, fileName):
        # NOTE: the constructor is modal — it blocks (wait_window) until
        # the user confirms or cancels.
        self.parent = tk._default_root # pylint: disable=W0212
        super().__init__(self.parent)
        self.result = None
        self.separatorList = {"Comma":",", "Whitespace":" ", "Colon":":", "Semicolon":";", "Equals Sign":"="}
        self.currSeparator = self.separatorList["Comma"]
        self.fileName = fileName
        self.csvLines = []
        self.maxWidth = 0
        self.loadLines()
        self.withdraw() # remain invisible for now
        # If the master is not viewable, don't
        # make the child transient, or else it
        # would be opened withdrawn
        if self.parent.winfo_viewable():
            self.transient(self.parent)
        self.title("Select Columns")
        # Layout
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.mainframe = ttk.Frame(self)
        self.mainframe.bind("<1>", lambda event: self.mainframe.focus_set())
        self.mainframe.grid(column=0, row=0, sticky="NESW")
        self.mainframe.columnconfigure(0, weight=1)
        self.mainframe.rowconfigure(1, weight=1)
        descLabel = ttk.Label(self.mainframe, text=f"Select the columns that contain the X,Y,Z co-ordinates of the polygons.\n"
                              "If the polygons aren't separated by non-numerical lines, a column needs to be chosen to use as an ID string. "
                              "The ID needs to be the same for each point in a polygon and different or not continuous between polygons.\n"
                              "Use the selection box at the bottom to change the delimiter if the file isn't comma delimited.",
                              anchor="w", justify="left", wraplength=500)
        descLabel.grid(column=0, row=0, padx=10, pady=10, sticky="EW")
        # Re-wrap the description whenever the label is resized
        descLabel.bind('<Configure>', lambda e: descLabel.config(wraplength=descLabel.winfo_width()))
        # NOTE(Derek): possibly add tooltip for showing full path? (https://stackoverflow.com/questions/20399243/display-message-when-hovering-over-something-with-mouse-cursor-in-python)
        ttk.Label(self.mainframe, text=f"File: {path.basename(self.fileName)}",
                  anchor="e", justify="right", wraplength=300)\
            .grid(column=1, row=0, padx=10, pady=10, sticky="ESW")
        self.data(self.mainframe)
        self.buttonbox(self.mainframe)
        ##
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        # become visible now
        self.deiconify()
        # wait for window to appear on screen before calling grab_set
        self.wait_visibility()
        self.grab_set()
        self.focus_force()
        self.wait_window(self)
    def destroy(self):
        """Destroy the Toplevel window."""
        tk.Toplevel.destroy(self)
    def yview(self, *args):
        """Scroll every preview column canvas in lockstep (scrollbar command)."""
        for canvas in self.dataCanvases:
            canvas.yview(*args)
    def _bind_mouse(self, _=None):
        # Bound to <Enter> on the preview frame: capture wheel events globally
        self.dataFrame.bind_all("<4>", self._on_mousewheel)
        self.dataFrame.bind_all("<5>", self._on_mousewheel)
        self.dataFrame.bind_all("<MouseWheel>", self._on_mousewheel)
    def _unbind_mouse(self, _=None):
        # Bound to <Leave> on the preview frame: release the global wheel bindings
        self.dataFrame.unbind_all("<4>")
        self.dataFrame.unbind_all("<5>")
        self.dataFrame.unbind_all("<MouseWheel>")
    def _on_mousewheel(self, event):
        """Scroll all preview canvases together on mouse-wheel input."""
        # Linux uses event.num; Windows / Mac uses event.delta
        if event.num == 4 or event.delta > 0:
            for canvas in self.dataCanvases:
                canvas.yview_scroll(-1, "units")
        elif event.num == 5 or event.delta < 0:
            for canvas in self.dataCanvases:
                canvas.yview_scroll(1, "units")
    def data(self, master):
        """Build the column-preview area: one header Combobox + canvas per column."""
        def onFrameConfigure(canvas):
            # Reset the scroll region to encompass the inner frame
            x1, y1, x2, y2 = canvas.bbox("all")
            canvas.configure(scrollregion=(x1, y1, x2, y2))
            canvas.configure(width=x2-x1)
        self.dataFrame = ttk.Frame(master, relief="sunken", borderwidth=4)
        self.dataFrame.grid(column=0, columnspan=2, row=1, padx=10, pady=0, sticky="NESW")
        # self.dataFrame.grid(column=0, row=1, padx=10, pady=0, sticky="NESW")
        self.dataFrame.rowconfigure(1, weight=1)
        self.dataFrame.bind("<Enter>", self._bind_mouse)
        self.dataFrame.bind("<Leave>", self._unbind_mouse)
        scroll = tk.Scrollbar(self.dataFrame, orient="vertical", command=self.yview)
        scroll.grid(column=self.maxWidth, row=1, sticky="NS")
        options = ["Ignore", "X", "Y", "Z", "ID"]
        self.selectionBoxes = []
        self.dataCanvases = []
        for column in range(self.maxWidth):
            self.dataFrame.columnconfigure(column, weight=1)
            # Header
            header = ttk.Frame(self.dataFrame, relief="groove", borderwidth=1)
            header.grid(column=column, row=0, sticky="EW")
            header.columnconfigure(0, weight=1)
            ttk.Label(header, text=column, anchor="center")\
                .grid(column=0, row=0, sticky="EW")
            selection = ttk.Combobox(header, values=options, width=5, state="readonly")
            selection.grid(column=0, row=1, sticky="EW")
            selection.bind("<<ComboboxSelected>>", self.selected)
            selection.current(0)
            self.selectionBoxes.append(selection)
            # Show a preview of some of the data file
            canvas = tk.Canvas(self.dataFrame, borderwidth=0, highlightthickness=0)
            canvas.grid(column=column, row=1, sticky="NSW")
            canvasFrame = ttk.Frame(canvas, borderwidth=0)
            canvasFrame.grid(column=0, row=0, sticky="NESW")
            canvas.create_window((0, 0), window=canvasFrame, anchor="nw")
            canvas.configure(yscrollcommand=scroll.set)
            # canvas=canvas default binds the current loop value (avoids late binding)
            canvasFrame.bind("<Configure>", lambda event, canvas=canvas: onFrameConfigure(canvas))
            self.dataCanvases.append(canvas)
            for row, line in enumerate(self.csvLines):
                if column < len(line):
                    token = line[column]
                    ttk.Label(canvasFrame, text=token.strip(), relief="sunken", borderwidth=1)\
                        .grid(column=0, row=row, padx=1, sticky="EW")
                else:
                    # Short line: pad with an empty cell so rows stay aligned
                    ttk.Label(canvasFrame, text="", borderwidth=1)\
                        .grid(column=0, row=row, padx=1, sticky="EW")
    def buttonbox(self, master):
        """Build the delimiter selector plus OK/Cancel buttons and key bindings."""
        box = ttk.Frame(master)
        box.grid(column=0, columnspan=2, row=2, padx=10, pady=10, sticky="EW")
        # NOTE(Derek): possibly allow entering characters
        ttk.Label(box, text="Delimiter:")\
            .grid(column=0, row=0, padx=(5, 0), pady=5, sticky="E")
        self.separatorSelect = ttk.Combobox(box, values=list(self.separatorList), width=9, state="readonly")
        self.separatorSelect.grid(column=1, row=0, padx=(0, 5), pady=5, sticky="W")
        self.separatorSelect.bind("<<ComboboxSelected>>", self.separatorSet)
        self.separatorSelect.current(0)
        self.okButton = ttk.Button(box, text="OK", width=10, command=self.ok, default=tk.ACTIVE)
        self.okButton.grid(column=2, row=0, padx=5, pady=5, sticky="E")
        ttk.Button(box, text="Cancel", width=10, command=self.cancel)\
            .grid(column=3, row=0, padx=5, pady=5, sticky="E")
        box.columnconfigure(1, weight=1)
        self.bind("<Return>", self.ok)
        self.bind("<Escape>", self.cancel)
    def selected(self, event):
        """Keep X/Y/Z/ID assignments unique across the column Comboboxes."""
        current = event.widget.current()
        # Check for other selections having the same value if not "Ignore"
        # and reset any to "Ignore".
        if current > 0:
            for selection in self.selectionBoxes:
                # Only check other widgets
                if selection is not event.widget:
                    if selection.current() == current:
                        selection.current(0)
    def separatorSet(self, event):
        """Reload and rebuild the preview when the delimiter selection changes."""
        newSeparator = self.separatorList[event.widget.get()]
        if newSeparator != self.currSeparator:
            self.currSeparator = newSeparator
            self.loadLines()
            self.dataFrame.grid_forget()
            self.dataFrame.destroy()
            self.data(self.mainframe)
    def loadLines(self):
        """Read up to ~100 lines of the file, split by the current delimiter.

        Populates self.csvLines (token lists) and self.maxWidth (widest row).
        """
        self.csvLines = []
        self.maxWidth = 0
        try:
            with open(self.fileName, "r") as f:
                for i, line in enumerate(f):
                    self.csvLines.append(smartSplit(line.strip(), self.currSeparator))
                    self.maxWidth = max(self.maxWidth, len(self.csvLines[i]))
                    # Only take at most 100 lines
                    if i > 100:
                        break
        except OSError:
            messagebox.showerror(title="Error", message=f"Could not open input file:\n{self.fileName}")
            return
    def getSelections(self):
        """Return [x, y, z, id] column indices chosen in the Comboboxes (-1 = unset)."""
        selections = [-1, -1, -1, -1]
        for i, selection in enumerate(self.selectionBoxes):
            current = selection.current()
            if current > 0:
                # current is the 1-based position within ["Ignore","X","Y","Z","ID"]
                selections[current-1] = i
        return selections
    def ok(self, _=None):
        """Validate the selections; store them in self.result and close the dialog."""
        # _ to allow event binding
        # Make sure X,Y,Z are all selected (>0)
        selections = self.getSelections()
        if min(selections[:3]) < 0:
            self.bell()
            # Flash effect
            self.after(70, self.mainframe.focus_set)
            self.after(140, self.okButton.focus_set)
            self.after(210, self.mainframe.focus_set)
            self.after(280, self.okButton.focus_set)
            self.after(350, self.mainframe.focus_set)
            return
        self.withdraw()
        self.update_idletasks()
        self.result = self.getSelections()
        self.cancel()
    def cancel(self, _=None):
        """Close the dialog, returning focus to the parent window."""
        # _ to allow event binding
        self.parent.focus_set()
        self.destroy()
class AskAuto(tk.Toplevel):
    """Modal dialog asking how to process a file in an unrecognised format.

    After the dialog closes, self.result is True (parse automatically),
    False (manually specify columns), or None (skip the file).
    """
    def __init__(self, baseFileName):
        # NOTE: the constructor is modal — it blocks (wait_window) until
        # the user picks Auto, Manual, or Skip.
        self.parent = tk._default_root # pylint: disable=W0212
        super().__init__(self.parent)
        self.result = None
        self.baseFileName = baseFileName
        self.withdraw() # remain invisible for now
        # If the master is not viewable, don't
        # make the child transient, or else it
        # would be opened withdrawn
        if self.parent.winfo_viewable():
            self.transient(self.parent)
        self.title("Process File")
        # Layout
        self.resizable(False, False)
        # Position the dialog near the parent's top-left corner
        self.geometry("+%d+%d" % (self.parent.winfo_rootx()+50,
                                  self.parent.winfo_rooty()+50))
        self.mainframe = ttk.Frame(self)
        self.mainframe.bind("<1>", lambda event: self.mainframe.focus_set())
        self.mainframe.grid(column=0, row=0, sticky="NESW")
        self.mainframe.columnconfigure(0, weight=1)
        self.body(self.mainframe)
        self.buttonbox(self.mainframe)
        ##
        self.protocol("WM_DELETE_WINDOW", self.skip)
        # become visible now
        self.deiconify()
        # wait for window to appear on screen before calling grab_set
        self.wait_visibility()
        self.grab_set()
        self.focus_force()
        self.wait_window(self)
    def body(self, master):
        """Build the explanatory text plus clickable GitHub/email report links."""
        bodyFrame = ttk.Frame(master)
        bodyFrame.grid(column=0, columnspan=4, row=0, padx=20, pady=20, sticky="NESW")
        bodyFrame.columnconfigure(3, weight=1)
        ttk.Label(bodyFrame, wraplength=400, text=f"{self.baseFileName}", font="-weight bold")\
            .grid(column=0, columnspan=4, row=0, sticky="NESW")
        ttk.Label(bodyFrame, wraplength=400, text="Is not in a recognised format.\nAttempt to parse automatically or manually specify columns?")\
            .grid(column=0, columnspan=4, row=1, sticky="NESW")
        ttk.Label(bodyFrame, wraplength=400, text="If you would like this format to be automatically processed, please report an issue to the")\
            .grid(column=0, columnspan=4, row=2, sticky="NESW")
        linkLabel = ttk.Label(bodyFrame, foreground="#0645AD", font="-underline 1", anchor="w",
                              text=r"GitHub")
        linkLabel.grid(column=0, row=3, sticky="NW")
        linkLabel.bind("<Button-1>", lambda event: webbrowser.open(r"https://github.com/Archer4499/Maximum-Inscribed-Circle"))
        ttk.Label(bodyFrame, anchor="w", text="page or email")\
            .grid(column=1, row=3, sticky="NW")
        emailLabel = ttk.Label(bodyFrame, foreground="#0645AD", font="-underline 1", anchor="w",
                               text=r"[email protected]")
        emailLabel.grid(column=2, row=3, sticky="NW")
        emailLabel.bind("<Button-1>", lambda event: webbrowser.open(r"mailto:[email protected]&subject=Add%20support%20for%20new%20file%20format"))
    def buttonbox(self, master):
        """Build the Auto/Manual/Skip buttons and the Return/Escape key bindings."""
        autoButton = ttk.Button(master, text="Auto", command=self.auto, default=tk.ACTIVE)
        autoButton.grid(column=1, row=1, padx=0, pady=10, sticky="E")
        autoButton.focus_set()
        ttk.Button(master, text="Manual", command=self.manual)\
            .grid(column=2, row=1, padx=5, pady=10, sticky="E")
        ttk.Button(master, text="Skip", command=self.skip)\
            .grid(column=3, row=1, padx=10, pady=10, sticky="E")
        self.bind("<Return>", self.auto)
        self.bind("<Escape>", self.skip)
    def auto(self, _=None):
        """Choose automatic parsing and close the dialog."""
        self.result = True
        self.skip()
    def manual(self, _=None):
        """Choose manual column selection and close the dialog."""
        self.result = False
        self.skip()
    def skip(self, _=None):
        """Close the dialog (leaving self.result as-is; None means skip)."""
        self.parent.focus_set()
        self.destroy()
def smartSplit(line, separator):
    """Split *line* on *separator*.

    A whitespace separator uses str.split() with no argument, so runs of
    consecutive whitespace characters count as a single delimiter.
    """
    return line.split() if separator.isspace() else line.split(separator)
def parseWithoutID(fileName, columns, separator):
    """Parse polygons from *fileName* without an ID column.

    columns is [x, y, z, -1] (0-based column indices; the -1 marks "no ID").
    Consecutive lines whose x/y/z columns parse as floats form one polygon;
    any other line (or end of file) closes the current polygon. A closed
    polygon with fewer than 3 points is an error. Returns a list of
    [points, elevations] pairs, or [] on error (an error dialog is shown).
    """
    polygons = []
    points = []
    elevations = []
    try:
        with open(fileName, "r") as f:
            for line in f:
                tokens = smartSplit(line, separator)
                # Make sure the line has at least as many tokens as required, otherwise treat it as empty.
                # Fix: indices are 0-based, so the line must have MORE than max(columns)
                # tokens; the previous ">=" let a line with exactly max(columns) tokens
                # through and tokens[max(columns)] raised an uncaught IndexError
                # (only ValueError is caught below). parseWithID already uses ">".
                if len(tokens) > max(columns):
                    try:
                        x = float(tokens[columns[0]])
                        y = float(tokens[columns[1]])
                        z = float(tokens[columns[2]])
                        points.append([x, y])
                        elevations.append(z)
                        continue # for line in f
                    except ValueError:
                        pass
                # If either empty line or floats can't be found in specified columns treat as end of polygon
                if points:
                    if len(points) < 3:
                        messagebox.showerror(title="Error", message=f"Not enough points in number {len(polygons)} polygon in file: {fileName}")
                        return []
                    polygons.append([points, elevations])
                    points = []
                    elevations = []
            # Flush the final polygon if the file didn't end with a separator line
            if points:
                if len(points) < 3:
                    messagebox.showerror(title="Error", message=f"Not enough points in number {len(polygons)} polygon in file: {fileName}")
                    return []
                polygons.append([points, elevations])
    except OSError:
        messagebox.showerror(title="Error", message=f"Could not open input file: {fileName}")
        return []
    return polygons
def parseWithID(fileName, columns, separator):
    """Parse polygons from *fileName* using an ID column to separate polygons.

    columns is [x, y, z, id] (0-based column indices). Points sharing a
    consecutive run of the same ID value form one polygon; an ID change, a
    malformed line, or end of file closes the current polygon. Unlike
    parseWithoutID, polygons with fewer than 3 points are silently dropped
    rather than treated as errors. Returns a list of [points, elevations]
    pairs, or [] if the file cannot be opened.
    """
    # Parse columns given in columns[] with ID
    polygons = []
    points = []
    elevations = []
    try:
        with open(fileName, "r") as f:
            currID = ""
            for line in f:
                tokens = smartSplit(line, separator)
                # Make sure the line has at least as many tokens as required, otherwise treat it as empty
                if len(tokens) > max(columns):
                    try:
                        newID = tokens[columns[3]]
                        # If ID is different we are in a new object
                        if newID != currID:
                            if len(points) >= 3:
                                polygons.append([points, elevations])
                            points = []
                            elevations = []
                            currID = newID
                        # NOTE(review): a ValueError below (after currID was
                        # already updated) falls through and treats the line
                        # as a polygon terminator — order-sensitive.
                        x = float(tokens[columns[0]])
                        y = float(tokens[columns[1]])
                        z = float(tokens[columns[2]])
                        points.append([x, y])
                        elevations.append(z)
                        continue # for line in f
                    except ValueError:
                        pass
                # If either empty line or floats can't be found in specified columns treat as end of polygon
                if len(points) >= 3:
                    polygons.append([points, elevations])
                points = []
                elevations = []
            # Flush the final polygon if the file ended mid-run
            if len(points) >= 3:
                polygons.append([points, elevations])
    except OSError:
        messagebox.showerror(title="Error", message=f"Could not open input file: {fileName}")
        return []
    return polygons
def parseUnknown(fileName):
    """Attempt to parse a file in an unknown format.

    Tries each candidate separator in turn; a line contributes a point when it
    contains 3 consecutive float-convertible tokens (x, y, z), and any other
    line closes the current polygon. The first separator that yields at least
    one polygon wins. Returns a list of [points, elevations] pairs, or [] if
    the file cannot be opened or nothing parses.
    """
    def firstTriple(tokens):
        # Return the first run of 3 consecutive float-convertible tokens, else None
        for start in range(len(tokens) - 2):
            try:
                return (float(tokens[start]),
                        float(tokens[start + 1]),
                        float(tokens[start + 2]))
            except ValueError:
                pass
        return None

    separators = [",", " ", ";"]
    polygons = []
    try:
        with open(fileName, "r") as f:
            for separator in separators:
                # Go back to start of file for each separator attempt
                f.seek(0)
                points = []
                elevations = []
                for line in f:
                    triple = firstTriple(smartSplit(line, separator))
                    if triple is not None:
                        points.append([triple[0], triple[1]])
                        elevations.append(triple[2])
                    else:
                        # Line too short or without 3 floats: terminates the
                        # current polygon (kept only if it has >= 3 points)
                        if len(points) >= 3:
                            polygons.append([points, elevations])
                        points = []
                        elevations = []
                if len(points) >= 3:
                    polygons.append([points, elevations])
                if polygons:
                    # Found polygons with this separator; stop trying others
                    break
    except OSError:
        messagebox.showerror(title="Error", message=f"Could not open input file: {fileName}")
        return []
    return polygons
def parseData(fileName):
    """Parse polygons from *fileName*, returning a list of [points, elevations] pairs.

    Parses data from the file fileName in the CSV format GEM4D outputs
    and attempts to parse similar CSV files; main requirements are:
      - At least one line, without 3 consecutive numbers, separating each polygon
      - Comma separated values
      - 3 consecutive numbers on each polygon line interpreted as x,y,z
    Known formats are recognised from the first line; unknown formats prompt
    the user (AskAuto, then optionally AskColumns). Returns [] when the file
    is empty, unreadable, skipped by the user, or yields no polygons.
    """
    try:
        with open(fileName, "r") as f:
            firstLine = f.readline()
            if not firstLine:
                messagebox.showerror(title="Error", message=f"File: {fileName} is empty")
                return []
            # Reset position in file to beginning after reading first line
    except OSError:
        messagebox.showerror(title="Error", message=f"Could not open input file: {fileName}")
        return []
    polygons = []
    columns = []
    separator = ","
    firstToken = smartSplit(firstLine, separator)[0]
    baseFileName = path.basename(fileName)
    # Check for recognised formats else ask user to specify columns.
    # columns is [x, y, z, id]; id == -1 means "no ID column".
    if ".csv" in baseFileName and firstToken == "DHid":
        # GEM4D csv format
        columns = [1, 2, 3, -1]
    elif ".csv" in baseFileName and "Leapfrog" in firstToken and "v1.2" in firstToken:
        # Leapfrog v1.2 csv format
        columns = [0, 1, 2, -1]
    elif ".arch_d" in baseFileName and firstLine.split()[0] == "FMT_3":
        # Vulcan arch_d format
        columns = [2, 3, 4, -1]
        separator = " "
    elif "SimpleFormat" in firstToken:
        # Custom SimpleFormat
        columns = [0, 1, 2, -1]
    else:
        # TODO(Derek): checkbox to allow temp suppress warning? (while program still open)
        ask = AskAuto(baseFileName)
        answer = ask.result
        if answer is None:
            # Skip file
            return []
        elif not answer:
            # Ask user to specify columns
            ask = AskColumns(fileName)
            columns = ask.result
            separator = ask.currSeparator
            if columns is None:
                # Cancel, skip file
                return []
    # Parse: dispatch on whether an ID column was assigned; empty columns
    # means the user chose automatic parsing of an unknown format
    if columns:
        if columns[3] < 0:
            polygons = parseWithoutID(fileName, columns, separator)
        else:
            polygons = parseWithID(fileName, columns, separator)
    else:
        polygons = parseUnknown(fileName)
    if not polygons:
        messagebox.showerror(title="Error", message=f"No polygons found in file: {fileName}")
        return []
    return polygons
if __name__ == '__main__':
    # Launch the GUI event loop when run as a script
    Gui().mainloop()
|
import os
import copy
import torch
import torch.nn as nn
import numpy as np
from .network_blocks import Focus, SPPBottleneck, BaseConv, CoordAtt
from .bev_transformer import DropPath, TransBlock
from .swin import BasicLayer
class LayerNormChannel(nn.Module):
    """
    LayerNorm applied across the channel dimension only.
    Input: tensor in shape [B, C, H, W]
    """
    def __init__(self, num_channels, eps=1e-05):
        super().__init__()
        # Per-channel affine parameters (identity at initialization)
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps
    def forward(self, x):
        mean = x.mean(1, keepdim=True)
        var = (x - mean).pow(2).mean(1, keepdim=True)
        normed = (x - mean) / torch.sqrt(var + self.eps)
        # Broadcast the [C] affine parameters over H and W
        scale = self.weight.unsqueeze(-1).unsqueeze(-1)
        shift = self.bias.unsqueeze(-1).unsqueeze(-1)
        return scale * normed + shift
class GroupNorm(nn.GroupNorm):
    """
    nn.GroupNorm fixed to a single group spanning all channels.
    Input: tensor in shape [B, C, H, W]
    """
    def __init__(self, num_channels, **kwargs):
        # One group == normalize over (C, H, W) jointly
        super().__init__(num_groups=1, num_channels=num_channels, **kwargs)
class Pooling(nn.Module):
    """
    PoolFormer token mixer: average pooling minus the identity.
    --pool_size: pooling size
    """
    def __init__(self, pool_size=3):
        super().__init__()
        # Stride-1, same-padding average pool; excluding padded cells keeps
        # border averages unbiased
        self.pool = nn.AvgPool2d(pool_size, stride=1,
                                 padding=pool_size//2, count_include_pad=False)
    def forward(self, x):
        pooled = self.pool(x)
        return pooled - x
class Mlp(nn.Module):
    """
    Two-layer MLP realized with 1x1 convolutions.
    Input: tensor with shape [B, C, H, W]
    """
    def __init__(self, in_features, hidden_features=None,
                 out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Default hidden/output widths to the input width
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
        self.act = act_layer()
        self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
        self.drop = nn.Dropout(drop)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Xavier weights / zero bias for every conv in this module
        if isinstance(m, nn.Conv2d):
            nn.init.xavier_normal_(m.weight)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class PoolFormerBlock(nn.Module):
    """
    One PoolFormer block: pooling token mixer and channel MLP, each wrapped
    in a pre-norm residual branch.
    --dim: embedding dim
    --pool_size: pooling size
    --mlp_ratio: mlp expansion ratio
    --act_layer: activation
    --norm_layer: normalization
    --drop: dropout rate
    --drop path: Stochastic Depth,
        refer to https://arxiv.org/abs/1603.09382
    --use_layer_scale, --layer_scale_init_value: LayerScale,
        refer to https://arxiv.org/abs/2103.17239
    """
    def __init__(self, dim, pool_size=3, mlp_ratio=4.,
                 act_layer=nn.GELU, norm_layer=GroupNorm,
                 drop=0., drop_path=0.,
                 use_layer_scale=True, layer_scale_init_value=1e-5):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.token_mixer = Pooling(pool_size=pool_size)
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer, drop=drop)
        # Stochastic depth regularizes deep stacks; identity when rate is zero
        self.drop_path = DropPath(drop_path) if drop_path > 0. \
            else nn.Identity()
        self.use_layer_scale = use_layer_scale
        if use_layer_scale:
            # Learnable per-channel residual scaling (LayerScale)
            self.layer_scale_1 = nn.Parameter(
                layer_scale_init_value * torch.ones((dim)), requires_grad=True)
            self.layer_scale_2 = nn.Parameter(
                layer_scale_init_value * torch.ones((dim)), requires_grad=True)
    def forward(self, x):
        mixed = self.token_mixer(self.norm1(x))
        if self.use_layer_scale:
            mixed = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * mixed
        x = x + self.drop_path(mixed)
        fed = self.mlp(self.norm2(x))
        if self.use_layer_scale:
            fed = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * fed
        x = x + self.drop_path(fed)
        return x
class PoolFormerLayer(nn.Module):
    """A stack of `depth` identical PoolFormerBlocks applied sequentially."""
    def __init__(self, dim, pool_size, depth, mlp_ratio=4.,
                 act_layer=nn.GELU, norm_layer=GroupNorm,
                 drop=0., drop_path=0.):
        super().__init__()
        self.dim = dim
        self.depth = depth
        self.blocks = nn.ModuleList([
            PoolFormerBlock(dim, pool_size, mlp_ratio, act_layer, norm_layer, drop, drop_path)
            for _ in range(depth)
        ])
    def forward(self, x):
        # Apply the blocks in order; shape is preserved throughout
        for block in self.blocks:
            x = block(x)
        return x
class PatchEmbedding(nn.Module):
    """Embed the raw BEV image and fuse it with the BEV feature map.

    Downsamples the BEV image 4x (e.g. 704x800 -> 176x200 per the comment
    below), concatenates a learned "cls" map on channels, compresses back to
    out_channels, adds a learned positional embedding, and sums the result
    with batch_data['spatial_features'].
    """
    def __init__(self, in_channels, out_channels, image_size, dropout = 0.):
        super().__init__()
        # down sample BEV image from 704, 800 to 176, 200
        self.patch_embedding = nn.Sequential(
            BaseConv(in_channels, out_channels // 8, 3, 2),
            BaseConv(out_channels // 8, out_channels // 8, 3, 1),
            BaseConv(out_channels // 8, out_channels // 8, 3, 2),
            BaseConv(out_channels // 8, out_channels // 4, 3, 1),
            BaseConv(out_channels // 4, out_channels, 1, 1)
        )
        # 1x1 conv fusing the concatenated [cls, patch] channels back down
        self.compress = nn.Conv2d(out_channels * 2, out_channels, 1, 1)
        # To output shape (note: spatial dims swapped relative to cls below,
        # matching the permute in forward)
        position = torch.randn([1, out_channels, image_size[1] // 4, image_size[0] // 4], requires_grad=True)
        # To input shape
        cls = torch.zeros([1, out_channels, image_size[0] // 4, image_size[1] // 4], requires_grad=True)
        self.position_embedding = nn.Parameter(position)
        self.cls_token = nn.Parameter(cls)
        self.dropout = nn.Dropout(dropout)
    def forward(self, batch_data):
        # batch_data: dict with 'bev' (raw BEV image) and 'spatial_features'
        # (backbone feature map). NOTE(review): shapes must line up after the
        # permute below — confirm against the caller.
        x = batch_data['bev']
        inputs = batch_data['spatial_features']
        # NOTE(review): b/c/h/w are unpacked but never used
        b,c,h,w = inputs.shape
        cls_token = self.cls_token.expand(x.shape[0], -1, -1, -1)
        x = self.patch_embedding(x)
        x = torch.cat([cls_token, x], 1)
        x = self.compress(x)
        # Swap the two spatial axes to match position_embedding's layout
        x = x.permute(0, 1, 3, 2)
        positions = x + self.position_embedding
        embeddings = inputs + positions
        embeddings = self.dropout(embeddings)
        return embeddings
class PositionEmbedding(nn.Module):
    """Fuse a conv-derived positional encoding of the raw BEV image into the
    BEV feature map: downsample the image 4x to out_channels, concatenate on
    channels, then compress back to out_channels."""
    def __init__(self, in_channels, out_channels, dropout = 0):
        super().__init__()
        # Two stride-2 convs -> 4x spatial downsample, widening to out_channels
        self.patch_embedding = nn.Sequential(
            BaseConv(in_channels, out_channels // 8, 3, 2),
            BaseConv(out_channels // 8, out_channels // 8, 3, 1),
            BaseConv(out_channels // 8, out_channels // 4, 3, 2),
            BaseConv(out_channels // 4, out_channels // 2, 3, 1),
            BaseConv(out_channels // 2, out_channels, 1, 1)
        )
        self.compress = BaseConv(out_channels * 2, out_channels, 1, 1)
        self.drop_path = DropPath(dropout)
    def forward(self, batch_data):
        images = batch_data['bev']
        features = batch_data['spatial_features']
        # Swap the two spatial axes — presumably to match the feature map's
        # axis order; confirm against the caller
        embedding = self.patch_embedding(images).permute(0, 1, 3, 2)
        fused = torch.cat([features, embedding], 1)
        return self.drop_path(self.compress(fused))
class FourierEmbedding(nn.Module):
    """Fourier-feature positional embedding for the BEV image (UNFINISHED).

    NOTE(review): forward() computes only the cosine term and then falls off
    the end (implicitly returning None); the patch_embedding/compress layers
    built in __init__ are never applied and bev_feature is unused. This looks
    like work in progress — confirm before using this module.
    """
    def __init__(self, in_channels, out_channels, dropout = 0):
        super().__init__()
        # in_channels * 2 suggests cos/sin features stacked on channels
        self.patch_embedding = nn.Sequential(
            BaseConv(in_channels * 2, out_channels // 4, 3, 2),
            BaseConv(out_channels // 4, out_channels // 4, 3, 1),
            BaseConv(out_channels // 4, out_channels // 2, 3, 2),
            BaseConv(out_channels // 2, out_channels // 2, 3, 1),
            BaseConv(out_channels // 2, out_channels, 1, 1)
        )
        self.compress = BaseConv(out_channels * 2, out_channels, 1, 1)
        self.drop_path = DropPath(dropout)
    def forward(self, batch_data):
        bev_images = batch_data['bev']
        bev_feature = batch_data['spatial_features']
        # Cosine Fourier feature of the image; no sine term, nothing returned
        fourier_bev_x = torch.cos(bev_images * 2 * np.pi)
class TransSPFANet(nn.Module):
    '''
    SWIN with BEV INPUT branch.

    2D backbone that fuses the raw BEV image (via Focus + SPP) with the BEV
    feature map, runs a transformer block, then blends a PoolFormer branch
    with a Swin branch through learned per-pixel softmax weights.
    '''
    def __init__(self, model_cfg, input_channels):
        super().__init__()
        self.model_cfg = model_cfg
        dim = input_channels
        out_dim = dim
        num_head = self.model_cfg.NUM_HEADS
        drop = self.model_cfg.DROP_RATE
        act = self.model_cfg.ACT
        self.num_bev_features = 128
        self.num_filters = 256
        # NOTE(review): "fcous" looks like a typo for "focus"; renaming the
        # attribute would break existing checkpoints, so it is kept as-is
        self.fcous = Focus(3, 256)
        self.spp = SPPBottleneck(256, 256)
        self.compress = nn.Sequential(
            BaseConv(dim + 256, dim, 1, 1),
            BaseConv(dim, dim // 2, 1, 1)
        )
        self.transformer = TransBlock(dim // 2, out_dim // 2, num_head, None, drop, act)
        self.layer_block1 = PoolFormerLayer(128, 7, 3, mlp_ratio=1)
        self.down_sample = BaseConv(128, 128, 3, 2)
        self.layer_block2 = BasicLayer(128, (100, 88), 3, 4, 4)
        # Upsample the Swin branch back to the PoolFormer branch's resolution
        self.deconv = nn.Sequential(
            nn.ConvTranspose2d(in_channels=128, out_channels=128, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False),
            nn.BatchNorm2d(128),
            nn.SiLU(),
        )
        # Per-pixel scalar logits used to weight the two branches
        # (NOTE(review): "spatil" typo also kept for checkpoint compatibility)
        self.weight_spatil = nn.Sequential(
            BaseConv(128, 128, 3, 1),
            BaseConv(128, 1, 1, 1),
        )
        self.weight_segment= nn.Sequential(
            BaseConv(128, 128, 3, 1),
            BaseConv(128, 1, 1, 1),
        )
    def forward(self, data_dict):
        """Compute fused 2D features; writes data_dict['spatial_features_2d']."""
        origin_bev = data_dict["bev"]
        features = data_dict["spatial_features"]
        origin_for = self.spp(self.fcous(origin_bev))
        # Swap spatial axes so the image branch lines up with the feature map
        origin_for = origin_for.permute(0, 1, 3, 2)
        concat_fea = torch.cat([features, origin_for], 1)
        x = self.compress(concat_fea)
        trans_out = self.transformer(x)
        # spatial information group use the poolformer
        block1 = self.layer_block1(trans_out)
        down_block1= self.down_sample(block1)
        # segmentation information group use the swin-transformer:
        # flatten to [B, H*W, C] tokens for BasicLayer, then restore
        block_temp = down_block1.permute(0, 2, 3, 1)
        b, h, w, c = block_temp.shape
        block_temp = block_temp.reshape(b, h * w, c)
        block2 = self.layer_block2(block_temp)
        block2 = block2.reshape(b, h, w, c)
        block2 = block2.permute(0, 3, 1, 2)
        block2 = self.deconv(block2)
        # Softmax over the two branch logits -> convex per-pixel blend
        weight1 = self.weight_spatil(block1)
        weight2 = self.weight_segment(block2)
        weight = torch.softmax(torch.cat([weight1, weight2], dim=1), dim=1)
        result = block1 * weight[:, 0:1, :, :] + block2 * weight[:, 1:2, :, :]
        data_dict["spatial_features_2d"] = result
        return data_dict
class TransSPoolformer(nn.Module):
    '''
    CIA-SSD version 2d backbone.

    Two-branch pyramid: a full-resolution convolutional branch and a
    stride-2 swin-transformer branch, fused by a per-pixel softmax gate.
    '''

    def __init__(self, model_cfg, input_channels):
        super().__init__()
        self.model_cfg = model_cfg
        dim = input_channels
        out_dim = dim
        # Patch-embeds the raw BEV image; consumes the whole data_dict.
        self.position_embedding = PatchEmbedding(3, 256, (704, 800))
        self.project = nn.Conv2d(out_dim, out_dim // 2, 1)
        self.bottom_up_block_0 = nn.Sequential(
            BaseConv(128, 128, 3, 1),
            BaseConv(128, 128, 3, 1),
            BaseConv(128, 128, 3, 1),
        )
        # Consumed by downstream dense heads.
        self.num_bev_features = 128
        self.bottom_up_block_1 = nn.Sequential(
            # [200, 176] -> [100, 88]
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1, bias=False, ),
            nn.BatchNorm2d(256),
            nn.ReLU(),
        )
        self.swin_block = BasicLayer(256, (100, 88), 3, 4, 4)
        self.trans_0 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=1, stride=1, padding=0, bias=False, ),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        self.trans_1 = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=1, stride=1, padding=0, bias=False, ),
            nn.BatchNorm2d(256),
            nn.ReLU(),
        )
        self.deconv_block_0 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False, ),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        self.deconv_block_1 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False, ),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        self.conv_0 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False, ),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        self.w_0 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=1, kernel_size=1, stride=1, padding=0, bias=False, ),
            nn.BatchNorm2d(1),
        )
        self.conv_1 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False, ),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        self.w_1 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=1, kernel_size=1, stride=1, padding=0, bias=False, ),
            nn.BatchNorm2d(1),
        )

    def forward_swin_block_1(self, inputs):
        """Run a (B, C, H, W) map through the swin layer as (B, H*W, C) tokens."""
        x = inputs.permute(0, 2, 3, 1)
        b, h, w, c = x.shape
        x = x.reshape(b, h * w, c)
        x = self.swin_block(x)
        x = x.reshape(b, h, w, c)
        x = x.permute(0, 3, 1, 2)
        return x

    def forward(self, data_dict):
        """Produce "spatial_features_2d" from the embedded BEV input.

        Args:
            data_dict: batch dict consumed by PatchEmbedding; gets
                "spatial_features_2d" written into it.
        """
        # PatchEmbedding reads what it needs from data_dict itself; the
        # previous explicit read of data_dict["spatial_features"] was dead
        # code (immediately overwritten) and has been removed.
        x = self.position_embedding(data_dict)
        x = self.project(x)
        x_0 = self.bottom_up_block_0(x)
        x_1 = self.bottom_up_block_1(x_0)
        x_1 = self.forward_swin_block_1(x_1)
        x_trans_0 = self.trans_0(x_0)
        x_trans_1 = self.trans_1(x_1)
        # Upsample the coarse branch and fuse with the fine branch.
        x_middle_0 = self.deconv_block_0(x_trans_1) + x_trans_0
        x_middle_1 = self.deconv_block_1(x_trans_1)
        x_output_0 = self.conv_0(x_middle_0)
        x_output_1 = self.conv_1(x_middle_1)
        # Per-pixel softmax gate between the two fused maps.
        x_weight_0 = self.w_0(x_output_0)
        x_weight_1 = self.w_1(x_output_1)
        x_weight = torch.softmax(torch.cat([x_weight_0, x_weight_1], dim=1), dim=1)
        x_output = x_output_0 * x_weight[:, 0:1, :, :] + x_output_1 * x_weight[:, 1:, :, :]
        data_dict["spatial_features_2d"] = x_output.contiguous()
        return data_dict
class TransSwinBase(nn.Module):
    '''
    CIA-SSD version 2d backbone.

    Like TransSPoolformer, but the positional signal is produced by a small
    conv stack over the raw BEV image and *added* to the spatial features.
    '''
    def __init__(self, model_cfg, input_channels):
        super().__init__()
        self.model_cfg = model_cfg
        dim = input_channels
        out_dim = dim
        # Encodes the 3-channel BEV image into a 256-channel positional map
        # (stride-4 stem followed by two 1x1 refinements).
        self.position_embedding = nn.Sequential(
            BaseConv(3, 64, 4, 4),
            BaseConv(64, 128, 1, 1),
            BaseConv(128, 256, 1, 1),
        )
        self.project = nn.Conv2d(out_dim, out_dim // 2, 1)
        self.bottom_up_block_0 = nn.Sequential(
            BaseConv(128, 128, 3, 1),
            BaseConv(128, 128, 3, 1),
            BaseConv(128, 128, 3, 1),
        )
        # Consumed by downstream dense heads.
        self.num_bev_features = 128
        self.bottom_up_block_1 = nn.Sequential(
            # [200, 176] -> [100, 88]
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1, bias=False, ),
            nn.BatchNorm2d(256),
            nn.ReLU(),
        )
        self.swin_block = BasicLayer(256, (100, 88), 5, 8, 4)
        self.trans_0 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=1, stride=1, padding=0, bias=False, ),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        self.trans_1 = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=1, stride=1, padding=0, bias=False, ),
            nn.BatchNorm2d(256),
            nn.ReLU(),
        )
        # Two parallel upsampling paths from the coarse branch.
        self.deconv_block_0 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False, ),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        self.deconv_block_1 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False, ),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        self.conv_0 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False, ),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        # 1-channel gate logits for the softmax fusion below.
        self.w_0 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=1, kernel_size=1, stride=1, padding=0, bias=False, ),
            nn.BatchNorm2d(1),
        )
        self.conv_1 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False, ),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        self.w_1 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=1, kernel_size=1, stride=1, padding=0, bias=False, ),
            nn.BatchNorm2d(1),
        )

    def forward_swin_block_1(self, inputs):
        """Run a (B, C, H, W) map through the swin layer as (B, H*W, C) tokens."""
        x = inputs.permute(0, 2, 3, 1)
        b, h, w, c = x.shape
        x = x.reshape(b, h * w, c)
        x = self.swin_block(x)
        x = x.reshape(b, h, w, c)
        x = x.permute(0, 3, 1, 2)
        return x

    def forward(self, data_dict):
        """Add BEV positional features, run both branches, and fuse.

        Reads "spatial_features" and "bev"; writes "spatial_features_2d".
        """
        x = data_dict["spatial_features"]
        bev = data_dict["bev"]
        # Swap the last two axes so the BEV map aligns with spatial features.
        bev = bev.permute(0, 1, 3, 2)
        position = self.position_embedding(bev)
        x = x + position
        x = self.project(x)
        x_0 = self.bottom_up_block_0(x)
        x_1 = self.bottom_up_block_1(x_0)
        x_1 = self.forward_swin_block_1(x_1)
        x_trans_0 = self.trans_0(x_0)
        x_trans_1 = self.trans_1(x_1)
        # Upsample the coarse branch; path 0 also gets the fine skip.
        x_middle_0 = self.deconv_block_0(x_trans_1) + x_trans_0
        x_middle_1 = self.deconv_block_1(x_trans_1)
        x_output_0 = self.conv_0(x_middle_0)
        x_output_1 = self.conv_1(x_middle_1)
        # Per-pixel softmax gate between the two fused maps.
        x_weight_0 = self.w_0(x_output_0)
        x_weight_1 = self.w_1(x_output_1)
        x_weight = torch.softmax(torch.cat([x_weight_0, x_weight_1], dim=1), dim=1)
        x_output = x_output_0 * x_weight[:, 0:1, :, :] + x_output_1 * x_weight[:, 1:, :, :]
        data_dict["spatial_features_2d"] = x_output.contiguous()
        return data_dict
class Trans_Coor_Swin_Net(nn.Module):
    '''
    Coordinate_SSD.

    Variant that gates the fine branch with coordinate attention instead of
    a convolutional stack, then fuses with a swin-transformer coarse branch.
    '''

    def __init__(self, model_cfg, input_channels):
        super().__init__()
        self.model_cfg = model_cfg
        dim = input_channels
        out_dim = dim
        # Produces the position-enriched feature map; consumes data_dict.
        self.position_embedding = PositionEmbedding(3, 256, 0.1)
        self.project = nn.Conv2d(out_dim, out_dim // 2, 1)
        # Coordinate attention producing a per-position gating mask.
        self.spatial_block = CoordAtt(128, 128, 16)
        # Consumed by downstream dense heads.
        self.num_bev_features = 128
        # [200, 176] -> [100, 88]
        self.bottom_up_block_1 = BaseConv(128, 256, 3, 2)
        self.swin_block = BasicLayer(256, (100, 88), 3, 8, 4)
        self.trans_0 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=1, stride=1, padding=0, bias=False, ),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        self.trans_1 = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=1, stride=1, padding=0, bias=False, ),
            nn.BatchNorm2d(256),
            nn.ReLU(),
        )
        self.deconv_block_0 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False, ),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        self.deconv_block_1 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False, ),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        self.conv_0 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False, ),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        self.w_0 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=1, kernel_size=1, stride=1, padding=0, bias=False, ),
            nn.BatchNorm2d(1),
        )
        self.conv_1 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False, ),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        self.w_1 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=1, kernel_size=1, stride=1, padding=0, bias=False, ),
            nn.BatchNorm2d(1),
        )

    def forward_swin_block_1(self, inputs):
        """Run a (B, C, H, W) map through the swin layer as (B, H*W, C) tokens."""
        x = inputs.permute(0, 2, 3, 1)
        b, h, w, c = x.shape
        x = x.reshape(b, h * w, c)
        x = self.swin_block(x)
        x = x.reshape(b, h, w, c)
        x = x.permute(0, 3, 1, 2)
        return x

    def forward(self, data_dict):
        """Produce "spatial_features_2d" via coordinate-attention gating.

        Args:
            data_dict: batch dict consumed by PositionEmbedding; gets
                "spatial_features_2d" written into it.
        """
        # PositionEmbedding reads what it needs from data_dict itself; the
        # previous explicit read of data_dict["spatial_features"] was dead
        # code (immediately overwritten) and has been removed.
        x = self.position_embedding(data_dict)
        x = self.project(x)
        # Gate the projected features with the coordinate-attention mask.
        spatial_mask = self.spatial_block(x)
        x_0 = spatial_mask * x
        x_1 = self.bottom_up_block_1(x_0)
        x_1 = self.forward_swin_block_1(x_1)
        x_trans_0 = self.trans_0(x_0)
        x_trans_1 = self.trans_1(x_1)
        # Upsample the coarse branch; path 0 also gets the fine skip.
        x_middle_0 = self.deconv_block_0(x_trans_1) + x_trans_0
        x_middle_1 = self.deconv_block_1(x_trans_1)
        x_output_0 = self.conv_0(x_middle_0)
        x_output_1 = self.conv_1(x_middle_1)
        # Per-pixel softmax gate between the two fused maps.
        x_weight_0 = self.w_0(x_output_0)
        x_weight_1 = self.w_1(x_output_1)
        x_weight = torch.softmax(torch.cat([x_weight_0, x_weight_1], dim=1), dim=1)
        x_output = x_output_0 * x_weight[:, 0:1, :, :] + x_output_1 * x_weight[:, 1:, :, :]
        data_dict["spatial_features_2d"] = x_output.contiguous()
        return data_dict
from schematics.types import IntType, StringType, UTCDateTimeType
from sqlalchemy.sql import and_, func, select
from playlog.lib.validation import OrderType, validate
from playlog.models import album, artist
async def create(conn, artist_id, name, plays, first_play, last_play):
    """Insert a new album row and return its generated primary key."""
    values = {
        'artist_id': artist_id,
        'name': name,
        'plays': plays,
        'first_play': first_play,
        'last_play': last_play,
    }
    return await conn.scalar(album.insert().values(**values))
async def find_one(conn, **kwargs):
    """Fetch one album row (with its artist's name) matching all kwargs."""
    conditions = [getattr(album.c, field) == value for field, value in kwargs.items()]
    query = select([artist.c.name.label('artist_name'), album])
    for condition in conditions:
        query = query.where(condition)
    query = query.select_from(album.join(artist))
    result = await conn.execute(query)
    return await result.fetchone()
@validate.params(
    artist=StringType(min_length=1, max_length=50),
    name=StringType(min_length=1, max_length=50),
    first_play_lt=UTCDateTimeType(),
    first_play_gt=UTCDateTimeType(),
    last_play_lt=UTCDateTimeType(),
    last_play_gt=UTCDateTimeType(),
    order=OrderType('artist', 'name', 'first_play', 'last_play', 'plays'),
    limit=IntType(required=True, min_value=1, max_value=100),
    offset=IntType(required=True, min_value=0)
)
async def find_many(conn, params):
    """Paginated album search joined with the artist name.

    Returns:
        dict with 'items' (rows for the requested page) and 'total'
        (count of ALL rows matching the filters, ignoring pagination).
    """
    artist_name = artist.c.name.label('artist')
    filters = []
    # Case-insensitive substring match on names; inclusive range bounds on
    # the play timestamps.
    # NOTE(review): user input is interpolated into the ILIKE pattern
    # unescaped, so '%' / '_' in the search text act as wildcards — confirm
    # that is intended.
    if 'artist' in params:
        filters.append(artist_name.ilike('%{}%'.format(params['artist'])))
    if 'name' in params:
        filters.append(album.c.name.ilike('%{}%'.format(params['name'])))
    if 'first_play_gt' in params:
        filters.append(album.c.first_play >= params['first_play_gt'])
    if 'first_play_lt' in params:
        filters.append(album.c.first_play <= params['first_play_lt'])
    if 'last_play_gt' in params:
        filters.append(album.c.last_play >= params['last_play_gt'])
    if 'last_play_lt' in params:
        filters.append(album.c.last_play <= params['last_play_lt'])
    # Default ordering: artist name ascending.
    order = params.get('order')
    order_field = order['column'] if order else 'artist'
    order_direction = order['direction'] if order else 'asc'
    order_clause = artist_name if order_field == 'artist' else album.c[order_field]
    order_clause = getattr(order_clause, order_direction)()
    stmt = select([album, artist_name]).select_from(album.join(artist))
    if filters:
        stmt = stmt.where(and_(*filters))
    # Count BEFORE applying offset/limit so 'total' covers every match.
    total = await conn.scalar(stmt.with_only_columns([func.count(album.c.id)]))
    stmt = stmt.offset(params['offset']).limit(params['limit']).order_by(order_clause)
    result = await conn.execute(stmt)
    items = await result.fetchall()
    return {'items': items, 'total': total}
async def find_for_artist(conn, artist_id):
    """Return all albums of the given artist, most played first."""
    stmt = (select([album])
            .where(album.c.artist_id == artist_id)
            .order_by(album.c.plays.desc()))
    result = await conn.execute(stmt)
    return await result.fetchall()
async def update(conn, album_id, **params):
    """Apply **params as column updates to the album with the given id."""
    stmt = album.update().values(**params).where(album.c.id == album_id)
    await conn.execute(stmt)
async def count_total(conn):
    """Return the total number of album rows."""
    total = await conn.scalar(album.count())
    return total
async def count_new(conn, since):
    """Count albums whose first play is at or after *since*."""
    stmt = select([func.count()]).where(album.c.first_play >= since)
    return await conn.scalar(stmt)
async def submit(conn, artist_id, name, date):
    """Record a play: create the album row or bump its play counter.

    Returns the album id in either case.
    """
    existing = await find_one(conn, artist_id=artist_id, name=name)
    if not existing:
        # First play of this album ever.
        return await create(
            conn=conn,
            artist_id=artist_id,
            name=name,
            plays=1,
            first_play=date,
            last_play=date
        )
    album_id = existing['id']
    # Increment atomically in SQL rather than read-modify-write.
    await update(
        conn=conn,
        album_id=album_id,
        plays=album.c.plays + 1,
        last_play=date
    )
    return album_id
|
"""
Module that provides a class that filters profanities
"""
__author__ = "leoluk"
__version__ = '0.0.1'
import random
import re
# Default profanity word list; used as the default `filterlist` of
# ProfanitiesFilter below.  Entries are plain words/phrases treated as
# regular-expression alternatives.
arrBad = [
    '2g1c',
    '2 girls 1 cup',
    'acrotomophilia',
    'anal',
    'anilingus',
    'anus',
    'arsehole',
    'ass',
    'asshole',
    'assmunch',
    'auto erotic',
    'autoerotic',
    'babeland',
    'baby batter',
    'ball gag',
    'ball gravy',
    'ball kicking',
    'ball licking',
    'ball sack',
    'ball sucking',
    'bangbros',
    'bareback',
    'barely legal',
    'barenaked',
    'bastardo',
    'bastinado',
    'bbw',
    'bdsm',
    'beaver cleaver',
    'beaver lips',
    'bestiality',
    'bi curious',
    'big black',
    'big breasts',
    'big knockers',
    'big tits',
    'bimbos',
    'birdlock',
    'bitch',
    'black cock',
    'blonde action',
    'blonde on blonde action',
    'blow j',
    'blow your l',
    'blue waffle',
    'blumpkin',
    'bollocks',
    'bondage',
    'boner',
    'boob',
    'boobs',
    'booty call',
    'brown showers',
    'brunette action',
    'bukkake',
    'bulldyke',
    'bullet vibe',
    'bung hole',
    'bunghole',
    'busty',
    'butt',
    'buttcheeks',
    'butthole',
    'camel toe',
    'camgirl',
    'camslut',
    'camwhore',
    'carpet muncher',
    'carpetmuncher',
    'chocolate rosebuds',
    'circlejerk',
    'cleveland steamer',
    'clit',
    'clitoris',
    'clover clamps',
    'clusterfuck',
    'cock',
    'cocks',
    'coprolagnia',
    'coprophilia',
    'cornhole',
    'cum',
    'cumming',
    'cunnilingus',
    'cunt',
    'darkie',
    'date rape',
    'daterape',
    'deep throat',
    'deepthroat',
    'dick',
    'dildo',
    'dirty pillows',
    'dirty sanchez',
    'dog style',
    'doggie style',
    'doggiestyle',
    'doggy style',
    'doggystyle',
    'dolcett',
    'domination',
    'dominatrix',
    'dommes',
    'donkey punch',
    'double dong',
    'double penetration',
    'dp action',
    'eat my ass',
    'ecchi',
    'ejaculation',
    'erotic',
    'erotism',
    'escort',
    'ethical slut',
    'eunuch',
    'faggot',
    'fecal',
    'felch',
    'fellatio',
    'feltch',
    'female squirting',
    'femdom',
    'figging',
    'fingering',
    'fisting',
    'foot fetish',
    'footjob',
    'frotting',
    'fuck',
    'fucking',
    'fuck buttons',
    'fudge packer',
    'fudgepacker',
    'futanari',
    'g-spot',
    'gang bang',
    'gay sex',
    'genitals',
    'giant cock',
    'girl on',
    'girl on top',
    'girls gone wild',
    'goatcx',
    'goatse',
    'gokkun',
    'golden shower',
    'goo girl',
    'goodpoop',
    'goregasm',
    'grope',
    'group sex',
    'guro',
    'hand job',
    'handjob',
    'hard core',
    'hardcore',
    'hentai',
    'homoerotic',
    'honkey',
    'hooker',
    'hot chick',
    'how to kill',
    'how to murder',
    'huge fat',
    'humping',
    'incest',
    'intercourse',
    'jack off',
    'jail bait',
    'jailbait',
    'jerk off',
    'jigaboo',
    'jiggaboo',
    'jiggerboo',
    'jizz',
    'juggs',
    'kike',
    'kinbaku',
    'kinkster',
    'kinky',
    'knobbing',
    'leather restraint',
    'leather straight jacket',
    'lemon party',
    'lolita',
    'lovemaking',
    'make me come',
    'male squirting',
    'masturbate',
    'menage a trois',
    'milf',
    'missionary position',
    'motherfucker',
    'mound of venus',
    'mr hands',
    'muff diver',
    'muffdiving',
    'nambla',
    'nawashi',
    'negro',
    'neonazi',
    'nig nog',
    'nigga',
    'nigger',
    'nimphomania',
    'nipple',
    'nipples',
    'nsfw images',
    'nude',
    'nudity',
    'nympho',
    'nymphomania',
    'octopussy',
    'omorashi',
    'one cup two girls',
    'one guy one jar',
    'orgasm',
    'orgy',
    'paedophile',
    'panties',
    'panty',
    'pedobear',
    'pedophile',
    'pegging',
    'penis',
    'phone sex',
    'piece of shit',
    'piss pig',
    'pissing',
    'pisspig',
    'playboy',
    'pleasure chest',
    'pole smoker',
    'ponyplay',
    'poof',
    'poop chute',
    'poopchute',
    'porn',
    'porno',
    'pornography',
    'prince albert piercing',
    'pthc',
    'pubes',
    'pussy',
    'queaf',
    'raghead',
    'raging boner',
    'rape',
    'raping',
    'rapist',
    'rectum',
    'reverse cowgirl',
    'rimjob',
    'rimming',
    'rosy palm',
    'rosy palm and her 5 sisters',
    'rusty trombone',
    's&m',
    'sadism',
    'scat',
    'schlong',
    'scissoring',
    'semen',
    'sex',
    'sexo',
    'sexy',
    'shaved beaver',
    'shaved pussy',
    'shemale',
    'shibari',
    'shit',
    'shota',
    'shrimping',
    'slanteye',
    'slut',
    'smut',
    'snatch',
    'snowballing',
    'sodomize',
    'sodomy',
    'spic',
    'spooge',
    'spread legs',
    'strap on',
    'strapon',
    'strappado',
    'strip club',
    'style doggy',
    'suck',
    'sucks',
    'suicide girls',
    'sultry women',
    'swastika',
    'swinger',
    'tainted love',
    'taste my',
    'tea bagging',
    'threesome',
    'throating',
    'tied up',
    'tight white',
    'tit',
    'tits',
    'titties',
    'titty',
    'tongue in a',
    'topless',
    'tosser',
    'towelhead',
    'tranny',
    'tribadism',
    'tub girl',
    'tubgirl',
    'tushy',
    'twat',
    'twink',
    'twinkie',
    'two girls one cup',
    'undressing',
    'upskirt',
    'urethra play',
    'urophilia',
    'vagina',
    'venus mound',
    'vibrator',
    'violet blue',
    'violet wand',
    'vorarephilia',
    'voyeur',
    'vulva',
    'wank',
    'wet dream',
    'wetback',
    'white power',
    'women rapping',
    'wrapping men',
    'wrinkled starfish',
    'xx',
    'xxx',
    'yaoi',
    'yellow showers',
    'yiffy',
    'zoophilia',
]


class ProfanitiesFilter(object):
    """Masks forbidden words in text with random filler characters.

    The configuration attributes (``badwords``, ``ignore_case``,
    ``replacements``, ``complete``, ``inside_words``) are public and may be
    mutated between ``clean()`` calls; the regex is rebuilt on every call.
    """

    def __init__(self, filterlist=arrBad, ignore_case=True, replacements="$@%-?!",
                 complete=True, inside_words=False):
        """
        Inits the profanity filter.

        filterlist   -- a list of regular expressions that
                        matches words that are forbidden
        ignore_case  -- ignore capitalization
        replacements -- string with characters to replace the forbidden word
        complete     -- completely remove the word or keep the first and last char?
        inside_words -- search inside other words?
        """
        self.badwords = filterlist
        self.ignore_case = ignore_case
        self.replacements = replacements
        self.complete = complete
        self.inside_words = inside_words

    def _make_clean_word(self, length):
        """Return a random filler string of *length* chars from replacements."""
        return ''.join(random.choice(self.replacements) for _ in range(length))

    def __substitute(self, match):
        """re.sub callback: mask the matched word (fully or keeping the edges)."""
        word = match.group()
        if not self.complete:
            # Keep the first and last characters visible.
            return word[0] + self._make_clean_word(len(word) - 2) + word[-1]
        return self._make_clean_word(len(word))

    def clean(self, text):
        """Cleans a string from profanity; None passes through unchanged."""
        if text is None:
            return text
        # With inside_words the alternation matches anywhere; otherwise it is
        # anchored on word boundaries.
        template = r'(%s)' if self.inside_words else r'\b(%s)\b'
        pattern = re.compile(template % '|'.join(self.badwords),
                             re.IGNORECASE if self.ignore_case else 0)
        return pattern.sub(self.__substitute, text)
if __name__ == '__main__':
    # Demo of the filter.  The second entry is a regex, so use a raw string:
    # a non-raw '\w' is an invalid escape sequence and raises SyntaxWarning
    # on modern Python.
    f = ProfanitiesFilter(['bad', r'un\w+'], replacements="-")
    example = "I am doing bad ungood badlike things."
    print(f.clean(example))
    # Returns "I am doing --- ------ badlike things."
    f.inside_words = True
    print(f.clean(example))  # Returns "I am doing --- ------ ---like things."
    f.complete = False
    print(f.clean(example))
    # Returns "I am doing b-d u----d b-dlike things."
|
#!/usr/bin/python
from mod_pywebsocket import msgutil, util
def web_socket_do_extra_handshake(request):
    """Accept every incoming handshake unconditionally."""
def web_socket_transfer_data(request):
    """Echo the query string of the handshake resource back as one message."""
    query = request.ws_location.split('?', 1)[1]
    msgutil.send_message(request, query)
|
from django.shortcuts import render
from formtools.wizard.views import SessionWizardView
from .forms import StepOneForm, StepTwoForm
def index(request):
    """Render the landing page of the multi-step app."""
    template = 'multistepapp/index.html'
    return render(request, template)
class FormWizardView(SessionWizardView):
    """Two-step form wizard; shows the collected data on completion."""

    template_name = 'multistepapp/steps.html'
    form_list = [StepOneForm, StepTwoForm]

    def get(self, request, *args, **kwargs):
        """Render the current form; fall back to the default flow when the
        wizard storage has not been initialised yet (KeyError)."""
        try:
            return self.render(self.get_form())
        except KeyError:
            return super().get(request, *args, **kwargs)

    def done(self, form_list, **kwargs):
        """Render done.html with the cleaned data of every completed step."""
        context = {'form_data': [form.cleaned_data for form in form_list]}
        return render(self.request, 'multistepapp/done.html', context)
|
from django.db import models
from django.db.models import CharField
# Create your models here.
class Category(models.Model):
    """A label used to group related posts."""

    name = models.CharField(max_length=20)

    def __str__(self):
        return self.name
class Post(models.Model):
    """A blog entry with free-form body text and category tags."""

    title = models.CharField(max_length=255)
    body = models.TextField()
    created_on = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)
    categories = models.ManyToManyField('Category', related_name="posts")

    def __str__(self):
        return self.title
class Comment(models.Model):
    """A reader comment attached to a post (deleted with its post)."""

    author = models.CharField(max_length=60)
    body = models.TextField()
    created_on = models.DateTimeField(auto_now_add=True)
    post = models.ForeignKey("Post", on_delete=models.CASCADE)
|
from typing import Union
class MessageTransfer:
    """Value object describing a message moving through the pipeline."""

    id: str
    text: Union[str, None]
    skip: bool
    terminate_group: bool

    def __init__(
        self,
        *,
        id: str,
        text: Union[str, None] = None,
        skip: bool = False,
        terminate_group: bool = False
    ) -> None:
        # All arguments are keyword-only; store them verbatim.
        self.id, self.text = id, text
        self.skip, self.terminate_group = skip, terminate_group
|
import config as cfig
import bot as udpbot
import commands as twitchcommands
import sys
import urllib.request
import random
# Load the config settings
print('==> Loading settings')
conf = cfig.config()
# Check if we have generated a default config.ini, if so exit
if conf.default == True:
    print('[!] Could not find config.ini. A default config.ini has been generated in the bot folder response. Please edit it and run the bot again.')
    sys.exit()
# If we haven't generated a default config.ini, check if it's valid
if conf.verifyConfigFile() == False:
    print('[!] Invalid config file')
    sys.exit()
else:
    print('==> Settings loaded')
# Load commands.ini
print('==> Loading commands')
cmd = twitchcommands.commands()
# Check if we have generated a default commands.ini, if so exit
if cmd.default == True:
    print('[!] Could not find command.ini. A default command.ini has been generated in the bot folder response. Please edit it and run the bot again.')
    sys.exit()
# Ini files are valid, create a bot instance
print('==> Connecting to Twitch IRC server')
bot = udpbot.bot(conf.config['auth']['host'], int(conf.config['auth']['port']), conf.config['auth']['username'], conf.config['auth']['password'], conf.config['auth']['channel'], int(conf.config['auth']['timeout']))
# Connect to IRC server
if bot.connect() == False:
    print('[!] Connection error response. Please check your internet connection and config.ini file')
    sys.exit()
# Send login packets
print('==> Logging in')
bot.login()
# Check login errors
response = bot.getResponse()
if response.lower().find('error') != -1:
    print('[!] Login error response. Please check your config.ini file')
    if conf.config['debug']['showServerOutput']: print('/r/n/r/n' + response)
    sys.exit()
# Send start message if needed
if conf.config['chat']['startMessage'] != '':
    bot.sendChatMessage(conf.config['chat']['startMessage'])
# No errors, start the loop
print('==> smoonybot is listening!')
while 1:
    # Debug message
    conf.debugMessage('==> Looping...')
    # Loop through all file hooks
    for i in cmd.fileHooks:
        try:
            # Get content of that file
            oldContent = cmd.fileHooks[i]
            newContent = open(cmd.commands[i]['response'], 'r').read()
            # If content is different, update fileHook and send message
            if newContent != oldContent and newContent != '':
                cmd.fileHooks[i] = newContent
                print('==> Content changed, sending new content to chat (' + i + ')')
                bot.sendChatMessage(newContent)
        except Exception:
            # Best-effort: a broken hook file must not kill the bot.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            print('[!] Error while reading file (' + i + ')')
    try:
        # Get new packets
        response = bot.getResponse().lower()
        # Check if we have new packets
        # TODO: this if is probably useless
        if response != None:
            # Make sure this is a PRIVMSG packet
            if response.find('privmsg') != -1:
                # Increment received messages
                bot.receivedMessages += 1
                # Get who has sent the message
                rFrom = response.split('!')[0][1:]
                # Set final message to empty
                message = ''
                # Check if that message triggered an interal command
                if response.find('!reloadcmd') != -1:
                    # Reload commands (!reloadCmd)
                    bot.sendChatMessage('Commands reloaded!')
                    cmd.reloadCommands()
                # elif response.find('!othercommand') != -1: ...
                # Check if that message triggered a custom command
                # Loop through all commands
                for i in cmd.commands:
                    # Get command data
                    cmdName = i
                    cmdType = int(cmd.commands[i]['type'])
                    cmdTrigger = cmd.commands[i]['trigger'].lower()
                    cmdResponse = cmd.commands[i]['response']
                    cmdDefaultResponse = cmd.commands[i]['defaultResponse']
                    cmdReply = int(cmd.commands[i]['reply'])
                    cmdPeriod = int(cmd.commands[i]['period'])
                    cmdAdminOnly = int(cmd.commands[i]['adminOnly'])
                    cmdFirstValue = int(cmd.commands[i]['firstValue'])
                    cmdSecondValue = int(cmd.commands[i]['secondValue'])
                    cmdPossibleAnswers = cmd.commands[i]['possibleAnswers'].split(',')
                    # Make sure the command has valid response and period (default for non-periodic commands is -1)
                    if cmdResponse != '' and cmdPeriod != 0:
                        if cmdType == 1:
                            # Normal command
                            if response.find(cmdTrigger) != -1:
                                if cmdAdminOnly == 1 and not conf.isAdmin(rFrom):
                                    print('==> ' + rFrom + ' triggered a simple admin command, but they are not an admin')
                                else:
                                    print('==> ' + rFrom + ' triggered a simple command (' + cmdName + ')')
                                    message = cmdResponse
                                    if cmdReply == 1: message = rFrom + ' >> ' + message
                        elif cmdType == 2:
                            # Periodic command
                            if bot.receivedMessages % cmdPeriod == 0:
                                print('==> Sending periodic command (' + cmdName + ')')
                                message = cmdResponse
                                bot.receivedMessages = 0
                        elif cmdType == 3:
                            # API command
                            if response.find(cmdTrigger) != -1:
                                try:
                                    # Get API content and send it.
                                    # BUG FIX: these calls used the nonexistent name
                                    # `urllibot`, so every API command raised NameError
                                    # (silently swallowed by the except below).
                                    req = urllib.request.Request(cmdResponse, data=None, headers={'User-Agent': 'Mozilla/5.0'})
                                    apiResponse = urllib.request.urlopen(req).read().decode('UTF-8')
                                    message = apiResponse
                                    if cmdReply == 1: message = rFrom + ' >> ' + message
                                    print('==> ' + rFrom + ' triggered an API command (' + cmdName + ')')
                                except Exception:
                                    print('[!] Error while requesting API command (' + cmdName + ')')
                        elif cmdType == 5:
                            # File read command
                            if response.find(cmdTrigger) != -1:
                                try:
                                    # Read file content and send it
                                    print('==> ' + rFrom + ' triggered a file read command (' + cmdName + ')')
                                    content = open(cmdResponse, 'r').read()
                                    # If content is empty, send default response
                                    if content == '':
                                        message = cmdDefaultResponse
                                    else:
                                        message = content
                                    if cmdReply == 1: message = rFrom + ' >> ' + message
                                except Exception:
                                    print('[!] Error while reading file (' + i + ')')
                        elif cmdType == 6:
                            # Callout command, any command that uses a recipient name at the end
                            if response.find(cmdTrigger) != -1:
                                if cmdAdminOnly == 1 and not conf.isAdmin(rFrom):
                                    print('==> ' + rFrom + ' triggered a simple admin command, but they are not an admin')
                                else:
                                    print('==> ' + rFrom + ' triggered a simple command (' + cmdName + ')')
                                    recipient = response.split(' ')[-1]
                                    message = cmdResponse + recipient
                                    if cmdReply == 1: message = rFrom + ' >> ' + message
                        elif cmdType == 7:
                            # any type of command that uses the form: subject cmdReplay user name
                            if response.find(cmdTrigger) != -1:
                                if cmdAdminOnly == 1 and not conf.isAdmin(rFrom):
                                    print('==> ' + rFrom + ' triggered a simple admin command, but they are not an admin')
                                else:
                                    print('==> ' + rFrom + ' triggered a simple command (' + cmdName + ')')
                                    recipient = response.split(' ')[-1].strip('\r\n')
                                    message = recipient + ' ' + cmdResponse + ' ' + rFrom
                                    if cmdReply == 1: message = rFrom + ' >> ' + message
                        elif cmdType == 8:
                            # Command that answers yes or no questions with
                            if response.find(cmdTrigger) != -1:
                                text = response.split(':')[2]
                                try:
                                    question = text.split(' ', 1)[1].strip('\r\n')
                                except Exception:
                                    question = False
                                if cmdAdminOnly == 1 and not conf.isAdmin(rFrom):
                                    print('==> ' + rFrom + ' triggered a simple admin command, but they are not an admin')
                                else:
                                    if question:
                                        print('==> ' + rFrom + ' triggered a simple command (' + cmdName + ')')
                                        answerIndex = random.randint(cmdFirstValue, cmdSecondValue)
                                        message = rFrom + ' asked: ' + question + ' '*30 + cmdResponse + ' ' + cmdPossibleAnswers[answerIndex]
                                        if cmdReply == 1: message = rFrom + ' >> ' + message
                                    else:
                                        print('==> ' + rFrom + ' triggered a simple command (' + cmdName + ') without a question!')
                                        answerIndex = random.randint(cmdFirstValue, cmdSecondValue)
                                        message = 'You must ask me a question if you want an answer ' + rFrom
                                        if cmdReply == 1: message = rFrom + ' >> ' + message
                # Send final message if needed
                if message != '':
                    bot.sendChatMessage(message)
        # Print received packet if needed
        if int(conf.config['debug']['showServerOutput']) == 1:
            print(response, end='')
    except Exception:
        # Best-effort main loop: any per-packet error is ignored so the bot
        # keeps running.  (Was a bare `except:`; KeyboardInterrupt now exits.)
        pass
|
# IMAGE FILE
import struct
import imghdr
def getImageSize(fname):
    """Return (width, height) of a PNG, GIF, or JPEG image file.

    The format is sniffed directly from the file's magic bytes instead of
    the `imghdr` module, which is deprecated and removed in Python 3.13
    (PEP 594); behavior is otherwise unchanged.

    Raises:
        RuntimeError: truncated header, corrupt PNG check, or unknown format.
    """
    with open(fname, 'rb') as fhandle:
        head = fhandle.read(24)
        if len(head) != 24:
            raise RuntimeError("Invalid Header")
        if head.startswith(b'\x89PNG\r\n\x1a\n'):
            # Bytes 4..8 of the PNG signature must be \r\n\x1a\n.
            check = struct.unpack('>i', head[4:8])[0]
            if check != 0x0d0a1a0a:
                raise RuntimeError("PNG: Invalid check")
            # IHDR width/height: big-endian ints at offsets 16..24.
            width, height = struct.unpack('>ii', head[16:24])
        elif head[:6] in (b'GIF87a', b'GIF89a'):
            # GIF logical screen size: little-endian uint16 pair at offset 6.
            width, height = struct.unpack('<HH', head[6:10])
        elif head.startswith(b'\xff\xd8'):
            fhandle.seek(0)  # Read 0xff next
            size = 2
            ftype = 0
            # Walk the JPEG segments until a SOFn frame header is found.
            while not 0xc0 <= ftype <= 0xcf:
                fhandle.seek(size, 1)
                byte = fhandle.read(1)
                while ord(byte) == 0xff:
                    byte = fhandle.read(1)
                ftype = ord(byte)
                size = struct.unpack('>H', fhandle.read(2))[0] - 2
            # We are at a SOFn block
            fhandle.seek(1, 1)  # Skip `precision' byte.
            height, width = struct.unpack('>HH', fhandle.read(4))
        else:
            raise RuntimeError("Unsupported format")
        return width, height
|
#!/usr/bin/python3
import argparse
parser = argparse.ArgumentParser(
    description="Caesar Encryptor/Decryptor.")
parser.add_argument("-dec", dest="mode", action="store_true", help="For decryption.")
args = parser.parse_args()
if args.mode == 0:
    # Encryption mode (default).
    text = input("Message to be encrypted: ")
    key = int(input("Key(Integer): "))  # read the integer key
    encrypted = ""
    for char in text:
        if char != ' ':
            # Shift the character's code point up by the key.
            encrypted = encrypted + chr(ord(char) + key)
        else:
            # Spaces pass through unshifted.
            encrypted = encrypted + ' '
    print("Encrypted text is: {}".format(encrypted))
else:
    # Decryption mode (-dec).
    encrypted = input("Encrypted message to be decrypted: ")
    key = int(input("Key(Integer): "))
    text = ""
    for char in encrypted:
        if char != ' ':
            # Shift the character's code point back down by the key.
            text = text + chr(ord(char) - key)
        else:
            # BUG FIX: this previously appended to `encrypted` instead of
            # `text`, so every space was dropped from the decrypted output.
            text = text + ' '
    print("Decrypted text is: {}".format(text))
|
from __future__ import absolute_import
from sentry.relay.projectconfig_debounce_cache.base import ProjectConfigDebounceCache
from sentry.utils.redis import get_dynamic_cluster_from_options, validate_dynamic_cluster
REDIS_CACHE_TIMEOUT = 3600 # 1 hr
def _get_redis_key(project_id, organization_id):
if organization_id:
return "relayconfig-debounce:o:%s" % (organization_id,)
elif project_id:
return "relayconfig-debounce:p:%s" % (project_id,)
else:
raise ValueError()
class RedisProjectConfigDebounceCache(ProjectConfigDebounceCache):
    """Debounce cache for relay project-config tasks, backed by Redis."""

    def __init__(self, **options):
        # Resolve the cluster from Sentry settings; any remaining options
        # are forwarded to the base class.
        self.is_redis_cluster, self.cluster, options = get_dynamic_cluster_from_options(
            "SENTRY_RELAY_PROJECTCONFIG_DEBOUNCE_CACHE_OPTIONS", options
        )
        super(RedisProjectConfigDebounceCache, self).__init__(**options)

    def validate(self):
        # Fail early if the configured cluster is invalid.
        validate_dynamic_cluster(self.is_redis_cluster, self.cluster)

    def __get_redis_client(self, routing_key):
        # A Redis Cluster routes keys itself; otherwise pick the local
        # client responsible for this key.
        if self.is_redis_cluster:
            return self.cluster
        else:
            return self.cluster.get_local_client_for_key(routing_key)

    def check_is_debounced(self, project_id, organization_id):
        """Return True if a task for this project/org is already pending;
        otherwise mark it pending for REDIS_CACHE_TIMEOUT seconds and
        return False.

        NOTE(review): get + setex is not atomic, so two concurrent callers
        can both observe "not debounced" — presumably acceptable here;
        confirm if exactly-once scheduling matters.
        """
        key = _get_redis_key(project_id, organization_id)
        client = self.__get_redis_client(key)
        if client.get(key):
            return True
        client.setex(key, REDIS_CACHE_TIMEOUT, 1)
        return False

    def mark_task_done(self, project_id, organization_id):
        # Clear the debounce flag so the next check schedules a new task.
        key = _get_redis_key(project_id, organization_id)
        client = self.__get_redis_client(key)
        client.delete(key)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import register_grad_sampler
@register_grad_sampler(nn.GroupNorm)
def compute_group_norm_grad_sample(
    layer: nn.GroupNorm,
    activations: torch.Tensor,
    backprops: torch.Tensor,
) -> Dict[nn.Parameter, torch.Tensor]:
    """
    Computes per sample gradients for GroupNorm

    Args:
        layer: Layer
        activations: Activations
        backprops: Backpropagations

    Returns:
        Mapping from the layer's parameters (weight, and bias if present)
        to their per-sample gradients.
    """
    # Weight grad per sample: normalized activations times the incoming
    # gradient; "ni...->ni" sums every trailing (spatial) dim, leaving one
    # value per (sample, channel).
    gs = F.group_norm(activations, layer.num_groups, eps=layer.eps) * backprops
    ret = {layer.weight: torch.einsum("ni...->ni", gs)}
    if layer.bias is not None:
        # Bias grad per sample is the backprop summed over trailing dims.
        ret[layer.bias] = torch.einsum("ni...->ni", backprops)
    return ret
|
import sys
import os
cwd = os.getcwd()
path = "files"

# Create the output directory; report (but tolerate) failure, e.g. when the
# directory already exists.
try:
    os.mkdir(path)
except OSError:
    print ("Creation of the directory %s failed" % path)
else:
    print ("Successfully created the directory %s " % path)

filename = 'files/yourprogram'
# Fix: use a context manager so the file is closed even if a write fails
# (previously the handle leaked on any exception before close()).
with open(filename, "w+") as writefile:
    writefile.write("the current working directory is:")
    writefile.write(cwd)
    writefile.write("\r\n")
    for i in range(10):
        writefile.write("This is line %d\r\n" % (i+1))
|
from abc import abstractmethod
from typing import List
from open_mafia_engine.core.all import (
Actor,
EPrePhaseChange,
Faction,
Game,
handler,
Event,
)
from .auxiliary import TempPhaseAux
class ECreateFactionChat(Event):
    """Event that signals creating a faction chat."""

    def __init__(self, game: Game, /, faction: Faction):
        super().__init__(game)
        # Faction whose chat should be created by whoever handles this event.
        self.faction = faction

    @property
    def actors(self) -> List[Actor]:
        """Members of the faction — the chat's participants."""
        return self.faction.actors
class FactionChatCreatorAux(TempPhaseAux):
    """Base class to create the faction chat for some faction."""

    def __init__(self, game: Game, /, faction: Faction):
        # Set the faction before super().__init__, since the key below reads it.
        self.faction = faction
        key = f"create chat for {self.faction.name}"
        super().__init__(game, key=key, use_default_constraints=False)

    @handler
    def handle_startup(self, event: EPrePhaseChange):
        """When leaving the startup phase, emit ECreateFactionChat for our faction."""
        if event.old_phase != self.game.phase_system.startup:
            return
        # NOTE: Rather than create an action, since it's startup, we should
        # just be able to trigger event responses.
        self.game.process_event(ECreateFactionChat(self.game, self.faction))

    @property
    def actors(self) -> List[Actor]:
        # Same set as ECreateFactionChat.actors: the faction's members.
        return self.faction.actors
|
import numpy as np
from numpy.linalg import lstsq
from scipy.optimize import lsq_linear
from . import moduleFrame
class FitSignals(moduleFrame.Strategy):
    """Fit the unknown contributor spectra by ordinary least squares."""

    def __call__(self, signalVars, knownSpectra):
        # rows are additions, columns are contributors
        isKnown = ~np.isnan(knownSpectra[:, 0])
        isUnknown = ~isKnown

        # Subtract the contribution of the already-known spectra, then solve
        # the remaining linear system for the unknown ones.
        knownContribution = signalVars[:, isKnown] @ knownSpectra[isKnown, :]
        target = self.titration.processedData - knownContribution
        solution, residuals, _, _ = lstsq(
            signalVars[:, isUnknown], target, rcond=None
        )

        fittedCurves = signalVars[:, isUnknown] @ solution + knownContribution
        allSignals = knownSpectra.copy()
        allSignals[isUnknown, :] = solution
        return allSignals, residuals, fittedCurves
class FitSignalsNonnegative(moduleFrame.Strategy):
    """Fit contributor spectra by bounded (nonnegative) least squares."""

    # TODO: account for known spectra
    def __call__(self, signalVars, knownSpectra):
        fittedRows = np.empty((0, signalVars.shape[1]))
        costs = np.empty((1, 0))
        # Solve each measured signal (column of processedData) independently,
        # constraining every fitted value to [0, inf).
        for observed in self.titration.processedData.T:
            outcome = lsq_linear(signalVars, observed, (0, np.inf), method="bvls")
            fittedRows = np.vstack((fittedRows, outcome.x))
            costs = np.append(costs, outcome.cost)
        fittedSignals = fittedRows.T
        fittedCurves = signalVars @ fittedSignals
        return fittedSignals, costs, fittedCurves
class ModuleFrame(moduleFrame.ModuleFrame):
    # UI frame letting the user pick which signal-fitting strategy to use.
    frameLabel = "Fit signals"
    dropdownLabelText = "Fit signals to curve using:"
    # TODO: add least squares with linear constraints
    # Human-readable option label -> Strategy class implementing it.
    dropdownOptions = {
        "Ordinary least squares": FitSignals,
        "Nonnegative least squares": FitSignalsNonnegative,
    }
    # NOTE(review): name the chosen strategy is stored under — see
    # moduleFrame.ModuleFrame for exactly where it is assigned.
    attributeName = "fitSignals"
|
"""
Created on Wed Jan 20 10:15:31 2021
@author: Lucas.singier
"""
import socket
import select
IP = "127.0.0.1"
PORT = 1234
# Creation de la socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# On set les options du socket
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#Binding de la socket = permet de lier une communication par une adresse et un port
server_socket.bind((IP, PORT))
# Mise en place d'un serveur pour écouter les connexions
server_socket.listen()
#Liste des socket
listeSocket = [server_socket]
# création d'un dictionnaire-> cle=pseudo,valeur=message
clients = {}
#Affiche sur le serveur que le serveur fonctionne bien et qu'il attend des conenxions
print(f'Le serveur fonctionne sur {IP}:{PORT} \n')
# fonction de récupération du message
def recup_message(client_socket):
    """Receive one length-prefixed message from *client_socket*.

    The protocol sends a 10-byte ASCII header holding the payload length,
    followed by the payload itself.

    Returns:
        dict: {'cle': raw 10-byte header, 'mess': payload bytes}, or
        False when the peer disconnected or sent an unreadable header.
    """
    try:
        # Fixed-width header carrying the payload size.
        tailledumessage = client_socket.recv(10)
        # An empty read means the peer closed the connection.
        if not len(tailledumessage):
            return False
        # Decode the size (bytes -> int); int() tolerates surrounding spaces.
        tailledumessageint = int(tailledumessage.decode('utf-8'))
        # Keep the raw header so it can be forwarded as-is to other clients.
        return {'cle': tailledumessage, 'mess': client_socket.recv(tailledumessageint)}
    except Exception:
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # on the server still propagate; this still covers abrupt client
        # disconnects and malformed headers.
        return False
while True:
    # Wait until at least one monitored socket is readable.
    lecturesockets, _, exceptionssockets = select.select(listeSocket, [], listeSocket)
    # Handle every socket with pending data (or a pending connection).
    for sock in lecturesockets:
        # The listening socket signals a new incoming connection.
        if sock == server_socket:
            client_socket, client_address = server_socket.accept()
            # The first message a client sends is its pseudo (nickname).
            cli_mess = recup_message(client_socket)
            # Client disconnected before completing the handshake.
            if cli_mess is False:
                continue
            # Track the new client socket for future select() calls.
            listeSocket.append(client_socket)
            clients[client_socket] = cli_mess
            print('Nouvel arrivant: {}'.format(cli_mess['mess'].decode('utf-8')))
        # Otherwise an existing client sent a message.
        else:
            # Receive the next length-prefixed message.
            message = recup_message(sock)
            # False means the client disconnected: clean up its state.
            if message is False:
                print('Déconnexion de {}'.format(clients[sock]['mess'].decode('utf-8')))
                # Stop monitoring this socket.
                listeSocket.remove(sock)
                # Forget the client.
                del clients[sock]
                continue
            # Identify the sender from the handshake data.
            user = clients[sock]
            print(f'{user["mess"].decode("utf-8")}: {message["mess"].decode("utf-8")}')
            # Broadcast to every other connected client.
            for clisock in clients:
                if clisock != sock:
                    # Forward sender header+pseudo then message header+body.
                    clisock.send(user['cle'] + user['mess'] + message['cle'] + message['mess'])
|
import distdl
import distdl.nn as dnn
import matplotlib.pyplot as plt
import numpy as np
import os
import time
import torch
from argparse import ArgumentParser
from distdl.utilities.torch import *
from dfno import *
from mat73 import loadmat
from matplotlib.animation import FuncAnimation
from mpi4py import MPI
from pathlib import Path
from scipy import io
Partition = distdl.backend.backend.Partition

# Command-line configuration for distributed FNO training.
parser = ArgumentParser()
parser.add_argument('--input', '-i', type=Path)
parser.add_argument('--partition-shape', '-ps', type=int, default=(1,1,2,2,1), nargs=5)
parser.add_argument('--num-data', '-nd', type=int, default=1000)
parser.add_argument('--sampling-rate', '-sr', type=int, default=1)
parser.add_argument('--in-timesteps', '-it', type=int, default=10)
parser.add_argument('--out-timesteps', '-ot', type=int, default=40)
parser.add_argument('--num-gpus', '-ng', type=int, default=1)
parser.add_argument('--train-split', '-ts', type=float, default=0.8)
parser.add_argument('--width', '-w', type=int, default=20)
parser.add_argument('--modes', '-m', type=int, default=(4, 4, 4), nargs=3)
parser.add_argument('--decomposition-order', '-do', type=int, default=1)
parser.add_argument('--num-blocks', '-nb', type=int, default=4)
parser.add_argument('--num-epochs', '-ne', type=int, default=500)
parser.add_argument('--batch-size', '-bs', type=int, default=10)
parser.add_argument('--checkpoint-interval', '-ci', type=int, default=25)
parser.add_argument('--generate-visualization', '-gv', action='store_true')
args = parser.parse_args()

# The process grid must account for every MPI rank exactly once.
if np.prod(args.partition_shape) != MPI.COMM_WORLD.size:
    raise ValueError(f'The number of processes {MPI.COMM_WORLD.size} does not match the partition shape {args.partition_shape}.')

P_world, P_x, P_0 = create_standard_partitions(args.partition_shape)
use_cuda, cuda_aware, device_ordinal, device, ctx = get_env(P_x, num_gpus=args.num_gpus)
with ctx:
    # Seed per-rank so each process draws distinct but reproducible randomness.
    torch.manual_seed(P_x.rank + 123)
    np.random.seed(P_x.rank + 123)
    # Broadcast a shared timestamp from rank 0 so every rank agrees on the
    # output directory name.
    B = dnn.Broadcast(P_0, P_x)
    timestamp = torch.tensor([int(time.time())]) if P_0.active else zero_volume_tensor()
    timestamp = B(timestamp).item()
    # NOTE(review): anomaly detection slows training considerably — consider
    # disabling it outside of debugging runs.
    torch.set_anomaly_enabled(True)
    out_dir = Path(f'data/{args.input.stem}_{timestamp}')
    if P_0.active:
        os.makedirs(out_dir)
        print(f'created output directory: {out_dir.resolve()}')
    if P_0.active:
        #u = torch.rand(args.num_data, 1, 64, 64, args.in_timesteps+args.out_timesteps, device=device, dtype=torch.float32)
        u = torch.tensor(loadmat(args.input)['u'], dtype=torch.float32)[:args.num_data].unsqueeze(1).to(device)
        # NOTE(review): `dim` is not defined in this file — it must come from
        # the `from dfno import *` star import; confirm it exists there.
        x_slice = (slice(None, args.num_data, 1), slice(None, None, 1), *[slice(None, None, args.sampling_rate)]*(dim-3), slice(None, args.in_timesteps, 1))
        y_slice = (slice(None, args.num_data, 1), slice(None, None, 1), *[slice(None, None, args.sampling_rate)]*(dim-3), slice(args.in_timesteps, args.in_timesteps+args.out_timesteps, 1))
        data = {}
        # NOTE(review): "guassian" is presumably the dfno library's own
        # (misspelled) function name — verify before renaming.
        x, data['mu_x'], data['std_x'] = unit_guassian_normalize(u[x_slice])
        y, data['mu_y'], data['std_y'] = unit_guassian_normalize(u[y_slice])
        split_index = int(args.train_split*args.num_data)
        data['x_train'] = x[:split_index, ...]
        data['x_test'] = x[split_index:, ...]
        data['y_train'] = y[:split_index, ...]
        data['y_test'] = y[split_index:, ...]
        for k, v in data.items():
            print(f'{k}.shape = {v.shape}')
    else:
        # Non-root ranks start with empty tensors; the scatter below fills them.
        data = {}
        data['x_train'] = zero_volume_tensor(device=device)
        data['x_test'] = zero_volume_tensor(device=device)
        data['y_train'] = zero_volume_tensor(device=device)
        data['y_test'] = zero_volume_tensor(device=device)
        data['mu_x'] = zero_volume_tensor(device=device)
        data['std_x'] = zero_volume_tensor(device=device)
        data['mu_y'] = zero_volume_tensor(device=device)
        data['std_y'] = zero_volume_tensor(device=device)
    # Scatter each tensor from rank 0 across the process grid. Iterating in
    # sorted key order keeps the collective call order identical on all ranks.
    for k, v in sorted(data.items(), key=lambda i: i[0]):
        S = dnn.DistributedTranspose(P_0, P_x)
        # NOTE(review): vars()[k] creates module-level names (x_train, mu_x,
        # ...) dynamically — fragile, but relied upon below.
        vars()[k] = S(v)
    del data
    print(f'index = {P_x.index}, x_train.shape = {x_train.shape}')
    print(f'index = {P_x.index}, x_test.shape = {x_test.shape}')
    print(f'index = {P_x.index}, mu_x.shape = {mu_x.shape}')
    print(f'index = {P_x.index}, std_x.shape = {std_x.shape}')
    print(f'index = {P_x.index}, y_train.shape = {y_train.shape}')
    print(f'index = {P_x.index}, y_test.shape = {y_test.shape}')
    print(f'index = {P_x.index}, mu_y.shape = {mu_y.shape}')
    print(f'index = {P_x.index}, std_y.shape = {std_y.shape}')
    x_train.requires_grad = True
    # NOTE(review): requiring grad on the *targets* looks unnecessary —
    # confirm whether y_train.requires_grad is intentional.
    y_train.requires_grad = True
    network = DistributedFNO(P_x,
        [args.batch_size, 1, 64//args.sampling_rate, 64//args.sampling_rate, args.in_timesteps],
        args.out_timesteps,
        args.width,
        args.modes,
        num_blocks=args.num_blocks,
        device=device,
        dtype=x_train.dtype)
    parameters = [p for p in network.parameters()]
    criterion = dnn.DistributedMSELoss(P_x).to(device)
    mse = dnn.DistributedMSELoss(P_x).to(device)
    optimizer = torch.optim.Adam(parameters, lr=1e-3, weight_decay=1e-4)
    # Training-curve accumulators exist on rank 0 only when visualization is
    # requested. NOTE(review): rank 0 appends to these in the loop below even
    # when --generate-visualization is off, which raises NameError.
    if P_0.active and args.generate_visualization:
        steps = []
        train_accs = []
        test_accs = []
    for i in range(args.num_epochs):
        network.train()
        batch_indices = generate_batch_indices(P_x, x_train.shape[0], args.batch_size, shuffle=True)
        train_loss = 0.0
        n_train_batch = 0.0
        for j, (a, b) in enumerate(batch_indices):
            optimizer.zero_grad()
            x = x_train[a:b]
            y = y_train[a:b]
            y_hat = network(x)
            # Compare in the original (denormalized) data scale.
            y = unit_gaussian_denormalize(y, mu_y, std_y)
            y_hat = unit_gaussian_denormalize(y_hat, mu_y, std_y)
            loss = criterion(y_hat, y)
            if P_0.active:
                print(f'epoch = {i}, batch = {j}, loss = {loss.item()}')
            train_loss += loss.item()
            n_train_batch += 1
            loss.backward()
            optimizer.step()
        if P_0.active:
            print(f'epoch = {i}, average train loss = {train_loss/n_train_batch}')
            # NOTE(review): steps/train_accs only exist when
            # --generate-visualization is set — NameError otherwise.
            steps.append(i)
            train_accs.append(train_loss/n_train_batch)
        network.eval()
        with torch.no_grad():
            test_loss, test_mse = 0.0, 0.0
            y_true, y_pred = [], []
            batch_indices = generate_batch_indices(P_x, x_test.shape[0], args.batch_size, shuffle=False)
            n_test_batch = 0
            for j, (a, b) in enumerate(batch_indices):
                x = x_test[a:b]
                y = y_test[a:b]
                y_hat = network(x)
                y = unit_gaussian_denormalize(y, mu_y, std_y)
                y_hat = unit_gaussian_denormalize(y_hat, mu_y, std_y)
                loss = criterion(y_hat, y)
                mse_loss = mse(y_hat, y)
                test_loss += loss.item()
                test_mse += mse_loss.item()
                y_true.append(y)
                y_pred.append(y_hat)
                n_test_batch += 1
            if P_0.active:
                print(f'average test loss = {test_loss/n_test_batch}')
                print(f'average test mse = {test_mse/n_test_batch}')
                test_accs.append(test_loss/n_test_batch)
        # Periodic checkpoint: save weights and predictions every
        # checkpoint_interval epochs (j is the 1-based epoch counter).
        j = i+1
        if j % args.checkpoint_interval == 0:
            with torch.no_grad():
                model_path = out_dir.joinpath(f'model_{j:04d}_{P_x.rank:04d}.pt')
                torch.save(network.state_dict(), model_path)
                print(f'saved model: {model_path.resolve()}')
                y_true = torch.cat(tuple(y_true))
                y_pred = torch.cat(tuple(y_pred))
                mdict = {'y_true': y_true, 'y_pred': y_pred}
                mat_path = out_dir.joinpath(f'mat_{j:04d}_{P_x.rank:04d}.mat')
                io.savemat(mat_path, mdict)
                print(f'saved mat: {mat_path.resolve()}')
                if args.generate_visualization:
                    # Gather full tensors back to rank 0 for plotting.
                    G = dnn.DistributedTranspose(P_x, P_0)
                    y_true = G(y_true).cpu().detach().numpy()
                    y_pred = G(y_pred).cpu().detach().numpy()
                    if P_0.active:
                        fig = plt.figure()
                        ax1 = fig.add_subplot(121)
                        ax2 = fig.add_subplot(122)
                        im1 = ax1.imshow(np.squeeze(y_true[0, :, :, :, 0]), animated=True)
                        im2 = ax2.imshow(np.squeeze(y_pred[0, :, :, :, 0]), animated=True)
                        def animate(k):
                            # Advance both panels to output timestep k.
                            im1.set_data(np.squeeze(y_true[0, :, :, :, k]))
                            im2.set_data(np.squeeze(y_pred[0, :, :, :, k]))
                            return (im1, im2)
                        anim_path = out_dir.joinpath(f'anim_{j:04d}.gif')
                        ax1.title.set_text(r'$y_{true}$')
                        ax2.title.set_text(r'$y_{pred}$')
                        plt.axis('on')
                        anim = FuncAnimation(fig, animate, frames=args.out_timesteps, repeat=True)
                        anim.save(anim_path)
                        print(f'saved animation: {anim_path.resolve()}')
                        # Loss curves over all epochs so far.
                        curve_path = out_dir.joinpath(f'curves_{j:04d}.png')
                        fig = plt.figure()
                        ax = fig.add_subplot(111)
                        ax.plot(steps, train_accs, label='Average Train Loss')
                        ax.plot(steps, test_accs, label='Average Test Loss')
                        plt.axis('on')
                        plt.legend()
                        plt.xlabel('Epoch')
                        plt.ylabel('Loss')
                        plt.savefig(curve_path)
                        print(f'saved training curve plot: {curve_path.resolve()}')
|
import tradeStock as t
import robin_stocks as r
import financial as f
import trading_algorithms as a
import numpy as np
import signal
import concurrent.futures
import time
import matplotlib.pyplot as plt
import indicators as ind
from Method_kd import KD
from long_algo import Long_algo
from Method_BOLL_SMA import BOLL_SMA
# Log in to Robinhood with user-supplied account and password; an SMS
# verification code may be requested on first login.
t.login()
def algo_buy(tker):
    """Check the BOLL_SMA buy signal on 30-min bars for *tker* and buy.

    Best-effort: any per-ticker failure is logged and swallowed so one bad
    ticker does not stop the scan.
    """
    try:
        data = ind.load_stock_30min(tker)
        timeFrame = 20
        # NOTE(review): algo_buy_test below calls BOLL_SMA(tker, 20, 3, data,
        # timeFrame) — one of the two signatures must be wrong; confirm.
        bar = BOLL_SMA(tker,data,timeFrame)
        if bar.buy():
            print(tker,'is to buy')
            money = 50
            # NOTE(review): `a` (trading_algorithms) is shadowed by a local
            # reassignment in the main loop below; if that loop runs first,
            # these calls hit a Long_algo instance instead of the module.
            check = a.checkCap(tker,200)
            if check:
                return a.buyStock(tker,money)
    except Exception as exc:
        print('failed to track ', tker,'error:',exc)
#this is for test purpose
def algo_buy_test(tker):
    """Dry-run variant of algo_buy: report/return the ticker instead of buying."""
    try:
        data = ind.load_stock_30min(tker)
        timeFrame = 20
        bar = BOLL_SMA(tker,20, 3, data,timeFrame)
        if bar.buy():
            print(tker,'is to buy')
            # Returning the ticker marks it as a buy candidate to the caller.
            return tker
        else:
            pass
    except Exception as exc:
        print('failed to track ', tker,'error:',exc)
# Main scan loop: build a long-candidate list from daily history, then, while
# the market is closed, monitor those candidates on 30-min bars.
while True:
    df = f.read_stocks('stocks/stocks.csv')
    watch_list = list(df['tiker'])
    long_list = []
    buy_list = []
    for tk in watch_list:
        try:
            data = ind.load_stock(tk, 200)
            timeFrame = 20
            # Bug fix: this local was named `a`, shadowing the
            # trading_algorithms module alias `a` used by algo_buy().
            long_algo = Long_algo(tk, data, timeFrame)
            if long_algo.buy():
                print('long position:', tk)
                long_list.append(tk)
        except Exception as exc:
            print('error:', exc)
    while not f.isMarketOpen():
        #scan the long list of history price, check if any stock in long position
        #if in long position, put it into watch list with
        if len(long_list) > 0:
            #print('stock list',my_stock_list)
            print('[Info]:Long_list:',long_list)
            print('[Info]:buy_list:', buy_list)
            #sell loss
            """try:
                with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
                    results = list(map(lambda x: executor.submit(a.sellByReturn, x), my_stock_list))
                    for result in concurrent.futures.as_completed(results):
                        if result.result() in my_stock_list:
                            print('sell', result.result())
                            my_stock_list.remove(result.result())
            except Exception as exc:
                print('error:', exc)"""
            """try:
                with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
                    results = list(map(lambda x: executor.submit(a.buyByAverage,x), my_stock_list))
                    for result in concurrent.futures.as_completed(results):
                        data = result.result()
                        if data not in watch_list and data is not None:
                            print(result.result(),'add to watch list')
                            watch_list.append(result.result())
            except Exception as exc:
                print('buy evarage error: ', exc)"""
            #This section is buy action
            try:
                with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
                    results = list(map(lambda x: executor.submit(algo_buy_test,x), long_list))
                    for result in concurrent.futures.as_completed(results):
                        # Move triggered tickers from candidates to buys.
                        if result.result() in long_list:
                            long_list.remove(result.result())
                            buy_list.append(result.result())
            except Exception as exc:
                print('error: ',exc)
            time.sleep(3)
        print('in the market loop')
        print('still alive')
        time.sleep(60)
|
#!/usr/bin/env python
import argparse
import sys
import jinja2
import markdown
from os import listdir, makedirs
from os.path import isfile, join, exists
# Python-2-only hack: `reload` and `sys.setdefaultencoding` do not exist on
# Python 3 (where strings are already unicode), so guard them — previously
# this crashed immediately under Python 3 with a NameError.
if sys.version_info[0] < 3:
    reload(sys)
    sys.setdefaultencoding('utf-8')
def main(args=None):
    """Convert every Markdown file in src/pages into an HTML page in dist/.

    Each .md file is rendered to HTML and injected into the shared Jinja2
    template as `body`, with a generated title as `subject`.
    """
    src_path = 'src/pages'
    dist_path = 'dist'

    with open('src/layouts/template.html', 'r') as f:
        template = f.read()

    if not exists(dist_path):
        makedirs(dist_path)

    onlyfiles = [f for f in listdir(src_path) if isfile(join(src_path, f))]
    for file in onlyfiles:
        # Bug fix: `name, ext = file.split('.')` raised ValueError for file
        # names containing more than one dot (e.g. "notes.v2.md");
        # rpartition splits on the last dot only.
        name, _, ext = file.rpartition('.')
        if ext == 'md':
            infile = join(src_path, file)
            outfile = '{}/{}.{}'.format(dist_path, name, 'html')
            title = "Markdown to HTML from {}".format(name)
            with open(infile, 'r') as f:
                md = f.read()
            html = markdown.markdown(md, output_format='html5')
            doc = jinja2.Template(template).render(body=html, subject=title)
            # Context manager guarantees the output file is closed.
            with open(outfile, 'w') as out:
                out.write(doc)

if __name__ == '__main__':
    sys.exit(main())
|
# -*- coding: utf-8 -*-
"""
meraki
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class UpdateDeviceSwitchPortModel(object):

    """Implementation of the 'updateDeviceSwitchPort' model.

    A plain value object describing a switch-port update request. Every
    attribute mirrors one API field; `_names` maps each Python attribute
    name to the JSON property name used on the wire.

    Attributes:
        name (string): The name of the switch port
        tags (string): The tags of the switch port
        enabled (bool): The status of the switch port
        mtype (string): The type of the switch port ("access" or "trunk")
        vlan (int): The VLAN of the switch port. A null value will clear the
            value set for trunk ports.
        voice_vlan (int): The voice VLAN of the switch port. Only applicable
            to access ports.
        allowed_vlans (string): The VLANs allowed on the switch port. Only
            applicable to trunk ports.
        poe_enabled (bool): The PoE status of the switch port
        isolation_enabled (bool): The isolation status of the switch port
        rstp_enabled (bool): The rapid spanning tree protocol status
        stp_guard (string): The state of the STP guard ("disabled", "Root
            guard", "BPDU guard", "Loop guard")
        access_policy_number (int): The number of the access policy of the
            switch port. Only applicable to access ports.
        link_negotiation (string): The link speed for the switch port
        port_schedule_id (string): The ID of the port schedule. A value of
            null will clear the port schedule.
        udld (UdldEnum): The action to take when Unidirectional Link is
            detected (Alert only, Enforce). Default configuration is Alert
            only.
        mac_whitelist (list of string): Only devices with MAC addresses
            specified in this list will have access to this port. Up to 20
            MAC addresses can be defined.
        sticky_mac_whitelist (list of string): The initial list of MAC
            addresses for sticky Mac whitelist.
        sticky_mac_whitelist_limit (int): The maximum number of MAC addresses
            for sticky MAC whitelist.

    """

    # Python attribute name -> API (JSON) property name.
    _names = {
        "name": 'name',
        "tags": 'tags',
        "enabled": 'enabled',
        "mtype": 'type',
        "vlan": 'vlan',
        "voice_vlan": 'voiceVlan',
        "allowed_vlans": 'allowedVlans',
        "poe_enabled": 'poeEnabled',
        "isolation_enabled": 'isolationEnabled',
        "rstp_enabled": 'rstpEnabled',
        "stp_guard": 'stpGuard',
        "access_policy_number": 'accessPolicyNumber',
        "link_negotiation": 'linkNegotiation',
        "port_schedule_id": 'portScheduleId',
        "udld": 'udld',
        "mac_whitelist": 'macWhitelist',
        "sticky_mac_whitelist": 'stickyMacWhitelist',
        "sticky_mac_whitelist_limit": 'stickyMacWhitelistLimit'
    }

    def __init__(self,
                 name=None,
                 tags=None,
                 enabled=None,
                 mtype=None,
                 vlan=None,
                 voice_vlan=None,
                 allowed_vlans=None,
                 poe_enabled=None,
                 isolation_enabled=None,
                 rstp_enabled=None,
                 stp_guard=None,
                 access_policy_number=None,
                 link_negotiation=None,
                 port_schedule_id=None,
                 udld=None,
                 mac_whitelist=None,
                 sticky_mac_whitelist=None,
                 sticky_mac_whitelist_limit=None):
        """Constructor for the UpdateDeviceSwitchPortModel class"""
        # Assign every field verbatim; validation is left to the API.
        self.name = name
        self.tags = tags
        self.enabled = enabled
        self.mtype = mtype
        self.vlan = vlan
        self.voice_vlan = voice_vlan
        self.allowed_vlans = allowed_vlans
        self.poe_enabled = poe_enabled
        self.isolation_enabled = isolation_enabled
        self.rstp_enabled = rstp_enabled
        self.stp_guard = stp_guard
        self.access_policy_number = access_policy_number
        self.link_negotiation = link_negotiation
        self.port_schedule_id = port_schedule_id
        self.udld = udld
        self.mac_whitelist = mac_whitelist
        self.sticky_mac_whitelist = sticky_mac_whitelist
        self.sticky_mac_whitelist_limit = sticky_mac_whitelist_limit

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Look up each API property by its wire name; absent keys become
        # None, matching the constructor defaults.
        return cls(**{attr: dictionary.get(api_name)
                      for attr, api_name in cls._names.items()})
|
from flask import Flask, request, render_template, jsonify
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
# Time zones
import pytz
# time & atexit: scheduler of temperature recording
import time
import atexit
from apscheduler.schedulers.background import BackgroundScheduler
import datetime
import math
import requests
import random as rdm
from backend import motor as motor
import board
import busio
import adafruit_sht31d
# SHT31-D temperature/humidity sensor on the I2C bus.
i2c = busio.I2C(board.SCL, board.SDA)
sensor = adafruit_sht31d.SHT31D(i2c)
tz = pytz.timezone('Europe/Paris')
# Global application state shared across all routes.
status = {
    'temperature': sensor.temperature,
    'humidity': sensor.relative_humidity,
    'regulation': 'manual',
    'percentageMotor': 0,
    'motorStatus': "OK"
}
# ## ------------------------ ## #
# ## -- SERVER -- ## #
# ## ------------------------ ## #
FLASK_DEBUG = 1
app = Flask(__name__,
            static_folder="./dist/static",
            template_folder="./dist")
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////var/www/flaskregul/test1.db'
db = SQLAlchemy(app)
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
# ## RANDOM TEST ## #
@app.route('/api/random')
def random_number():
    """Health-check endpoint: return a random integer in [1, 100]."""
    response = {
        'randomNumber': rdm.randint(1, 100)
    }
    return jsonify(response)
# ## INITITALIZATION OF MOTOR POSITION ## #
@app.route('/api/initmotor')
def init_motor_request():
    """Drive the motor to its reference position and reset state to 0 %."""
    global status
    print("Motor asked for init... ")
    isOK = motor.initPosition()
    status["motorStatus"] = isOK
    status["percentageMotor"] = 0
    # Respond 500 when the hardware reported anything other than "OK".
    if isOK == "OK":
        return jsonify({'motorStatus': isOK})
    else:
        return jsonify({'motorStatus': isOK}), 500
# ## GET TEMPERATURE ## #
@app.route('/api/gettemperature', methods=['GET'])
def send_temperature():
    """Read the sensor and return the current temperature as JSON."""
    # demand = float(request.get_data('demanded'))
    temp = measure_temperature()
    return jsonify({'temperature': temp})
# ## GET HUMIDITY ## #
@app.route('/api/gethumidity', methods=['GET'])
def send_humidity():
    """Read the sensor and return the current relative humidity as JSON."""
    # demand = float(request.get_data('demanded'))
    return jsonify({'humidity': measure_humidity()})
# ## GET TEMPERATURE HISTORY ## #
def date_handler(obj):
    """JSON serialization helper: ISO-format dates/datetimes, None otherwise."""
    is_date = isinstance(obj, (datetime.datetime, datetime.date))
    return obj.isoformat() if is_date else None
@app.route('/api/gettemperaturehistory', methods=['GET'])
def send_temperature_history():
    """Return the recorded temperature history as parallel time/value lists."""
    db.create_all()  # only to be created once
    Temp_History = get_measurement_history()
    history = {'time': [], 'temperature': []}
    # Bug fix: the previous loop started at index 1 and silently dropped the
    # oldest record; iterate over every row.
    for record in Temp_History:
        history['time'].append(record.time)
        history['temperature'].append(record.temperature)
    return jsonify(history)
# ## MANUAL COMMAND ## #
@app.route('/api/manualdemand', methods=['POST'])
def receive_manual_demand():
    """Drive the motor to the percentage requested in the JSON body."""
    demanded = request.get_json()["demanded"]
    print("I received manual demand = " + str(demanded) + " %")
    print("Executing...")
    newPercentage = demand_motor(demanded)
    print("Executed = " + str(math.floor(newPercentage)))
    # Report the actually-reached position, rounded down to a whole percent.
    return jsonify({'realized': math.floor(newPercentage)})
# ## GET REGULATION STATUS (AUTO/MANUAL) ## #
@app.route('/api/getregulation', methods=['GET'])
def send_regulation():
    """Report the current regulation mode; in manual mode include position."""
    global status
    # Read the current regulation mode from the shared state.
    regulation = status["regulation"]
    print("sending info on regulation type = " + regulation)
    if regulation == 'manual':
        return jsonify({'regulation': regulation,
                        'realized': status["percentageMotor"]})
    else:
        return jsonify({'regulation': regulation})
# ## SET REGULATION STATUS (AUTO/MANUAL) ## #
@app.route('/api/setregulation', methods=['POST'])
def receive_regulation():
    """Switch the regulation mode; only "auto" and "manual" are accepted."""
    global status
    # Retrieve the requested regulation mode from the client.
    regulation = request.get_json()["regulation"]
    print("I received the regulation type = " + regulation)
    # Sanity check: reject anything but the two supported modes with a 400.
    if (regulation != "auto" and regulation != "manual"):
        return jsonify({'error': 'unauthorized regulation mode: '
                        + '\'' + regulation + '\''
                        }), 400
    else:
        status["regulation"] = regulation
        return jsonify({'realized': regulation})
# ## ROUTING ## #
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
    """Serve the SPA; in debug mode proxy unknown paths to the dev server."""
    if app.debug:
        # NOTE(review): hardcoded LAN address of the frontend dev server.
        return requests.get('http://192.168.1.67/{}'.format(path)).text
    return render_template("index.html")
# ## ------------------------- ## #
# ## -- MEASURE -- ## #
# ## ------------------------- ## #
def measure_temperature():
    """Return the current temperature from the SHT31-D sensor."""
    return sensor.temperature
def measure_humidity():
    """Return the current relative humidity from the SHT31-D sensor."""
    return sensor.relative_humidity
def get_measurement_history():
    """Return every stored TimeAndTemp row from the database."""
    return TimeAndTemp.query.all()
# ## ------------------------- ## #
# ## -- COMMAND -- ## #
# ## ------------------------- ## #
def demand_motor(percentage):
    """Move the motor toward *percentage*; return the position reached."""
    global status
    oldPercentage = status["percentageMotor"]
    # The driver moves relative to the last known position.
    newPercentage = motor.setPercentage(oldPercentage, percentage)
    status["percentageMotor"] = newPercentage
    return newPercentage
# ## ------------------------- ## #
# ## -- REGULATION -- ## #
# ## ------------------------- ## #
def regulation(temperature, demand):
    # Placeholder for the automatic regulation loop; not implemented yet.
    return True
# ## ------------------------- ## #
# ## -- TEMPERATURE LOG -- ## #
# ## ------------------------- ## #
class TimeAndTemp(db.Model):
    # One temperature sample: auto-filled timestamp plus measured value.
    id = db.Column(db.Integer, primary_key=True)
    time = db.Column(db.DateTime(timezone=True), nullable=False, default=datetime.datetime.utcnow)
    temperature = db.Column(db.Float, unique=False, nullable=False)

    def __repr__(self):
        tempstr = "%.1f" % self.temperature
        # Localize the stored timestamp to Paris time for display.
        return "\n<Time: " + tz.localize(self.time).__str__() + " // Temp. = " + tempstr + " °C>"
def RecordTemperature(temperature=None):
    """Persist one temperature sample with the current DB timestamp.

    Args:
        temperature: Optional pre-measured value; when None (the default,
            as used by the scheduler) the sensor is read directly. The
            __main__ block calls this with an argument, which previously
            raised TypeError because no parameter existed.
    """
    fTemp = measure_temperature() if temperature is None else temperature
    T0 = TimeAndTemp(time=db.func.now(), temperature=fTemp)
    db.session.add(T0)
    db.session.commit()
# scheduler
# Background job: persist a temperature sample every 30 seconds for the
# lifetime of the process; shut it down cleanly at interpreter exit.
scheduler = BackgroundScheduler()
scheduler.add_job(func=RecordTemperature, trigger="interval", seconds=30)
scheduler.start()
atexit.register(lambda: scheduler.shutdown())
# ## ------------------------- ## #
# ## -- LAUNCH -- ## #
# ## ------------------------- ## #
if __name__ == '__main__':
    temperature = measure_temperature()
    print("Starting... // temperature = " + str(temperature))
    # Motor checking for initPosition
    print("Motor intialization...")
    isOK = motor.initPosition()
    status = {
        'temperature': temperature,
        'regulation': 'auto',
        'percentageMotor': 0,
        'motorStatus': isOK
    }
    # Fixes: everything below used to run AFTER the blocking app.run() call,
    # so it was effectively dead code — and it also called RecordTemperature
    # with an argument the function did not accept, and started a second,
    # redundant scheduler (one is already running at module level). Create
    # the tables and take a first sample before launching the server instead.
    db.create_all()  # only to be created once
    RecordTemperature()
    # app launch (blocks until the server stops)
    app.run()
|
# Compare between two learning rates for the same model and dataset.
# Each dict is one experiment configuration consumed by the benchmark runner.
EXP_GROUPS = {'mnist':
                [
                    {'lr':1e-3, 'model':'mlp', 'dataset':'mnist'},
                    {'lr':1e-4, 'model':'mlp', 'dataset':'mnist'}
                ]
             }
#
# MIT License
#
# Copyright (c) 2018 WillQ
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from typing import Dict, List
import pandas
from monkq.exception import DataError
from monkq.utils.i18n import _
class LazyHDFTableStore():
    """Read-through cache for tables stored in one HDF5 file.

    A table is loaded from disk on first access and served from memory
    afterwards.
    """

    def __init__(self, hdf_path: str):
        self.hdf_path = hdf_path
        # key -> loaded DataFrame; populated lazily by get().
        self._cached: Dict[str, pandas.DataFrame] = dict()

    @property
    def cached_table(self) -> List[str]:
        """Names of the tables loaded so far, with surrounding '/' stripped."""
        return [key.strip('/') for key in self._cached.keys()]

    def get(self, key: str) -> pandas.DataFrame:
        """Return table *key*, reading it from the HDF file on first use.

        Raises:
            DataError: If *key* does not exist in the HDF file.
        """
        try:
            return self._cached[key]
        except KeyError:
            pass
        try:
            df = pandas.read_hdf(self.hdf_path, key)
        except KeyError:
            raise DataError(_("Not found hdf data {} in {}").format(key, self.hdf_path))
        self._cached[key] = df
        return df
|
import demistomock as demisto
import pytest
import ShowCampaignSenders
# Related-incident ids returned by the mocked `demisto.get` call.
INCIDENT_IDS = [{"id": '1'}, {"id": '2'}, {"id": '3'}]
# Mocked `demisto.executeCommand` reply: seven sender addresses across incidents.
DEMISTO_RESULT = [
    {
        'Contents': '[{"emailfrom": "[email protected]"}, {"emailfrom": "[email protected]"}, '
                    '{"emailfrom": "[email protected]"}, {"emailfrom": "[email protected]"},'
                    '{"emailfrom": "[email protected]"}, {"emailfrom": "[email protected]"},'
                    '{"emailfrom": "[email protected]"}]',
        'Type': 3
    }
]
# Expected markdown table: senders ranked by number of appearances.
EXPECTED_TABLE = '|Email|Number Of Appearances|\n|---|---|\n| [email protected] | 4 |\n| ' \
                 '[email protected] | 2 |\n| [email protected] | 1 |\n'
@pytest.mark.parametrize('execute_command_result, expected_md_result', [
    (DEMISTO_RESULT, EXPECTED_TABLE),
    ([{'Contents': '[]', 'Type': 3}], 'No incidents found.'),
    ([{'Contents': '[{}]', 'Type': 3}], 'No incident senders found.')
])
def test_show_campaign_senders(mocker, execute_command_result, expected_md_result):
    """
    Given:
        - Campaign incidents.
    When:
        - Running the show campaign senders script main function.
    Then:
        - Ensure the returned markdown result as expected.
    """
    mocker.patch.object(demisto, 'get', return_value=INCIDENT_IDS)
    mocker.patch.object(demisto, 'executeCommand', return_value=execute_command_result)
    # Capture return_results instead of letting the script emit output.
    mocker.patch.object(ShowCampaignSenders, 'return_results')
    ShowCampaignSenders.main()
    # First positional argument of the captured call holds the CommandResults.
    res = ShowCampaignSenders.return_results.call_args[0][0].readable_output
    assert expected_md_result == res
|
# -*- coding: utf-8 -*-
"""
VTK Point Cloud Rendered using PyVista Library
Create and render car shapes
Author: Jari Honkanen
"""
import numpy as np
import math
import pyvista as pv
from pyvista import examples
def get_example_point_cloud(decimateFactor = 0.05):
    """Download PyVista's LiDAR example and return a random point subset.

    ``decimateFactor`` is the fraction of the full dataset to keep.
    """
    print("Downloading PyVista LiDAR Example data ...")
    dataset = examples.download_lidar()
    print(f"Downloading complete. Downloaded {dataset.n_points} points")
    print(f"Data type {type(dataset)}")
    # Sample point indices uniformly at random (with possible repeats).
    sample_size = int(dataset.n_points * decimateFactor)
    chosen = np.random.randint(low=0, high=dataset.n_points - 1, size=sample_size)
    print(f"Number of points after decimation: {len(chosen)}")
    return dataset.points[chosen]
def create_ellipse_points(radius=0.5, height=2.0, step=0.05, x_pos=0.0, y_pos=0.0, z_pos=0.0, y_radius=1.0):
    """Create an elliptic-cylinder shell of points.

    Each z cross-section is an ellipse with semi-axis ``radius`` along x and
    ``y_radius`` along y. ``y_radius`` defaults to 1.0, which preserves the
    original behavior where the y coordinate was plain sin(angle) (i.e. the
    radius was never applied to y); pass ``y_radius=radius`` for a circular
    cylinder.

    :param height: total extent along z, centred on ``z_pos``.
    :param step: spacing used for both the z sweep and the angle sweep (rad).
    :return: (N, 3) numpy array of points.
    """
    points_array = []
    z_range = abs(height / 2)
    for z in np.arange(-z_range, z_range, step):
        for angle in np.arange(0.0, 2 * math.pi, step):
            x = radius * math.cos(angle)
            y = y_radius * math.sin(angle)
            points_array.append([x + x_pos, y + y_pos, z + z_pos])
    return np.array(points_array)
def create_torus_points(torus_radius=1.0, tube_radius=0.4, step=0.05, x_pos=0.0, y_pos=0.0, z_pos=0.0):
    """Sample a torus as a point cloud.

    The ring lies in the xz-plane (radius ``torus_radius``) with the tube
    (radius ``tube_radius``) extending along y; the whole cloud is translated
    by (x_pos, y_pos, z_pos). Returns an (N, 3) numpy array.
    """
    two_pi = 2 * math.pi
    pts = []
    for theta in np.arange(0.0, two_pi, step):
        # Distance of this tube slice from the torus axis, and its height.
        ring = torus_radius + tube_radius * math.cos(theta)
        tube_y = tube_radius * math.sin(theta)
        for phi in np.arange(0.0, two_pi, step):
            pts.append([ring * math.cos(phi) + x_pos,
                        tube_y + y_pos,
                        ring * math.sin(phi) + z_pos])
    return np.array(pts)
def create_box_points(x_size=1.0, y_size=1.0, z_size=1.0, step=0.05, x_pos=0.0, y_pos=0.0, z_pos=0.0):
    """Sample the six faces of an axis-aligned box as a point cloud.

    Each face is sampled on a ``step`` grid (far edges excluded by the open
    arange interval) and the cloud is translated by (x_pos, y_pos, z_pos).
    Returns an (N, 3) numpy array.
    """
    pts = []
    # Bottom and top faces (constant z).
    for z in (0, z_size):
        pts.extend([x + x_pos, y + y_pos, z + z_pos]
                   for x in np.arange(0.0, x_size, step)
                   for y in np.arange(0.0, y_size, step))
    # Front and back faces (constant y).
    for y in (0, y_size):
        pts.extend([x + x_pos, y + y_pos, z + z_pos]
                   for x in np.arange(0.0, x_size, step)
                   for z in np.arange(0.0, z_size, step))
    # Left and right faces (constant x).
    for x in (0, x_size):
        pts.extend([x + x_pos, y + y_pos, z + z_pos]
                   for y in np.arange(0.0, y_size, step)
                   for z in np.arange(0.0, z_size, step))
    return np.array(pts)
def create_car_sedan_points(x_size=4.1, y_size=1.8, z_size=1.5, step=0.05, x_pos=0.0, y_pos=0.0, z_pos=0.0):
    """Assemble a sedan-shaped point cloud (typical sedan: 4.1 x 1.8 x 1.5 m).

    The body is two stacked boxes (full-length lower body plus a half-length
    cabin) with four torus wheels; everything is translated by
    (x_pos, y_pos, z_pos). Returns an (N, 3) numpy array.
    """
    lower_body = create_box_points(x_size, y_size, 0.5 * z_size, step,
                                   x_pos, y_pos, z_pos)
    cabin = create_box_points(0.5 * x_size, 0.9 * y_size, 0.5 * z_size, step,
                              x_pos + 0.25 * x_size, y_pos + 0.05 * y_size,
                              z_pos + 0.5 * z_size)
    # Wheels at 20% / 80% of the length, on both sides (y = 0 and y = y_size),
    # in the order rear-right, front-right, rear-left, front-left.
    wheels = [
        create_torus_points(torus_radius=0.15 * z_size, tube_radius=0.05 * z_size,
                            step=2 * step, x_pos=x_pos + fx * x_size,
                            y_pos=y_pos + fy * y_size, z_pos=z_pos)
        for fx, fy in ((0.2, 0.0), (0.8, 0.0), (0.2, 1.0), (0.8, 1.0))
    ]
    return np.concatenate([lower_body, cabin] + wheels, axis=0)
class Car:
    """Factory for simple sedan point clouds of configurable size."""

    def __init__(self, x_size=4.1, y_size=1.8, z_size=1.5, step=0.05):
        self.setSize(x_size, y_size, z_size, step)

    def setSize(self, x_size=4.1, y_size=1.8, z_size=1.5, step=0.05):
        """Record the car's bounding dimensions and sampling step."""
        self.x_size = x_size
        self.y_size = y_size
        self.z_size = z_size
        self.step = step

    def spawn(self, x_pos=0.0, y_pos=0.0, z_pos=0.0):
        """Return the sedan point cloud translated to the given position."""
        return create_car_sedan_points(self.x_size, self.y_size, self.z_size,
                                       self.step, x_pos, y_pos, z_pos)
if __name__ == "__main__":
    # Spawn two sedans, the second offset so the cars don't overlap.
    sedan_a = Car()
    sedan_b = Car()
    points_array = np.concatenate(
        (sedan_a.spawn(), sedan_b.spawn(x_pos=5.0, y_pos=2.5)), axis=0)
    # Wrap the raw points in a PyVista mesh.
    point_cloud = pv.PolyData(points_array)
    # Color by the x coordinate so depth is visible in the render.
    xData = points_array[:, 0]
    print(f"xData points Array type: {type(xData)}")
    print(f"xData points Array shape: {xData.shape}")
    point_cloud["distance"] = xData
    # Render interactively.
    point_cloud.plot(render_points_as_spheres=True)
|
"""Serializer fields"""
import collections
import collections.abc

from django.contrib.gis import geos, forms
from django.db.models.query import QuerySet
from rest_framework import renderers
from rest_framework.fields import Field, FileField

from spillway.compat import json
from spillway.forms import fields
class GeometryField(Field):
    """Serializer field for GEOS geometries with renderer-aware sourcing."""

    def bind(self, field_name, parent):
        """Bind to ``parent`` and, when the request's accepted renderer
        format is available pre-serialized on the instance/queryset (e.g. a
        queryset annotation named 'geojson'), point ``source`` at it so the
        database-serialized value is used directly."""
        try:
            renderer = parent.context["request"].accepted_renderer
        except (AttributeError, KeyError):
            pass
        else:
            obj = parent.root.instance
            try:
                has_format = renderer.format in obj.query.annotations
            except AttributeError:
                # Not a queryset with annotations; probe the (first) object
                # for an attribute named after the renderer format.
                if not isinstance(obj, QuerySet):
                    try:
                        obj = obj[0]
                    except (IndexError, TypeError):
                        pass
                has_format = hasattr(obj, renderer.format)
            if has_format:
                self.source = renderer.format
        super().bind(field_name, parent)

    def get_attribute(self, instance):
        # SpatiaLite returns empty/invalid geometries in WKT or GeoJSON with
        # exceedingly high simplification tolerances.
        try:
            return super().get_attribute(instance)
        except geos.GEOSException:
            return None

    def to_internal_value(self, data):
        # forms.GeometryField cannot handle geojson dicts.
        # BUG FIX: `collections.Mapping` (an alias deprecated since Python
        # 3.3) was removed in Python 3.10; the ABC lives in collections.abc.
        if isinstance(data, collections.abc.Mapping):
            data = json.dumps(data)
        field = fields.GeometryField(widget=forms.BaseGeometryWidget())
        return field.to_python(data)

    def to_representation(self, value):
        # Create a dict from the GEOSGeometry when the value is not previously
        # serialized from the spatial db.
        try:
            return {"type": value.geom_type, "coordinates": value.coords}
        # Value is already serialized as geojson, kml, etc.
        except AttributeError:
            return value
|
# Django settings for a local/vagrant madrona-priorities deployment.
SECRET_KEY = 'secret'

DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'NAME': 'priorities',
        'USER': 'vagrant',
    }
}

# This should be a local folder created for use with the install_media command
MEDIA_ROOT = '/usr/local/apps/madrona-priorities/mediaroot/'
MEDIA_URL = 'http://localhost:8000/media/'
# NOTE(review): STATIC_URL points at the media URL — looks intentional for
# this dev setup, but confirm static assets really are served from /media/.
STATIC_URL = 'http://localhost:8000/media/'
POSTGIS_TEMPLATE = 'template1'

DEBUG = True
TEMPLATE_DEBUG = DEBUG

# BUG FIX: the original read `ADMINS = (('Madrona', '...'))`, which is just a
# single 2-tuple of strings; Django expects a sequence of (name, email)
# pairs, so the trailing comma below is required.
ADMINS = (
    ('Madrona', '[email protected]'),
)

import logging
# Postgres query logging is noisy at DEBUG; only surface real errors.
logging.getLogger('django.db.backends').setLevel(logging.ERROR)

import os
LOG_FILE = os.path.join(os.path.dirname(__file__), '..', 'seak.log')

MARXAN_OUTDIR = '/home/vagrant/marxan_output'  # for vagrant boxes, put this outside the shared dir so we can symlink
MARXAN_TEMPLATEDIR = os.path.join(MARXAN_OUTDIR, 'template')
|
#!/usr/bin/python3
# server.py - Test server for web services.
# Monitor a TCP port for messages to be displayed on the pimoroni scroll phat hd.
import socket
import scrollphathd
import configparser
import logging
import signal
import sys
import os
import datetime
# Module-level listening socket; placeholder until Main() creates the real
# socket (cleanup() closes it on shutdown).
sock = 0
def expand_file_name(file_name):
    """Return ``file_name`` unchanged if absolute; otherwise resolve it
    relative to the directory containing this script."""
    if os.path.isabs(file_name):
        return file_name
    script_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(script_dir, file_name)
def get_server_address(file_name):
    """Read (host, port) from the [server_address] section of an .ini file,
    resolving relative paths against this script's directory."""
    parser = configparser.ConfigParser()
    parser.read(expand_file_name(file_name))
    section = parser['server_address']
    return section['host'], section.getint('port')
def cleanup():
    """Log shutdown, close the listening socket, and flush logging."""
    logging.debug(f"{datetime.datetime.now()} Server terminated")
    sock.close()
    logging.shutdown()
# sig_term_handler - Handle SIGTERM (e.g. kill) so the server shuts down
# cleanly when running in the background.
def sig_term_handler(signal, frame):
    # Release resources and exit with success status.
    cleanup()
    sys.exit(0)
# Main - receive messages over TCP and drive the scroll pHAT HD display.
def Main():
    """Run the message server.

    Protocol per recv: "<command>:<payload>", where command is
    0 = display message, 1 = set brightness (float payload),
    2 = toggle a 180-degree flip of the display.
    The socket timeout doubles as the scroll tick: every 0.05s without a
    connection, the displayed message advances one pixel.
    """
    # Setup signal handler so we can run in background.
    signal.signal(signal.SIGTERM, sig_term_handler)
    # Enable basic logging (log file lives next to this script).
    file_name = expand_file_name('server.log')
    logging.basicConfig(filename=file_name,
                        filemode='w',
                        level=logging.DEBUG)
    # Get socket information from the .ini file.
    server_address = get_server_address('socket.ini')
    # Create and initialize a TCP/IP socket (module-level so cleanup() can
    # close it from the signal handler).
    global sock
    sock = socket.socket()
    sock.bind(server_address)
    sock.settimeout(0.05)
    # Set defaults for scrolling display.
    scrollphathd.set_brightness(0.25)
    isFliped = False
    # Let the user know the server has started.
    logging.debug(str(datetime.datetime.now()) + " Server started")
    # Foreground (attached to the terminal's process group) vs background.
    if os.getpgrp() == os.tcgetpgrp(sys.stdout.fileno()):
        print("Server started, ctrl-c to exit")
    else:
        print("Server started, 'kill {}' to exit".format(os.getpid()))
    # Listen for incoming connections.
    sock.listen(1)
    try:
        while True:
            # Wait for a connection.
            try:
                conn, addr = sock.accept()
                logging.debug(str(datetime.datetime.now()) +" Connection from: " + str(addr))
                while True:
                    data = conn.recv(1024).decode()
                    if not data:
                        conn.close()
                        break
                    # Split "<command>:<payload>" at the first colon.
                    command, junk, data = data.partition(':')
                    # Parse the command.
                    if command == "0": # message
                        scrollphathd.clear()
                        scrollphathd.write_string(data, x=17) # 17 for scroll
                                                              # 0 for static
                    elif command == "1": # set brightness
                        scrollphathd.set_brightness(float(data))
                    elif command == "2": # invert display
                        if isFliped:
                            scrollphathd.flip(x=False, y=False)
                            isFliped = False
                        else:
                            scrollphathd.flip(x=True, y=True)
                            isFliped = True
            except socket.timeout:
                # On socket timeout, scroll the displayed message.
                scrollphathd.show()
                scrollphathd.scroll(1) # comment this out for static
    except KeyboardInterrupt:
        cleanup()

if __name__ == '__main__':
    Main()
#!/usr/bin/python3
import socket
import sys
# Require the listening port as the single command-line argument.
if len(sys.argv) < 2:
    print('usage: tcp_server port')
    sys.exit(1)
# ANSI 256-color escape codes for terminal output.
GREEN = '\033[38;5;82m'
# NOTE(review): this escape sequence is truncated (missing the color number
# and the 'm' terminator). It is unused below, but must be completed (e.g.
# '\033[38;5;196m') before use.
RED = '\033[38;5;'
# Switch the terminal text color to green for everything that follows.
print(GREEN)
# Banner
print("================")
print("|| TCP Server ||")
print("================")
port = int(sys.argv[1])
# Listen on all interfaces at the requested port; serve a single client.
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('', port))
server.listen(1)
client, addr = server.accept()
print("Received connection from {}:\033[1m\033[7m{}\033[27m\033[21m".format(addr[0], addr[1]))
# Server loop: echo every received chunk to stdout and acknowledge it.
while 1:
    data = client.recv(1024)
    if not data:
        # Peer closed the connection; shut down both directions and stop.
        client.shutdown(socket.SHUT_RDWR)
        break # if there's no more data to receive.
    print("Received Data:\n", data)
    client.send("ACK!".encode())
client.close()
|
import os
import requests
import json
import pandas as pd
from datetime import datetime, timedelta
ENV = "sandbox" #Use "sandbox" when testing, and "api" if you have an account at Tradier
API_TOKEN = "" #Fill in your Tradier API Token here
# NOTE(review): API requests fail with an authorization error while
# API_TOKEN is empty; it must be filled in before running.
###
#Script starts here
###
def main():
    """Entry point: load ticker symbols from symbols.csv and screen them
    for cash-secured-put candidates expiring in 10-47 days."""
    symbols = importCSV("symbols.csv")
    # Parameters: symbols, min DTE, max DTE.
    findCashSecuredPuts(symbols, 10, 47)
###
#API Functions
###
#Get Data from Tradier API
def getAPIData(url):
    """GET ``url`` from the Tradier API and return the decoded JSON payload.

    BUG FIX: the original returned None implicitly on any non-200 response,
    which crashed callers later with an opaque TypeError; raise_for_status()
    now fails fast with a clear HTTP error instead.
    """
    bearer_token = f"Bearer {API_TOKEN}"
    headers = {'Authorization': bearer_token, 'Accept': 'application/json'}
    response = requests.get(url, headers=headers)
    response.raise_for_status()  # raise requests.HTTPError on 4xx/5xx
    return json.loads(response.content.decode('utf-8'))
#Get all the upcoming expirations for given symbol
def getOptionExpirations(symbol):
    """Return the list of expiration dates for ``symbol`` ([] if none)."""
    payload = getAPIData(f"https://{ENV}.tradier.com/v1/markets/options/expirations?symbol={symbol}")
    if payload['expirations']:
        return payload['expirations']['date']
    return []
#Retrieve the options chain for given symbol and expiration
def getOptionsChain(symbol, expiration):
    """Return the option chain (with greeks) for ``symbol`` at
    ``expiration``, or [] when the API reports none."""
    payload = getAPIData(f"https://{ENV}.tradier.com/v1/markets/options/chains?symbol={symbol}&expiration={expiration}&greeks=true")
    if payload['options']:
        return payload['options']['option']
    return []
#Retrieves latest stock price from Tradier Market API
def getLastStockPrice(symbol):
    """Return the latest trade price for ``symbol``, or -1 if unavailable."""
    payload = getAPIData(f"https://{ENV}.tradier.com/v1/markets/quotes?symbols={symbol}")
    if 'quote' in payload['quotes']:
        return payload['quotes']['quote']['last']
    return -1
###
#Utility functions
###
#Import CSV files using Pandas library
def importCSV(filename_in):
    """Read the 'Symbol' column of ``filename_in`` into a plain list."""
    return pd.read_csv(filename_in)['Symbol'].to_list()
#Limit expirations of symbol to provided min_dte (Min Days Until Expiration) and max_dte (Max Days Until Expiration)
def listOfLimitedExpirations(symbol, min_dte, max_dte):
    """Return ``symbol``'s expiration dates strictly inside the
    (now + min_dte, now + max_dte) window.

    Returns [] when the API hands back a bare string instead of a list.
    Fix: the window bounds were recomputed (with a fresh datetime.now())
    on every loop iteration; they are loop-invariant and are now hoisted.
    """
    expirations_list = getOptionExpirations(symbol)
    if isinstance(expirations_list, str):
        return []
    now = datetime.now()
    expiration_min_date = now + timedelta(min_dte)
    expiration_max_date = now + timedelta(max_dte)
    expirations = []
    for expiration_date in expirations_list:
        # Dates arrive as ISO strings, e.g. "2021-06-18".
        date_object = datetime.strptime(expiration_date, "%Y-%m-%d")
        if expiration_min_date < date_object < expiration_max_date:
            expirations.append(expiration_date)
    return expirations
def exportToFile(data, filename_out):
    """Write screened option rows to CSV with a fixed header, no index."""
    columns = ['Symbol', 'Expiration', 'Strike', 'Bid', 'Ask', 'Volume', 'Delta', 'Premium']
    pd.DataFrame(data, columns=columns).to_csv(filename_out, index=False)
#Creates a new dictionary with options data
def gatherOptionData(option):
    """Flatten a raw Tradier option record into the fields this script uses;
    greeks (delta/theta/gamma) are included only when the API supplied them."""
    data = {
        'symbol': option['underlying'],
        'type': option['option_type'],
        'expiration': option['expiration_date'],
        'strike': option['strike'],
        'bid': option['bid'],
        'ask': option['ask'],
        'volume': option['volume'],
        'open_int': option['open_interest'],
    }
    greeks = option.get('greeks', None)
    if greeks:
        data['delta'] = greeks['delta']
        data['theta'] = greeks['theta']
        data['gamma'] = greeks['gamma']
    return data
###
# Main function for filtering the PUT options we are looking for
# You will have to set your own criteria
# Generally, for minimum criteria, you want:
# tight bid/ask spreads (under .15)
# Some liquidity (Looking for volume greater than 0)
# Certain delta, minimum premium, etc.
###
def findCashSecuredPuts(ListOfSymbols, minDays, maxDays):
    """Screen every symbol for put options matching the hard-coded criteria,
    print each match, and write all matches to
    'output_cash_secured_puts.csv'.

    :param ListOfSymbols: ticker symbols to scan.
    :param minDays: minimum days-to-expiration (exclusive lower bound).
    :param maxDays: maximum days-to-expiration (exclusive upper bound).
    """
    #Adjust these according to your criteria
    MAX_BID_ASK_SPREAD = .15
    MIN_PRICE = 10
    MAX_PRICE = 70
    MIN_PREM = .30
    MAX_DELTA = -.2
    matching_options = []
    data_frame = []
    for symbol in ListOfSymbols:
        print(f"Processing {symbol}...")
        #Depending on your list of symbols, you may want to filter by current price, since you will need buying power
        last_price = getLastStockPrice(symbol)
        if (last_price <= MIN_PRICE or last_price >= MAX_PRICE):
            continue
        #We only want options expiring within a certain timeframe
        expirations_list = listOfLimitedExpirations(symbol, minDays, maxDays)
        numOptions = 0
        for expiration in expirations_list:
            #First we need the options chain
            options = getOptionsChain(symbol, expiration)
            for option_item in options:
                #This will just gather data from option into a more useful dictionary
                option = gatherOptionData(option_item)
                #Start filtering by your criteria here
                #Make sure there is a bid/ask, otherwise there's probably no liquidity
                if (option['bid'] is None or option['ask'] is None):
                    continue
                #Estimated premium (this goes by the approx mid price)
                premium = round((option['bid'] + option['ask']) / 2,2)
                #Check for delta if it exists
                # Sentinel: -999 fails the `delta >= MAX_DELTA` test below, so
                # options without greeks are filtered out.
                delta = -999
                if ('delta' in option):
                    delta = option['delta']
                #Filter out the options we actually want
                if (option['type'] == "put"
                    and option['bid'] > 0
                    and delta >= MAX_DELTA
                    and premium >= MIN_PREM
                    and (option['ask'] - option['bid']) <= MAX_BID_ASK_SPREAD
                    and option['volume'] > 0
                    ):
                    #Format the output
                    option_output = '{}, {}, BID:{}, ASK:{}, {}, {}(D), Premium: {}'\
                        .format(
                            option['expiration'],
                            option['strike'],
                            option['bid'],
                            option['ask'],
                            option['volume'],
                            delta,
                            premium)
                    #Separate by symbol: emit a header before the first match
                    if (numOptions == 0):
                        matching_options.append(f"Symbol: {symbol}")
                        numOptions += 1
                    #Print to the screen when a match is found
                    print(f"Wheel: {option_output}")
                    #Add data to Pandas DataFrame
                    data_frame.append([symbol,
                                      option['expiration'],
                                      option['strike'],
                                      option['bid'],
                                      option['ask'],
                                      option['volume'],
                                      delta,
                                      premium])
    #Export results to a new csv file
    exportToFile(data_frame, "output_cash_secured_puts.csv")

if __name__ == '__main__':
    main()
|
"""Modified code from https://developers.google.com/optimization/routing/tsp#or-tools """
# Copyright Matthew Mack (c) 2020 under CC-BY 4.0: https://creativecommons.org/licenses/by/4.0/
from __future__ import print_function
import math
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
from PIL import Image, ImageDraw
import os
import time
import copy
from itertools import permutations
# Change these file names to the relevant files.
ORIGINAL_IMAGE = "images/brother-1024-stipple.png"
IMAGE_TSP = "images/brother-1024-stipple.tsp"
# Change the number of points according to the base tsp file you are using.
NUMBER_OF_POINTS = 1024
# Number of contiguous tour slices 2-opt optimizes independently; halved on
# each pass in two_opt_solution, so powers of two are natural here.
NUMBER_OF_PARTITIONS = 8
# Node id the tour starts (and closes) at.
INITIAL_VERTEX = 0
def create_data_model():
    """Parse IMAGE_TSP and return the routing data dict
    (locations, num_vehicles, depot)."""
    coordinates = []
    with open(IMAGE_TSP) as handle:
        # The .tsp file carries a 6-line header before the node list.
        for _ in range(6):
            next(handle)
        for record in handle:
            _index, x, y = record.split()
            coordinates.append((int(float(x)), int(float(y))))
    return {
        'locations': coordinates,  # block-unit coordinates
        'num_vehicles': 1,
        'depot': 0,
    }
def compute_euclidean_distance_matrix(locations):
    """Return a dict-of-dicts lookup of truncated-integer Euclidean
    distances between every pair of locations (0 on the diagonal)."""
    matrix = {}
    for i, (xi, yi) in enumerate(locations):
        row = {}
        for j, (xj, yj) in enumerate(locations):
            row[j] = 0 if i == j else int(math.hypot(xi - xj, yi - yj))
        matrix[i] = row
    return matrix
def print_solution(manager, routing, solution):
    """Print vehicle 0's route and its total distance to the console.

    BUG FIX: the original appended the route-distance line to
    ``plan_output`` *after* printing it, so the distance was computed but
    never displayed; the append now happens before the print.
    """
    print('Objective: {}'.format(solution.ObjectiveValue()))
    index = routing.Start(0)
    plan_output = 'Route:\n'
    route_distance = 0
    while not routing.IsEnd(index):
        plan_output += ' {} ->'.format(manager.IndexToNode(index))
        previous_index = index
        index = solution.Value(routing.NextVar(index))
        route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)
    plan_output += ' {}\n'.format(manager.IndexToNode(index))
    plan_output += 'Objective: {}m\n'.format(route_distance)
    print(plan_output)
def get_routes(solution, routing, manager):
    """Get vehicle routes from a solution and store them in an array."""
    # Get vehicle routes and store them in a two dimensional array whose
    # i,j entry is the jth location visited by vehicle i along its route.
    # NOTE(review): unlike the upstream OR-Tools sample, `solution` here is
    # the successor dict (node -> next node) produced by two_opt_solution /
    # nearest_neighbors_solution, not an Assignment — the Assignment-based
    # walk is left commented out below.
    routes = []
    for route_nbr in range(routing.vehicles()):
        index = routing.Start(route_nbr)
        route = [manager.IndexToNode(index)]
        #while not routing.IsEnd(index):
        #    index = solution.Value(routing.NextVar(index))
        counter = 0
        # Follow the successor chain exactly len(solution) hops; the final
        # hop goes through the NUMBER_OF_POINTS sentinel stored by
        # convert_group_to_solution — presumably intended to close the tour;
        # TODO confirm IndexToNode handles that sentinel index.
        while counter < len(solution):
            counter += 1
            index = solution[index]
            route.append(manager.IndexToNode(index))
        routes.append(route)
    # Single-vehicle TSP: only the first route is used.
    return routes[0]
def draw_routes(nodes, path):
    """Takes a set of nodes and a path, and outputs an image of the drawn
    TSP path, saved next to the input as '*-tsp.png'."""
    polyline = [nodes[int(stop)] for stop in path]
    width, height = Image.open(ORIGINAL_IMAGE).size
    canvas = Image.new("RGBA", (width, height), color='white')
    ImageDraw.Draw(canvas).line(polyline, fill='black', width=1)
    # The .tsp coordinates are y-up; flip so the PNG comes out right side up.
    canvas = canvas.transpose(Image.FLIP_TOP_BOTTOM)
    FINAL_IMAGE = IMAGE_TSP.replace("-stipple.tsp","-tsp.png")
    canvas.save(FINAL_IMAGE)
    print("TSP solution has been drawn and can be viewed at", FINAL_IMAGE)
def nearest_neighbors_solution(distance_matrix):
    """Greedy nearest-neighbour tour starting at INITIAL_VERTEX.

    Returns a successor dict (node -> next node); the last visited node maps
    back to INITIAL_VERTEX, closing the tour.
    """
    seen = {v: False for v in range(NUMBER_OF_POINTS)}
    successor = {v: -1 for v in range(NUMBER_OF_POINTS)}
    current = INITIAL_VERTEX
    while True:
        seen[current] = True
        best_dist = float("inf")
        best_next = -1
        # Pick the closest node not yet on the tour.
        for candidate in distance_matrix[current]:
            if not seen[candidate] and distance_matrix[current][candidate] < best_dist:
                best_dist = distance_matrix[current][candidate]
                best_next = candidate
        if best_next == -1:
            # All nodes visited: close the loop back to the start.
            successor[current] = INITIAL_VERTEX
            return successor
        successor[current] = best_next
        current = best_next
def two_opt_solution(distance_matrix):
    """Build a nearest-neighbour tour, then refine it with partitioned
    2-opt passes (partition count halves each pass: 8, 4, 2, 1)."""
    tour = convert_solution_to_group(nearest_neighbors_solution(distance_matrix))
    partitions = NUMBER_OF_PARTITIONS
    while partitions > 0:
        two_opt(distance_matrix, tour, partitions)
        partitions //= 2
    return convert_group_to_solution(tour)
def two_opt(distance_matrix, group, partitions):
    """In-place 2-opt improvement, applied independently to each of
    `partitions` contiguous slices of the tour `group`.

    For each slice, repeatedly find the edge pair whose segment reversal
    gives the biggest decrease in tour length and apply it, until no
    improving move remains.
    """
    partition_size = int(len(group)/partitions)
    for k in range(partitions):
        while True:
            min_change = 0
            min_i = -1
            min_j = -1
            # Scan all (i, j) pairs inside partition k for the best
            # improving reversal: removing edges (u,v) and (w,x) and
            # re-linking as (u,w) and (v,x).
            for i in range(1 + (k*partition_size), ((k+1)*partition_size)-2):
                for j in range(i+1, ((k+1)*partition_size)):
                    u = group[i-1]
                    v = group[i]
                    w = group[j]
                    x = group[(j+1) % ((k+1)*partition_size)]
                    current_distance = (distance_matrix[u][v] + distance_matrix[w][x])
                    new_distance = (distance_matrix[u][w] + distance_matrix[v][x])
                    change = new_distance - current_distance
                    if change < min_change:
                        min_change = change
                        min_i = i
                        min_j = j
            # If no improving move was found, min_i == min_j == -1 and this
            # call is a harmless no-op (group[-1:0] is an empty slice).
            swap_edges(group, min_i, min_j)
            if min_change == 0:
                break
            print(min_change)
def swap_edges(group, v, w):
    """Reverse the tour segment group[v..w] inclusive (the 2-opt move)."""
    segment = group[v:w + 1]
    segment.reverse()
    group[v:w + 1] = segment
def convert_group_to_solution(group):
    """Convert an ordered tour list into a successor dict (node -> next).

    The last node maps to NUMBER_OF_POINTS as an end-of-tour sentinel
    (consumed by get_routes). Fix: removed a leftover debug
    ``print(solution)`` that dumped the entire successor dict to stdout on
    every call.
    """
    solution = {}
    for i in range(len(group)-1):
        solution[group[i]] = group[i+1]
    solution[group[-1]] = NUMBER_OF_POINTS
    return solution
def convert_solution_to_group(solution):
    """Walk the successor dict from INITIAL_VERTEX and return the tour as
    an ordered list of NUMBER_OF_POINTS node ids."""
    tour = []
    node = INITIAL_VERTEX
    for _ in range(NUMBER_OF_POINTS):
        tour.append(node)
        node = solution[node]
    return tour
def calculate_group_cost(distance_matrix, group):
    """Total length of the closed tour `group`, including the wrap-around
    edge from the last node back to the first."""
    n = len(group)
    return sum(distance_matrix[group[i]][group[(i + 1) % n]] for i in range(n))
def main():
    """Entry point of the program.

    Pipeline: parse the .tsp points, build the distance matrix, set up the
    OR-Tools routing model (kept for its index manager / drawing helpers),
    solve with the local two-opt heuristic, then draw and report the tour.
    """
    starting_moment = time.time()
    # Instantiate the data problem.
    print("Step 1/5: Initialising variables")
    data = create_data_model()
    # Create the routing index manager.
    manager = pywrapcp.RoutingIndexManager(len(data['locations']),
                                           data['num_vehicles'], data['depot'])
    # Create Routing Model.
    routing = pywrapcp.RoutingModel(manager)
    print("Step 2/5: Computing distance matrix")
    distance_matrix = compute_euclidean_distance_matrix(data['locations'])
    def distance_callback(from_index, to_index):
        """Returns the distance between the two nodes."""
        # Convert from routing variable Index to distance matrix NodeIndex.
        from_node = manager.IndexToNode(from_index)
        to_node = manager.IndexToNode(to_index)
        return distance_matrix[from_node][to_node]
    transit_callback_index = routing.RegisterTransitCallback(distance_callback)
    # Define cost of each arc.
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
    # Setting first solution heuristic.
    print("Step 3/5: Setting an initial solution")
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
    # Solve the problem.
    print("Step 4/5: Solving")
    # NOTE(review): the OR-Tools solver call is disabled in favour of the
    # local nearest-neighbour + 2-opt heuristic; `solution` is therefore a
    # successor dict, not an Assignment (see get_routes).
    #solution = routing.SolveWithParameters(search_parameters)
    #solution = nearest_neighbors_solution(distance_matrix)
    solution = two_opt_solution(distance_matrix)
    # Print solution on console.
    if solution:
        #print_solution(manager, routing, solution)
        print("Step 5/5: Drawing the solution")
        routes = get_routes(solution, routing, manager)
        draw_routes(data['locations'], routes)
    else:
        print("A solution couldn't be found :(")
    finishing_moment = time.time()
    print("Total time elapsed during execution: " + str(finishing_moment - starting_moment) + " seconds")
    print("Total distance: " + str(calculate_group_cost(distance_matrix, convert_solution_to_group(solution))))

if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2020 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx_policy.infra.domains.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class CommunicationMaps(VapiInterface):
"""
"""
REVISE_OPERATION_TOP = "insert_top"
"""
Possible value for ``operation`` of method :func:`CommunicationMaps.revise`.
"""
REVISE_OPERATION_BOTTOM = "insert_bottom"
"""
Possible value for ``operation`` of method :func:`CommunicationMaps.revise`.
"""
REVISE_OPERATION_AFTER = "insert_after"
"""
Possible value for ``operation`` of method :func:`CommunicationMaps.revise`.
"""
REVISE_OPERATION_BEFORE = "insert_before"
"""
Possible value for ``operation`` of method :func:`CommunicationMaps.revise`.
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx_policy.infra.domains.communication_maps'
"""
Identifier of the service in canonical form.
"""
    def __init__(self, config):
        """
        :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        # _CommunicationMapsStub carries the generated REST metadata for this
        # service; it is defined elsewhere in this auto-generated module.
        VapiInterface.__init__(self, config, _CommunicationMapsStub)
        self._VAPI_OPERATION_IDS = {}
    def delete(self,
               domain_id,
               communication_map_id,
               ):
        """
        Deletes the communication map along with all the communication entries
        This API is deprecated. Please use the following API instead. DELETE
        /infra/domains/domain-id/security-policies/security-policy-id

        :type domain_id: :class:`str`
        :param domain_id: (required)
        :type communication_map_id: :class:`str`
        :param communication_map_id: (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        # Auto-generated wrapper: marshals the arguments and invokes the
        # 'delete' operation on the underlying vAPI stub.
        return self._invoke('delete',
                            {
                            'domain_id': domain_id,
                            'communication_map_id': communication_map_id,
                            })
    def get(self,
            domain_id,
            communication_map_id,
            ):
        """
        Read communication-map for a domain. This API is deprecated. Please use
        the following API instead. GET
        /infra/domains/domain-id/security-policies/security-policy-id

        :type domain_id: :class:`str`
        :param domain_id: (required)
        :type communication_map_id: :class:`str`
        :param communication_map_id: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.CommunicationMap`
        :return: com.vmware.nsx_policy.model.CommunicationMap
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        # Auto-generated wrapper: marshals the arguments and invokes the
        # 'get' operation on the underlying vAPI stub.
        return self._invoke('get',
                            {
                            'domain_id': domain_id,
                            'communication_map_id': communication_map_id,
                            })
    def list(self,
             domain_id,
             cursor=None,
             include_mark_for_delete_objects=None,
             included_fields=None,
             page_size=None,
             sort_ascending=None,
             sort_by=None,
             ):
        """
        List all communication maps for a domain. This API is deprecated.
        Please use the following API instead. GET
        /infra/domains/domain-id/security-policies

        :type domain_id: :class:`str`
        :param domain_id: (required)
        :type cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor to be used for getting next page of records (supplied
            by current result page) (optional)
        :type include_mark_for_delete_objects: :class:`bool` or ``None``
        :param include_mark_for_delete_objects: Include objects that are marked for deletion in results (optional,
            default to false)
        :type included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields that should be included in query
            result (optional)
        :type page_size: :class:`long` or ``None``
        :param page_size: Maximum number of results to return in this page (server may return
            fewer) (optional, default to 1000)
        :type sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type sort_by: :class:`str` or ``None``
        :param sort_by: Field by which records are sorted (optional)
        :rtype: :class:`com.vmware.nsx_policy.model_client.CommunicationMapListResult`
        :return: com.vmware.nsx_policy.model.CommunicationMapListResult
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        # Auto-generated wrapper: None-valued optionals are passed through and
        # handled (defaulted) server-side.
        return self._invoke('list',
                            {
                            'domain_id': domain_id,
                            'cursor': cursor,
                            'include_mark_for_delete_objects': include_mark_for_delete_objects,
                            'included_fields': included_fields,
                            'page_size': page_size,
                            'sort_ascending': sort_ascending,
                            'sort_by': sort_by,
                            })
    def patch(self,
              domain_id,
              communication_map_id,
              communication_map,
              ):
        """
        Patch the communication map for a domain. If a communication map for
        the given communication-map-id is not present, the object will get
        created and if it is present it will be updated. This is a full replace
        This API is deprecated. Please use the following API instead. PATCH
        /infra/domains/domain-id/security-policies/security-policy-id

        :type domain_id: :class:`str`
        :param domain_id: (required)
        :type communication_map_id: :class:`str`
        :param communication_map_id: (required)
        :type communication_map: :class:`com.vmware.nsx_policy.model_client.CommunicationMap`
        :param communication_map: (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        # Auto-generated wrapper: marshals the arguments and invokes the
        # 'patch' operation on the underlying vAPI stub.
        return self._invoke('patch',
                            {
                            'domain_id': domain_id,
                            'communication_map_id': communication_map_id,
                            'communication_map': communication_map,
                            })
def revise(self,
domain_id,
communication_map_id,
communication_map,
anchor_path=None,
operation=None,
):
"""
This is used to set a precedence of a communication map w.r.t others.
This API is deprecated. Please use the following API instead. POST
/infra/domains/domain-id/security-policies/security-policy-id?action=revise
:type domain_id: :class:`str`
:param domain_id: (required)
:type communication_map_id: :class:`str`
:param communication_map_id: (required)
:type communication_map: :class:`com.vmware.nsx_policy.model_client.CommunicationMap`
:param communication_map: (required)
:type anchor_path: :class:`str` or ``None``
:param anchor_path: The communication map/communication entry path if operation is
'insert_after' or 'insert_before' (optional)
:type operation: :class:`str` or ``None``
:param operation: Operation (optional, default to insert_top)
:rtype: :class:`com.vmware.nsx_policy.model_client.CommunicationMap`
:return: com.vmware.nsx_policy.model.CommunicationMap
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('revise',
{
'domain_id': domain_id,
'communication_map_id': communication_map_id,
'communication_map': communication_map,
'anchor_path': anchor_path,
'operation': operation,
})
def update(self,
domain_id,
communication_map_id,
communication_map,
):
"""
Create or Update the communication map for a domain. This is a full
replace. All the CommunicationEntries are replaced. This API is
deprecated. Please use the following API instead. PUT
/infra/domains/domain-id/security-policies/security-policy-id
:type domain_id: :class:`str`
:param domain_id: (required)
:type communication_map_id: :class:`str`
:param communication_map_id: (required)
:type communication_map: :class:`com.vmware.nsx_policy.model_client.CommunicationMap`
:param communication_map: (required)
:rtype: :class:`com.vmware.nsx_policy.model_client.CommunicationMap`
:return: com.vmware.nsx_policy.model.CommunicationMap
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('update',
{
'domain_id': domain_id,
'communication_map_id': communication_map_id,
'communication_map': communication_map,
})
class GatewayPolicies(VapiInterface):
    """
    CRUD and precedence operations for gateway policies of a policy domain.
    """
    REVISE_OPERATION_TOP = "insert_top"
    """
    Possible value for ``operation`` of method :func:`GatewayPolicies.revise`.
    """
    REVISE_OPERATION_BOTTOM = "insert_bottom"
    """
    Possible value for ``operation`` of method :func:`GatewayPolicies.revise`.
    """
    REVISE_OPERATION_AFTER = "insert_after"
    """
    Possible value for ``operation`` of method :func:`GatewayPolicies.revise`.
    """
    REVISE_OPERATION_BEFORE = "insert_before"
    """
    Possible value for ``operation`` of method :func:`GatewayPolicies.revise`.
    """
    _VAPI_SERVICE_ID = 'com.vmware.nsx_policy.infra.domains.gateway_policies'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _GatewayPoliciesStub)
        self._VAPI_OPERATION_IDS = {}

    def delete(self, domain_id, gateway_policy_id):
        """
        Delete GatewayPolicy

        :type  domain_id: :class:`str`
        :param domain_id: (required)
        :type  gateway_policy_id: :class:`str`
        :param gateway_policy_id: (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'gateway_policy_id': gateway_policy_id,
        }
        return self._invoke('delete', request_args)

    def get(self, domain_id, gateway_policy_id):
        """
        Read gateway policy for a domain.

        :type  domain_id: :class:`str`
        :param domain_id: (required)
        :type  gateway_policy_id: :class:`str`
        :param gateway_policy_id: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.GatewayPolicy`
        :return: com.vmware.nsx_policy.model.GatewayPolicy
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'gateway_policy_id': gateway_policy_id,
        }
        return self._invoke('get', request_args)

    def list(self, domain_id, cursor=None,
             include_mark_for_delete_objects=None, included_fields=None,
             page_size=None, sort_ascending=None, sort_by=None):
        """
        List all gateway policies for specified Domain.

        :type  domain_id: :class:`str`
        :param domain_id: (required)
        :type  cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor, supplied by the current result page,
            that selects the next page of records (optional)
        :type  include_mark_for_delete_objects: :class:`bool` or ``None``
        :param include_mark_for_delete_objects: When true, objects that are
            marked for deletion are included in the results (optional,
            default to false)
        :type  included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields to include in
            the query result (optional)
        :type  page_size: :class:`long` or ``None``
        :param page_size: Upper bound on the number of results in this page;
            the server may return fewer (optional, default to 1000)
        :type  sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type  sort_by: :class:`str` or ``None``
        :param sort_by: Name of the field by which records are sorted
            (optional)
        :rtype: :class:`com.vmware.nsx_policy.model_client.GatewayPolicyListResult`
        :return: com.vmware.nsx_policy.model.GatewayPolicyListResult
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        # None-valued paging/sorting options are forwarded untouched; the
        # server applies its documented defaults.
        request_args = {
            'domain_id': domain_id,
            'cursor': cursor,
            'include_mark_for_delete_objects': include_mark_for_delete_objects,
            'included_fields': included_fields,
            'page_size': page_size,
            'sort_ascending': sort_ascending,
            'sort_by': sort_by,
        }
        return self._invoke('list', request_args)

    def patch(self, domain_id, gateway_policy_id, gateway_policy):
        """
        Update the gateway policy for a domain. This is a full replace: all
        the rules are replaced.

        :type  domain_id: :class:`str`
        :param domain_id: (required)
        :type  gateway_policy_id: :class:`str`
        :param gateway_policy_id: (required)
        :type  gateway_policy: :class:`com.vmware.nsx_policy.model_client.GatewayPolicy`
        :param gateway_policy: (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'gateway_policy_id': gateway_policy_id,
            'gateway_policy': gateway_policy,
        }
        return self._invoke('patch', request_args)

    def revise(self, domain_id, gateway_policy_id, gateway_policy,
               anchor_path=None, operation=None):
        """
        Set the precedence of a gateway policy relative to others.

        :type  domain_id: :class:`str`
        :param domain_id: (required)
        :type  gateway_policy_id: :class:`str`
        :param gateway_policy_id: (required)
        :type  gateway_policy: :class:`com.vmware.nsx_policy.model_client.GatewayPolicy`
        :param gateway_policy: (required)
        :type  anchor_path: :class:`str` or ``None``
        :param anchor_path: Path of the anchor security policy/rule when
            operation is 'insert_after' or 'insert_before' (optional)
        :type  operation: :class:`str` or ``None``
        :param operation: Operation (optional, default to insert_top)
        :rtype: :class:`com.vmware.nsx_policy.model_client.GatewayPolicy`
        :return: com.vmware.nsx_policy.model.GatewayPolicy
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'gateway_policy_id': gateway_policy_id,
            'gateway_policy': gateway_policy,
            'anchor_path': anchor_path,
            'operation': operation,
        }
        return self._invoke('revise', request_args)

    def update(self, domain_id, gateway_policy_id, gateway_policy):
        """
        Update the gateway policy for a domain. This is a full replace: all
        the rules are replaced.

        :type  domain_id: :class:`str`
        :param domain_id: (required)
        :type  gateway_policy_id: :class:`str`
        :param gateway_policy_id: (required)
        :type  gateway_policy: :class:`com.vmware.nsx_policy.model_client.GatewayPolicy`
        :param gateway_policy: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.GatewayPolicy`
        :return: com.vmware.nsx_policy.model.GatewayPolicy
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'gateway_policy_id': gateway_policy_id,
            'gateway_policy': gateway_policy,
        }
        return self._invoke('update', request_args)
class Groups(VapiInterface):
    """
    CRUD operations for groups within a policy domain.
    """
    _VAPI_SERVICE_ID = 'com.vmware.nsx_policy.infra.domains.groups'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _GroupsStub)
        self._VAPI_OPERATION_IDS = {}

    def delete(self, domain_id, group_id, fail_if_subtree_exists=None,
               force=None):
        """
        Delete Group

        :type  domain_id: :class:`str`
        :param domain_id: Domain ID (required)
        :type  group_id: :class:`str`
        :param group_id: Group ID (required)
        :type  fail_if_subtree_exists: :class:`bool` or ``None``
        :param fail_if_subtree_exists: Do not delete if the group subtree has
            any entities (optional, default to false)
        :type  force: :class:`bool` or ``None``
        :param force: Force delete the resource even if it is being used
            somewhere (optional, default to false)
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'group_id': group_id,
            'fail_if_subtree_exists': fail_if_subtree_exists,
            'force': force,
        }
        return self._invoke('delete', request_args)

    def get(self, domain_id, group_id):
        """
        Read group

        :type  domain_id: :class:`str`
        :param domain_id: Domain ID (required)
        :type  group_id: :class:`str`
        :param group_id: Group ID (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.Group`
        :return: com.vmware.nsx_policy.model.Group
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'group_id': group_id,
        }
        return self._invoke('get', request_args)

    def list(self, domain_id, cursor=None,
             include_mark_for_delete_objects=None, included_fields=None,
             page_size=None, sort_ascending=None, sort_by=None):
        """
        List Groups for a domain

        :type  domain_id: :class:`str`
        :param domain_id: Domain ID (required)
        :type  cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor, supplied by the current result page,
            that selects the next page of records (optional)
        :type  include_mark_for_delete_objects: :class:`bool` or ``None``
        :param include_mark_for_delete_objects: When true, objects that are
            marked for deletion are included in the results (optional,
            default to false)
        :type  included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields to include in
            the query result (optional)
        :type  page_size: :class:`long` or ``None``
        :param page_size: Upper bound on the number of results in this page;
            the server may return fewer (optional, default to 1000)
        :type  sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type  sort_by: :class:`str` or ``None``
        :param sort_by: Name of the field by which records are sorted
            (optional)
        :rtype: :class:`com.vmware.nsx_policy.model_client.GroupListResult`
        :return: com.vmware.nsx_policy.model.GroupListResult
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'cursor': cursor,
            'include_mark_for_delete_objects': include_mark_for_delete_objects,
            'included_fields': included_fields,
            'page_size': page_size,
            'sort_ascending': sort_ascending,
            'sort_by': sort_by,
        }
        return self._invoke('list', request_args)

    def patch(self, domain_id, group_id, group):
        """
        If a group with the group-id is not already present, create a new
        group. If it already exists, patch the group.

        :type  domain_id: :class:`str`
        :param domain_id: Domain ID (required)
        :type  group_id: :class:`str`
        :param group_id: Group ID (required)
        :type  group: :class:`com.vmware.nsx_policy.model_client.Group`
        :param group: (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'group_id': group_id,
            'group': group,
        }
        return self._invoke('patch', request_args)

    def update(self, domain_id, group_id, group):
        """
        If a group with the group-id is not already present, create a new
        group. If it already exists, update the group.

        :type  domain_id: :class:`str`
        :param domain_id: Domain ID (required)
        :type  group_id: :class:`str`
        :param group_id: Group ID (required)
        :type  group: :class:`com.vmware.nsx_policy.model_client.Group`
        :param group: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.Group`
        :return: com.vmware.nsx_policy.model.Group
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'group_id': group_id,
            'group': group,
        }
        return self._invoke('update', request_args)
class RedirectionPolicies(VapiInterface):
    """
    CRUD and listing operations for redirection policies.
    """
    _VAPI_SERVICE_ID = 'com.vmware.nsx_policy.infra.domains.redirection_policies'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _RedirectionPoliciesStub)
        self._VAPI_OPERATION_IDS = {}

    def delete(self, domain_id, redirection_policy_id):
        """
        Delete redirection policy.

        :type  domain_id: :class:`str`
        :param domain_id: Domain id (required)
        :type  redirection_policy_id: :class:`str`
        :param redirection_policy_id: Redirection map id (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'redirection_policy_id': redirection_policy_id,
        }
        return self._invoke('delete', request_args)

    def get(self, domain_id, redirection_policy_id):
        """
        Read redirection policy.

        :type  domain_id: :class:`str`
        :param domain_id: Domain id (required)
        :type  redirection_policy_id: :class:`str`
        :param redirection_policy_id: Redirection map id (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.RedirectionPolicy`
        :return: com.vmware.nsx_policy.model.RedirectionPolicy
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'redirection_policy_id': redirection_policy_id,
        }
        return self._invoke('get', request_args)

    def list(self, cursor=None, include_mark_for_delete_objects=None,
             included_fields=None, page_size=None, sort_ascending=None,
             sort_by=None):
        """
        List all redirection policys across all domains ordered by
        precedence.

        :type  cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor, supplied by the current result page,
            that selects the next page of records (optional)
        :type  include_mark_for_delete_objects: :class:`bool` or ``None``
        :param include_mark_for_delete_objects: When true, objects that are
            marked for deletion are included in the results (optional,
            default to false)
        :type  included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields to include in
            the query result (optional)
        :type  page_size: :class:`long` or ``None``
        :param page_size: Upper bound on the number of results in this page;
            the server may return fewer (optional, default to 1000)
        :type  sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type  sort_by: :class:`str` or ``None``
        :param sort_by: Name of the field by which records are sorted
            (optional)
        :rtype: :class:`com.vmware.nsx_policy.model_client.RedirectionPolicyListResult`
        :return: com.vmware.nsx_policy.model.RedirectionPolicyListResult
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'cursor': cursor,
            'include_mark_for_delete_objects': include_mark_for_delete_objects,
            'included_fields': included_fields,
            'page_size': page_size,
            'sort_ascending': sort_ascending,
            'sort_by': sort_by,
        }
        return self._invoke('list', request_args)

    def list_0(self, domain_id, cursor=None,
               include_mark_for_delete_objects=None, included_fields=None,
               page_size=None, sort_ascending=None, sort_by=None):
        """
        List redirection policys for a domain

        :type  domain_id: :class:`str`
        :param domain_id: Domain id (required)
        :type  cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor, supplied by the current result page,
            that selects the next page of records (optional)
        :type  include_mark_for_delete_objects: :class:`bool` or ``None``
        :param include_mark_for_delete_objects: When true, objects that are
            marked for deletion are included in the results (optional,
            default to false)
        :type  included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields to include in
            the query result (optional)
        :type  page_size: :class:`long` or ``None``
        :param page_size: Upper bound on the number of results in this page;
            the server may return fewer (optional, default to 1000)
        :type  sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type  sort_by: :class:`str` or ``None``
        :param sort_by: Name of the field by which records are sorted
            (optional)
        :rtype: :class:`com.vmware.nsx_policy.model_client.RedirectionPolicyListResult`
        :return: com.vmware.nsx_policy.model.RedirectionPolicyListResult
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'cursor': cursor,
            'include_mark_for_delete_objects': include_mark_for_delete_objects,
            'included_fields': included_fields,
            'page_size': page_size,
            'sort_ascending': sort_ascending,
            'sort_by': sort_by,
        }
        return self._invoke('list_0', request_args)

    def patch(self, domain_id, redirection_policy_id, redirection_policy):
        """
        Create or update the redirection policy.

        :type  domain_id: :class:`str`
        :param domain_id: Domain id (required)
        :type  redirection_policy_id: :class:`str`
        :param redirection_policy_id: Redirection map id (required)
        :type  redirection_policy: :class:`com.vmware.nsx_policy.model_client.RedirectionPolicy`
        :param redirection_policy: (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'redirection_policy_id': redirection_policy_id,
            'redirection_policy': redirection_policy,
        }
        return self._invoke('patch', request_args)

    def update(self, domain_id, redirection_policy_id, redirection_policy):
        """
        Create or update the redirection policy.

        :type  domain_id: :class:`str`
        :param domain_id: Domain id (required)
        :type  redirection_policy_id: :class:`str`
        :param redirection_policy_id: Redirection map id (required)
        :type  redirection_policy: :class:`com.vmware.nsx_policy.model_client.RedirectionPolicy`
        :param redirection_policy: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.RedirectionPolicy`
        :return: com.vmware.nsx_policy.model.RedirectionPolicy
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'redirection_policy_id': redirection_policy_id,
            'redirection_policy': redirection_policy,
        }
        return self._invoke('update', request_args)
class SecurityPolicies(VapiInterface):
    """
    CRUD and precedence operations for security policies of a policy domain.
    """
    REVISE_OPERATION_TOP = "insert_top"
    """
    Possible value for ``operation`` of method :func:`SecurityPolicies.revise`.
    """
    REVISE_OPERATION_BOTTOM = "insert_bottom"
    """
    Possible value for ``operation`` of method :func:`SecurityPolicies.revise`.
    """
    REVISE_OPERATION_AFTER = "insert_after"
    """
    Possible value for ``operation`` of method :func:`SecurityPolicies.revise`.
    """
    REVISE_OPERATION_BEFORE = "insert_before"
    """
    Possible value for ``operation`` of method :func:`SecurityPolicies.revise`.
    """
    _VAPI_SERVICE_ID = 'com.vmware.nsx_policy.infra.domains.security_policies'
    """
    Identifier of the service in canonical form.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _SecurityPoliciesStub)
        self._VAPI_OPERATION_IDS = {}

    def delete(self, domain_id, security_policy_id):
        """
        Deletes the security policy along with all the rules

        :type  domain_id: :class:`str`
        :param domain_id: (required)
        :type  security_policy_id: :class:`str`
        :param security_policy_id: (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'security_policy_id': security_policy_id,
        }
        return self._invoke('delete', request_args)

    def get(self, domain_id, security_policy_id):
        """
        Read security policy for a domain.

        :type  domain_id: :class:`str`
        :param domain_id: (required)
        :type  security_policy_id: :class:`str`
        :param security_policy_id: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.SecurityPolicy`
        :return: com.vmware.nsx_policy.model.SecurityPolicy
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'security_policy_id': security_policy_id,
        }
        return self._invoke('get', request_args)

    def list(self, domain_id, cursor=None,
             include_mark_for_delete_objects=None, included_fields=None,
             page_size=None, sort_ascending=None, sort_by=None):
        """
        List all security policies for a domain.

        :type  domain_id: :class:`str`
        :param domain_id: (required)
        :type  cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor, supplied by the current result page,
            that selects the next page of records (optional)
        :type  include_mark_for_delete_objects: :class:`bool` or ``None``
        :param include_mark_for_delete_objects: When true, objects that are
            marked for deletion are included in the results (optional,
            default to false)
        :type  included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields to include in
            the query result (optional)
        :type  page_size: :class:`long` or ``None``
        :param page_size: Upper bound on the number of results in this page;
            the server may return fewer (optional, default to 1000)
        :type  sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type  sort_by: :class:`str` or ``None``
        :param sort_by: Name of the field by which records are sorted
            (optional)
        :rtype: :class:`com.vmware.nsx_policy.model_client.SecurityPolicyListResult`
        :return: com.vmware.nsx_policy.model.SecurityPolicyListResult
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'cursor': cursor,
            'include_mark_for_delete_objects': include_mark_for_delete_objects,
            'included_fields': included_fields,
            'page_size': page_size,
            'sort_ascending': sort_ascending,
            'sort_by': sort_by,
        }
        return self._invoke('list', request_args)

    def patch(self, domain_id, security_policy_id, security_policy):
        """
        Patch the security policy for a domain. If a security policy for the
        given security-policy-id is not present, the object will get created
        and if it is present it will be updated. This is a full replace.

        :type  domain_id: :class:`str`
        :param domain_id: (required)
        :type  security_policy_id: :class:`str`
        :param security_policy_id: (required)
        :type  security_policy: :class:`com.vmware.nsx_policy.model_client.SecurityPolicy`
        :param security_policy: (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'security_policy_id': security_policy_id,
            'security_policy': security_policy,
        }
        return self._invoke('patch', request_args)

    def revise(self, domain_id, security_policy_id, security_policy,
               anchor_path=None, operation=None):
        """
        Set the precedence of a security policy relative to others.

        :type  domain_id: :class:`str`
        :param domain_id: (required)
        :type  security_policy_id: :class:`str`
        :param security_policy_id: (required)
        :type  security_policy: :class:`com.vmware.nsx_policy.model_client.SecurityPolicy`
        :param security_policy: (required)
        :type  anchor_path: :class:`str` or ``None``
        :param anchor_path: Path of the anchor security policy/rule when
            operation is 'insert_after' or 'insert_before' (optional)
        :type  operation: :class:`str` or ``None``
        :param operation: Operation (optional, default to insert_top)
        :rtype: :class:`com.vmware.nsx_policy.model_client.SecurityPolicy`
        :return: com.vmware.nsx_policy.model.SecurityPolicy
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'security_policy_id': security_policy_id,
            'security_policy': security_policy,
            'anchor_path': anchor_path,
            'operation': operation,
        }
        return self._invoke('revise', request_args)

    def update(self, domain_id, security_policy_id, security_policy):
        """
        Create or Update the security policy for a domain. This is a full
        replace: all the rules are replaced.

        :type  domain_id: :class:`str`
        :param domain_id: (required)
        :type  security_policy_id: :class:`str`
        :param security_policy_id: (required)
        :type  security_policy: :class:`com.vmware.nsx_policy.model_client.SecurityPolicy`
        :param security_policy: (required)
        :rtype: :class:`com.vmware.nsx_policy.model_client.SecurityPolicy`
        :return: com.vmware.nsx_policy.model.SecurityPolicy
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        request_args = {
            'domain_id': domain_id,
            'security_policy_id': security_policy_id,
            'security_policy': security_policy,
        }
        return self._invoke('update', request_args)
class _CommunicationMapsStub(ApiInterfaceStub):
    def __init__(self, config):
        """
        Bind the operation metadata for the communication-maps policy API
        (delete/get/list/patch/revise/update) rooted at
        /policy/api/v1/infra/domains/{domain-id}/communication-maps.
        """
        _model_pkg = 'com.vmware.nsx_policy.model_client'
        _errors_pkg = 'com.vmware.vapi.std.errors_client'
        _collection_url = '/policy/api/v1/infra/domains/{domain-id}/communication-maps'
        _entity_url = _collection_url + '/{communication-map-id}'

        def _std_errors():
            # Every operation advertises the same five standard vAPI errors.
            # A fresh dict is built per operation, mirroring the generated
            # one-literal-per-operation layout.
            return {
                'com.vmware.vapi.std.errors.service_unavailable':
                    type.ReferenceType(_errors_pkg, 'ServiceUnavailable'),
                'com.vmware.vapi.std.errors.invalid_request':
                    type.ReferenceType(_errors_pkg, 'InvalidRequest'),
                'com.vmware.vapi.std.errors.internal_server_error':
                    type.ReferenceType(_errors_pkg, 'InternalServerError'),
                'com.vmware.vapi.std.errors.unauthorized':
                    type.ReferenceType(_errors_pkg, 'Unauthorized'),
                'com.vmware.vapi.std.errors.not_found':
                    type.ReferenceType(_errors_pkg, 'NotFound'),
            }

        def _entity_path_vars():
            # Canonical field name -> hyphenated URL placeholder.
            return {
                'domain_id': 'domain-id',
                'communication_map_id': 'communication-map-id',
            }

        def _map_ref():
            return type.ReferenceType(_model_pkg, 'CommunicationMap')

        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'communication_map_id': type.StringType(),
        })
        delete_error_dict = _std_errors()
        delete_input_value_validator_list = []
        delete_output_validator_list = []
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template=_entity_url,
            path_variables=_entity_path_vars(),
            query_parameters={},
            content_type='application/json'
        )

        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'communication_map_id': type.StringType(),
        })
        get_error_dict = _std_errors()
        get_input_value_validator_list = []
        get_output_validator_list = [HasFieldsOfValidator()]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template=_entity_url,
            path_variables=_entity_path_vars(),
            query_parameters={},
            content_type='application/json'
        )

        # properties for list operation (all paging/sorting inputs optional)
        list_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'cursor': type.OptionalType(type.StringType()),
            'include_mark_for_delete_objects': type.OptionalType(type.BooleanType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_error_dict = _std_errors()
        list_input_value_validator_list = []
        list_output_validator_list = [HasFieldsOfValidator()]
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template=_collection_url,
            path_variables={
                'domain_id': 'domain-id',
            },
            query_parameters={
                'cursor': 'cursor',
                'include_mark_for_delete_objects': 'include_mark_for_delete_objects',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            },
            content_type='application/json'
        )

        # properties for patch operation (body carries the CommunicationMap)
        patch_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'communication_map_id': type.StringType(),
            'communication_map': _map_ref(),
        })
        patch_error_dict = _std_errors()
        patch_input_value_validator_list = [HasFieldsOfValidator()]
        patch_output_validator_list = []
        patch_rest_metadata = OperationRestMetadata(
            http_method='PATCH',
            url_template=_entity_url,
            request_body_parameter='communication_map',
            path_variables=_entity_path_vars(),
            query_parameters={},
            content_type='application/json'
        )

        # properties for revise operation (POST with action=revise)
        revise_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'communication_map_id': type.StringType(),
            'communication_map': _map_ref(),
            'anchor_path': type.OptionalType(type.StringType()),
            'operation': type.OptionalType(type.StringType()),
        })
        revise_error_dict = _std_errors()
        revise_input_value_validator_list = [HasFieldsOfValidator()]
        revise_output_validator_list = [HasFieldsOfValidator()]
        revise_rest_metadata = OperationRestMetadata(
            http_method='POST',
            url_template=_entity_url + '?action=revise',
            request_body_parameter='communication_map',
            path_variables=_entity_path_vars(),
            query_parameters={
                'anchor_path': 'anchor_path',
                'operation': 'operation',
            },
            content_type='application/json'
        )

        # properties for update operation (full replace; PUT)
        update_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'communication_map_id': type.StringType(),
            'communication_map': _map_ref(),
        })
        update_error_dict = _std_errors()
        update_input_value_validator_list = [HasFieldsOfValidator()]
        update_output_validator_list = [HasFieldsOfValidator()]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template=_entity_url,
            request_body_parameter='communication_map',
            path_variables=_entity_path_vars(),
            query_parameters={},
            content_type='application/json'
        )

        operations = {
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': _map_ref(),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType(_model_pkg, 'CommunicationMapListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'patch': {
                'input_type': patch_input_type,
                'output_type': type.VoidType(),
                'errors': patch_error_dict,
                'input_value_validator_list': patch_input_value_validator_list,
                'output_validator_list': patch_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'revise': {
                'input_type': revise_input_type,
                'output_type': _map_ref(),
                'errors': revise_error_dict,
                'input_value_validator_list': revise_input_value_validator_list,
                'output_validator_list': revise_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': _map_ref(),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'patch': patch_rest_metadata,
            'revise': revise_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx_policy.infra.domains.communication_maps',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _GatewayPoliciesStub(ApiInterfaceStub):
    def __init__(self, config):
        """
        Bind the operation metadata for the gateway-policies policy API
        (delete/get/list/patch/revise/update) rooted at
        /policy/api/v1/infra/domains/{domain-id}/gateway-policies.
        """
        _model_pkg = 'com.vmware.nsx_policy.model_client'
        _errors_pkg = 'com.vmware.vapi.std.errors_client'
        _collection_url = '/policy/api/v1/infra/domains/{domain-id}/gateway-policies'
        _entity_url = _collection_url + '/{gateway-policy-id}'

        def _std_errors():
            # Every operation advertises the same five standard vAPI errors.
            # A fresh dict is built per operation, mirroring the generated
            # one-literal-per-operation layout.
            return {
                'com.vmware.vapi.std.errors.service_unavailable':
                    type.ReferenceType(_errors_pkg, 'ServiceUnavailable'),
                'com.vmware.vapi.std.errors.invalid_request':
                    type.ReferenceType(_errors_pkg, 'InvalidRequest'),
                'com.vmware.vapi.std.errors.internal_server_error':
                    type.ReferenceType(_errors_pkg, 'InternalServerError'),
                'com.vmware.vapi.std.errors.unauthorized':
                    type.ReferenceType(_errors_pkg, 'Unauthorized'),
                'com.vmware.vapi.std.errors.not_found':
                    type.ReferenceType(_errors_pkg, 'NotFound'),
            }

        def _entity_path_vars():
            # Canonical field name -> hyphenated URL placeholder.
            return {
                'domain_id': 'domain-id',
                'gateway_policy_id': 'gateway-policy-id',
            }

        def _policy_ref():
            return type.ReferenceType(_model_pkg, 'GatewayPolicy')

        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'gateway_policy_id': type.StringType(),
        })
        delete_error_dict = _std_errors()
        delete_input_value_validator_list = []
        delete_output_validator_list = []
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template=_entity_url,
            path_variables=_entity_path_vars(),
            query_parameters={},
            content_type='application/json'
        )

        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'gateway_policy_id': type.StringType(),
        })
        get_error_dict = _std_errors()
        get_input_value_validator_list = []
        get_output_validator_list = [HasFieldsOfValidator()]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template=_entity_url,
            path_variables=_entity_path_vars(),
            query_parameters={},
            content_type='application/json'
        )

        # properties for list operation (all paging/sorting inputs optional)
        list_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'cursor': type.OptionalType(type.StringType()),
            'include_mark_for_delete_objects': type.OptionalType(type.BooleanType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_error_dict = _std_errors()
        list_input_value_validator_list = []
        list_output_validator_list = [HasFieldsOfValidator()]
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template=_collection_url,
            path_variables={
                'domain_id': 'domain-id',
            },
            query_parameters={
                'cursor': 'cursor',
                'include_mark_for_delete_objects': 'include_mark_for_delete_objects',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            },
            content_type='application/json'
        )

        # properties for patch operation (body carries the GatewayPolicy)
        patch_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'gateway_policy_id': type.StringType(),
            'gateway_policy': _policy_ref(),
        })
        patch_error_dict = _std_errors()
        patch_input_value_validator_list = [HasFieldsOfValidator()]
        patch_output_validator_list = []
        patch_rest_metadata = OperationRestMetadata(
            http_method='PATCH',
            url_template=_entity_url,
            request_body_parameter='gateway_policy',
            path_variables=_entity_path_vars(),
            query_parameters={},
            content_type='application/json'
        )

        # properties for revise operation (POST with action=revise)
        revise_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'gateway_policy_id': type.StringType(),
            'gateway_policy': _policy_ref(),
            'anchor_path': type.OptionalType(type.StringType()),
            'operation': type.OptionalType(type.StringType()),
        })
        revise_error_dict = _std_errors()
        revise_input_value_validator_list = [HasFieldsOfValidator()]
        revise_output_validator_list = [HasFieldsOfValidator()]
        revise_rest_metadata = OperationRestMetadata(
            http_method='POST',
            url_template=_entity_url + '?action=revise',
            request_body_parameter='gateway_policy',
            path_variables=_entity_path_vars(),
            query_parameters={
                'anchor_path': 'anchor_path',
                'operation': 'operation',
            },
            content_type='application/json'
        )

        # properties for update operation (full replace; PUT)
        update_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'gateway_policy_id': type.StringType(),
            'gateway_policy': _policy_ref(),
        })
        update_error_dict = _std_errors()
        update_input_value_validator_list = [HasFieldsOfValidator()]
        update_output_validator_list = [HasFieldsOfValidator()]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template=_entity_url,
            request_body_parameter='gateway_policy',
            path_variables=_entity_path_vars(),
            query_parameters={},
            content_type='application/json'
        )

        operations = {
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': _policy_ref(),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType(_model_pkg, 'GatewayPolicyListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'patch': {
                'input_type': patch_input_type,
                'output_type': type.VoidType(),
                'errors': patch_error_dict,
                'input_value_validator_list': patch_input_value_validator_list,
                'output_validator_list': patch_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'revise': {
                'input_type': revise_input_type,
                'output_type': _policy_ref(),
                'errors': revise_error_dict,
                'input_value_validator_list': revise_input_value_validator_list,
                'output_validator_list': revise_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': _policy_ref(),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'patch': patch_rest_metadata,
            'revise': revise_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx_policy.infra.domains.gateway_policies',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _GroupsStub(ApiInterfaceStub):
    def __init__(self, config):
        """
        Bind the operation metadata for the groups policy API
        (delete/get/list/patch/update) rooted at
        /policy/api/v1/infra/domains/{domain-id}/groups.
        """
        _model_pkg = 'com.vmware.nsx_policy.model_client'
        _errors_pkg = 'com.vmware.vapi.std.errors_client'
        _collection_url = '/policy/api/v1/infra/domains/{domain-id}/groups'
        _entity_url = _collection_url + '/{group-id}'

        def _std_errors():
            # Every operation advertises the same five standard vAPI errors.
            # A fresh dict is built per operation, mirroring the generated
            # one-literal-per-operation layout.
            return {
                'com.vmware.vapi.std.errors.service_unavailable':
                    type.ReferenceType(_errors_pkg, 'ServiceUnavailable'),
                'com.vmware.vapi.std.errors.invalid_request':
                    type.ReferenceType(_errors_pkg, 'InvalidRequest'),
                'com.vmware.vapi.std.errors.internal_server_error':
                    type.ReferenceType(_errors_pkg, 'InternalServerError'),
                'com.vmware.vapi.std.errors.unauthorized':
                    type.ReferenceType(_errors_pkg, 'Unauthorized'),
                'com.vmware.vapi.std.errors.not_found':
                    type.ReferenceType(_errors_pkg, 'NotFound'),
            }

        def _entity_path_vars():
            # Canonical field name -> hyphenated URL placeholder.
            return {
                'domain_id': 'domain-id',
                'group_id': 'group-id',
            }

        def _group_ref():
            return type.ReferenceType(_model_pkg, 'Group')

        # properties for delete operation (supports subtree/force modifiers)
        delete_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'group_id': type.StringType(),
            'fail_if_subtree_exists': type.OptionalType(type.BooleanType()),
            'force': type.OptionalType(type.BooleanType()),
        })
        delete_error_dict = _std_errors()
        delete_input_value_validator_list = []
        delete_output_validator_list = []
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template=_entity_url,
            path_variables=_entity_path_vars(),
            query_parameters={
                'fail_if_subtree_exists': 'fail_if_subtree_exists',
                'force': 'force',
            },
            content_type='application/json'
        )

        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'group_id': type.StringType(),
        })
        get_error_dict = _std_errors()
        get_input_value_validator_list = []
        get_output_validator_list = [HasFieldsOfValidator()]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template=_entity_url,
            path_variables=_entity_path_vars(),
            query_parameters={},
            content_type='application/json'
        )

        # properties for list operation (all paging/sorting inputs optional)
        list_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'cursor': type.OptionalType(type.StringType()),
            'include_mark_for_delete_objects': type.OptionalType(type.BooleanType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_error_dict = _std_errors()
        list_input_value_validator_list = []
        list_output_validator_list = [HasFieldsOfValidator()]
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template=_collection_url,
            path_variables={
                'domain_id': 'domain-id',
            },
            query_parameters={
                'cursor': 'cursor',
                'include_mark_for_delete_objects': 'include_mark_for_delete_objects',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            },
            content_type='application/json'
        )

        # properties for patch operation (body carries the Group)
        patch_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'group_id': type.StringType(),
            'group': _group_ref(),
        })
        patch_error_dict = _std_errors()
        patch_input_value_validator_list = [HasFieldsOfValidator()]
        patch_output_validator_list = []
        patch_rest_metadata = OperationRestMetadata(
            http_method='PATCH',
            url_template=_entity_url,
            request_body_parameter='group',
            path_variables=_entity_path_vars(),
            query_parameters={},
            content_type='application/json'
        )

        # properties for update operation (full replace; PUT)
        update_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'group_id': type.StringType(),
            'group': _group_ref(),
        })
        update_error_dict = _std_errors()
        update_input_value_validator_list = [HasFieldsOfValidator()]
        update_output_validator_list = [HasFieldsOfValidator()]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template=_entity_url,
            request_body_parameter='group',
            path_variables=_entity_path_vars(),
            query_parameters={},
            content_type='application/json'
        )

        operations = {
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': _group_ref(),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType(_model_pkg, 'GroupListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'patch': {
                'input_type': patch_input_type,
                'output_type': type.VoidType(),
                'errors': patch_error_dict,
                'input_value_validator_list': patch_input_value_validator_list,
                'output_validator_list': patch_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': _group_ref(),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'patch': patch_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx_policy.infra.domains.groups',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _RedirectionPoliciesStub(ApiInterfaceStub):
    """Generated REST stub for the NSX policy ``redirection-policies`` endpoints.

    For each wire operation (delete / get / list / list_0 / patch / update)
    the constructor builds three pieces of declarative metadata — an input
    :class:`type.StructType`, an error dictionary mapping vAPI error ids to
    reference types, and an :class:`OperationRestMetadata` describing the
    HTTP method, URL template and parameter binding — and registers them all
    with :class:`ApiInterfaceStub` at the end of ``__init__``.
    """
    def __init__(self, config):
        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'redirection_policy_id': type.StringType(),
        })
        delete_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template='/policy/api/v1/infra/domains/{domain-id}/redirection-policies/{redirection-policy-id}',
            path_variables={
                'domain_id': 'domain-id',
                'redirection_policy_id': 'redirection-policy-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'redirection_policy_id': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
            HasFieldsOfValidator()
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/domains/{domain-id}/redirection-policies/{redirection-policy-id}',
            path_variables={
                'domain_id': 'domain-id',
                'redirection_policy_id': 'redirection-policy-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for list operation
        list_input_type = type.StructType('operation-input', {
            'cursor': type.OptionalType(type.StringType()),
            'include_mark_for_delete_objects': type.OptionalType(type.BooleanType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        list_input_value_validator_list = [
        ]
        list_output_validator_list = [
            HasFieldsOfValidator()
        ]
        # NOTE(review): unlike every other operation here, the 'list' URL has
        # no {domain-id} segment and its input type takes no domain_id — it
        # appears to list policies across all domains; confirm against the
        # NSX policy API specification.
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/domains/redirection-policies',
            path_variables={
            },
            query_parameters={
                'cursor': 'cursor',
                'include_mark_for_delete_objects': 'include_mark_for_delete_objects',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            },
            content_type='application/json'
        )
        # properties for list_0 operation
        list_0_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'cursor': type.OptionalType(type.StringType()),
            'include_mark_for_delete_objects': type.OptionalType(type.BooleanType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_0_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        list_0_input_value_validator_list = [
        ]
        list_0_output_validator_list = [
            HasFieldsOfValidator()
        ]
        list_0_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/domains/{domain-id}/redirection-policies',
            path_variables={
                'domain_id': 'domain-id',
            },
            query_parameters={
                'cursor': 'cursor',
                'include_mark_for_delete_objects': 'include_mark_for_delete_objects',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            },
            content_type='application/json'
        )
        # properties for patch operation
        patch_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'redirection_policy_id': type.StringType(),
            'redirection_policy': type.ReferenceType('com.vmware.nsx_policy.model_client', 'RedirectionPolicy'),
        })
        patch_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        patch_input_value_validator_list = [
            HasFieldsOfValidator()
        ]
        patch_output_validator_list = [
        ]
        patch_rest_metadata = OperationRestMetadata(
            http_method='PATCH',
            url_template='/policy/api/v1/infra/domains/{domain-id}/redirection-policies/{redirection-policy-id}',
            request_body_parameter='redirection_policy',
            path_variables={
                'domain_id': 'domain-id',
                'redirection_policy_id': 'redirection-policy-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'redirection_policy_id': type.StringType(),
            'redirection_policy': type.ReferenceType('com.vmware.nsx_policy.model_client', 'RedirectionPolicy'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
            HasFieldsOfValidator()
        ]
        update_output_validator_list = [
            HasFieldsOfValidator()
        ]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/policy/api/v1/infra/domains/{domain-id}/redirection-policies/{redirection-policy-id}',
            request_body_parameter='redirection_policy',
            path_variables={
                'domain_id': 'domain-id',
                'redirection_policy_id': 'redirection-policy-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # Map each operation name to its I/O types, error map and validators.
        operations = {
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'RedirectionPolicy'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'RedirectionPolicyListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list_0': {
                'input_type': list_0_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'RedirectionPolicyListResult'),
                'errors': list_0_error_dict,
                'input_value_validator_list': list_0_input_value_validator_list,
                'output_validator_list': list_0_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'patch': {
                'input_type': patch_input_type,
                'output_type': type.VoidType(),
                'errors': patch_error_dict,
                'input_value_validator_list': patch_input_value_validator_list,
                'output_validator_list': patch_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'RedirectionPolicy'),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'list_0': list_0_rest_metadata,
            'patch': patch_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx_policy.infra.domains.redirection_policies',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _SecurityPoliciesStub(ApiInterfaceStub):
    """Generated REST stub for the NSX policy ``security-policies`` endpoints.

    Declares the wire metadata (input struct types, vAPI error maps, and
    :class:`OperationRestMetadata` bindings) for the delete / get / list /
    patch / revise / update operations and registers them with
    :class:`ApiInterfaceStub` at the end of ``__init__``.
    """
    def __init__(self, config):
        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'security_policy_id': type.StringType(),
        })
        delete_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template='/policy/api/v1/infra/domains/{domain-id}/security-policies/{security-policy-id}',
            path_variables={
                'domain_id': 'domain-id',
                'security_policy_id': 'security-policy-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'security_policy_id': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
            HasFieldsOfValidator()
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/domains/{domain-id}/security-policies/{security-policy-id}',
            path_variables={
                'domain_id': 'domain-id',
                'security_policy_id': 'security-policy-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for list operation
        list_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'cursor': type.OptionalType(type.StringType()),
            'include_mark_for_delete_objects': type.OptionalType(type.BooleanType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        list_input_value_validator_list = [
        ]
        list_output_validator_list = [
            HasFieldsOfValidator()
        ]
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/domains/{domain-id}/security-policies',
            path_variables={
                'domain_id': 'domain-id',
            },
            query_parameters={
                'cursor': 'cursor',
                'include_mark_for_delete_objects': 'include_mark_for_delete_objects',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            },
            content_type='application/json'
        )
        # properties for patch operation
        patch_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'security_policy_id': type.StringType(),
            'security_policy': type.ReferenceType('com.vmware.nsx_policy.model_client', 'SecurityPolicy'),
        })
        patch_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        patch_input_value_validator_list = [
            HasFieldsOfValidator()
        ]
        patch_output_validator_list = [
        ]
        patch_rest_metadata = OperationRestMetadata(
            http_method='PATCH',
            url_template='/policy/api/v1/infra/domains/{domain-id}/security-policies/{security-policy-id}',
            request_body_parameter='security_policy',
            path_variables={
                'domain_id': 'domain-id',
                'security_policy_id': 'security-policy-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for revise operation
        revise_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'security_policy_id': type.StringType(),
            'security_policy': type.ReferenceType('com.vmware.nsx_policy.model_client', 'SecurityPolicy'),
            'anchor_path': type.OptionalType(type.StringType()),
            'operation': type.OptionalType(type.StringType()),
        })
        revise_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        revise_input_value_validator_list = [
            HasFieldsOfValidator()
        ]
        revise_output_validator_list = [
            HasFieldsOfValidator()
        ]
        # NOTE(review): the 'revise' action is selected via the fixed
        # '?action=revise' baked into the URL template; anchor_path and
        # operation are bound as additional query parameters below.
        revise_rest_metadata = OperationRestMetadata(
            http_method='POST',
            url_template='/policy/api/v1/infra/domains/{domain-id}/security-policies/{security-policy-id}?action=revise',
            request_body_parameter='security_policy',
            path_variables={
                'domain_id': 'domain-id',
                'security_policy_id': 'security-policy-id',
            },
            query_parameters={
                'anchor_path': 'anchor_path',
                'operation': 'operation',
            },
            content_type='application/json'
        )
        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'domain_id': type.StringType(),
            'security_policy_id': type.StringType(),
            'security_policy': type.ReferenceType('com.vmware.nsx_policy.model_client', 'SecurityPolicy'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
            HasFieldsOfValidator()
        ]
        update_output_validator_list = [
            HasFieldsOfValidator()
        ]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/policy/api/v1/infra/domains/{domain-id}/security-policies/{security-policy-id}',
            request_body_parameter='security_policy',
            path_variables={
                'domain_id': 'domain-id',
                'security_policy_id': 'security-policy-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # Map each operation name to its I/O types, error map and validators.
        operations = {
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'SecurityPolicy'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'SecurityPolicyListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'patch': {
                'input_type': patch_input_type,
                'output_type': type.VoidType(),
                'errors': patch_error_dict,
                'input_value_validator_list': patch_input_value_validator_list,
                'output_validator_list': patch_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'revise': {
                'input_type': revise_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'SecurityPolicy'),
                'errors': revise_error_dict,
                'input_value_validator_list': revise_input_value_validator_list,
                'output_validator_list': revise_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'SecurityPolicy'),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'patch': patch_rest_metadata,
            'revise': revise_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx_policy.infra.domains.security_policies',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class StubFactory(StubFactoryBase):
    """Stub factory for the ``com.vmware.nsx_policy.infra.domains`` package.

    PascalCase entries map to the service stub classes defined in this
    module; lowercase entries are dotted paths to the child packages' own
    StubFactory classes, resolved lazily by :class:`StubFactoryBase`.
    """
    _attrs = {
        'CommunicationMaps': CommunicationMaps,
        'GatewayPolicies': GatewayPolicies,
        'Groups': Groups,
        'RedirectionPolicies': RedirectionPolicies,
        'SecurityPolicies': SecurityPolicies,
        'communication_maps': 'com.vmware.nsx_policy.infra.domains.communication_maps_client.StubFactory',
        'gateway_policies': 'com.vmware.nsx_policy.infra.domains.gateway_policies_client.StubFactory',
        'groups': 'com.vmware.nsx_policy.infra.domains.groups_client.StubFactory',
        'redirection_policies': 'com.vmware.nsx_policy.infra.domains.redirection_policies_client.StubFactory',
        'security_policies': 'com.vmware.nsx_policy.infra.domains.security_policies_client.StubFactory',
    }
|
import sys
sys.setrecursionlimit(10**6)


def has_cycle(n, edges):
    """Return True if the directed graph on nodes 0..n with the given
    (u, v) edge list contains a cycle.

    Uses the classic colored DFS: ``visited`` marks nodes ever entered,
    ``on_stack`` marks nodes on the current recursion path; meeting an
    on-stack node again means a back edge, hence a cycle.
    """
    graph = [[] for _ in range(n + 5)]
    for u, v in edges:
        graph[u].append(v)
    visited = [False] * (n + 5)
    on_stack = [False] * (n + 5)

    def dfs(u):
        visited[u] = True
        on_stack[u] = True
        for w in graph[u]:
            if on_stack[w]:
                return True
            if not visited[w] and dfs(w):
                return True
        on_stack[u] = False
        return False

    # Cover both 0- and 1-indexed labellings: the original looped
    # range(N), which silently skips node N when the input is 1-indexed.
    return any(not visited[u] and dfs(u) for u in range(n + 1))


if __name__ == "__main__":
    input = sys.stdin.readline  # faster than builtin input() for large inputs
    TC = int(input())
    for _ in range(TC):
        N, M = map(int, input().split())
        edges = [tuple(map(int, input().split())) for _ in range(M)]
        print("YES" if has_cycle(N, edges) else "NO")
|
#!/usr/bin/env python3
"""Count passphrases (one per line) whose words are all distinct."""


def count_valid_lines(lines):
    """Return how many lines contain no repeated whitespace-separated word.

    Accepts any iterable of strings (including an open file object), so the
    input is streamed instead of being materialized into a list first.
    """
    valid_lines = 0
    for line in lines:
        words = line.split()  # split() ignores trailing newline/whitespace
        if len(set(words)) == len(words):
            valid_lines += 1
    return valid_lines


if __name__ == '__main__':
    with open('input', 'r') as f:
        print('There were {} valid lines'.format(count_valid_lines(f)))
|
from muzero.network.muzero import MuZero, MuZeroAtariConfig
import gym
import asyncio
if __name__ == '__main__':
    # Train MuZero on Atari Breakout with the stock Atari configuration.
    breakout_env = gym.make('Breakout-v0')
    config = MuZeroAtariConfig(environment=breakout_env)
    agent = MuZero(config)
    agent.start_training()
|
"""nla_client_lib.py provides a wrapper to calls to the REST-style API which interfaces with the
CEDA NLA system. Common calls, such as `ls`, `quota` and making requests are wrapped in a few
functions."""
__author__ = 'sjp23'
import os
import requests
import json
from nla_client.nla_client_settings import NLA_SERVER_URL
user = os.environ["USER"]
baseurl = NLA_SERVER_URL
def ls(match, stages):
    """.. |br| raw:: html

    <br />

    Return a list of files in the NLA system given a pattern to match against, and a combination of stages
    of the files to filter on.

    :param string match: A pattern to match filenames against - i.e. does a filename contain this substring
    :param string stages: Filter the files based on the stage of the file within the NLA system. Stages can be any combination of **UDTAR**

        - **U**: UNVERIFIED
        - **D**: ONDISK
        - **T**: ONTAPE
        - **A**: RESTORING
        - **R**: RESTORED

    :return: A dictionary containing information about the files which match the pattern and stages, consisting of these keys:

        - **count** (*integer*) : The number of files in the NLA system matching the pattern and stage
        - **files** (*List[Dictionary]*]) : A list of information about each file

        |br|
        Each "files" Dictionary can contain the following keys (for each TapeFile):

        - **path** (`string`): logical path to the file.
        - **stage** (`char`): current stage of the file, one of **UDTAR** as above.
        - **verified** (`DateTime`): the date and time the file was verified on.
        - **size** (`integer`): the size of the file in bytes.

    :rtype: Dictionary
    """
    url = baseurl + "/api/v1/files"
    # Let requests build and percent-encode the query string: the original
    # "%s" interpolation sent reserved characters (spaces, &, ?, ...) in
    # `match` unescaped, corrupting the query.
    response = requests.get(url, params={"match": match, "stages": stages})
    return response.json()
def make_request(patterns=None, retention=None, files=None, label=None):
    """Add a retrieval request into the NLA system

    :param string patterns: (`optional`) pattern to match in a logical file path in request to restore files, e.g. "1986" to request to restore all files containing "1986"
    :param DateTime retention: (`optional`) time and date until when the files will remain in the restore area. Default is 20 days.
    :param List[string] files: (`optional`) list of files to request to restore
    :param string label: (`optional`) user supplied label for the request, visible when user examines their requests

    :raises ValueError: if both `patterns` and `files` are supplied

    :return: A HTTP Response object. The two most important elements of this object are:

        - **status_code** (`integer`): the HTTP status code:

            - 200 OK: Request was successful
            - 403 FORBIDDEN: error with user quota: either the user quota is full or the user could not be found

        - **json()** (`Dictionary`): information about the request, the possible keys are:

            - **req_id** (`integer`): the unique numeric identifier for the request
            - **error** (`string`): error message if request fails

    :rtype: `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`_
    """
    url = baseurl + "/api/v1/requests"
    data = {"quota": user}
    # A request is defined either by a pattern or by an explicit file list,
    # never both.  Raise instead of assert so the check survives `python -O`
    # (asserts are stripped under optimization).
    if patterns is not None and files is not None:
        raise ValueError("Can't define request files from list and pattern.")
    if patterns:
        data["patterns"] = patterns
    if files:
        data["files"] = files
    if retention:
        data["retention"] = retention
    if label:
        data["label"] = label
    response = requests.post(url, data=json.dumps(data))
    return response
def update_request(request_id, retention=None, label=None, notify_first=None, notify_last=None):
    """Update an existing retrieval request in the NLA system.

    Only the fields that are supplied are sent to the server; omitted ones
    are left untouched on the request.

    :param integer request_id: the unique integer id of the request
    :param DateTime retention: (`optional`) time and date until when the files will remain in the restore area. Default is 20 days.
    :param string label: (`optional`) user supplied label for the request, visible when user examines their requests
    :param string notify_first: (`optional`) email address to notify when the first restored file is available in the restore area
    :param string notify_last: (`optional`) email address to notify when the last file is available in the restore area - i.e. the request is complete

    :return: A HTTP Response object; ``status_code`` is 200 on success,
        403 when the user quota cannot be found, 404 when no request with
        `request_id` exists, and ``json()`` carries **req_id** or **error**.
    :rtype: `requests.Response <http://docs.python-requests.org/en/master/api/#requests.Response>`_
    """
    payload = {"quota": user}
    if retention:
        payload["retention"] = retention
    if label:
        payload["label"] = label
    # Empty strings are meaningful for the notify fields (they select the
    # default email on the user's quota), so compare against None explicitly
    # rather than relying on truthiness.
    if notify_first is not None:
        payload["notify_on_first_file"] = notify_first
    if notify_last is not None:
        payload["notify_on_last_file"] = notify_last
    return requests.put(baseurl + "/api/v1/requests/%s" % request_id,
                        data=json.dumps(payload))
def list_requests():
    """List all retrieval requests which have not passed their retention date
    for the current user.

    :return: on HTTP 200, a dictionary describing the user's quota and
        requests with the keys **used**, **notes**, **email**, **user**,
        **requests**, **id** and **size**; each entry of **requests**
        describes one TapeRequest (**id**, **request_date**, **retention**,
        **label**, **storaged_request_start**, **storaged_request_end**,
        **first_files_on_disk**, **last_files_on_disk**).
        Returns ``None`` for any non-200 response.
    :rtype: Dictionary or None
    """
    response = requests.get("%s/api/v1/quota/%s" % (baseurl, user))
    return response.json() if response.status_code == 200 else None
def show_request(request_number):
    """Show the information for a single request, given its integer identifier.

    :param integer request_number: the unique integer identifier for the request
    :return: on HTTP 200, a dictionary describing the request with keys
        **id**, **quota**, **retention**, **request_date**,
        **request_patterns**, **notify_on_first_file**,
        **notify_on_last_file**, **label**, the optional StorageD timing
        fields (**storaged_request_start**, **storaged_request_end**,
        **first_files_on_disk**, **last_files_on_disk**) and **files**
        (the list of files in the request).  Returns ``None`` for any
        non-200 response.
    :rtype: Dictionary or None
    """
    response = requests.get("%s/api/v1/requests/%s" % (baseurl, request_number))
    return response.json() if response.status_code == 200 else None
|
# file openpyxl/tests/test_dump.py
# Copyright (c) 2010-2011 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
# Python stdlib imports
from datetime import time, datetime
from tempfile import NamedTemporaryFile
import os
import os.path
import shutil
# 3rd party imports
from nose.tools import eq_, raises
from openpyxl.workbook import Workbook
from openpyxl.writer import dump_worksheet
from openpyxl.cell import get_column_letter
from openpyxl.reader.excel import load_workbook
from openpyxl.writer.strings import StringTableBuilder
from openpyxl.shared.compat import xrange
from openpyxl.shared.exc import WorkbookAlreadySaved
def _get_test_filename():
test_file = NamedTemporaryFile(mode='w', prefix='openpyxl.', suffix='.xlsx', delete=False)
test_file.close()
return test_file.name
def test_dump_sheet_title():
    """A sheet title set in optimized-write mode must survive save + reload."""
    filename = _get_test_filename()
    workbook = Workbook(optimized_write=True)
    workbook.create_sheet(title='Test1')
    workbook.save(filename)
    reloaded = load_workbook(filename)
    sheet = reloaded.get_sheet_by_name('Test1')
    eq_('Test1', sheet.title)
def test_dump_sheet():
    """Rows written through the optimized writer must round-trip unchanged."""
    filename = _get_test_filename()
    workbook = Workbook(optimized_write=True)
    sheet = workbook.create_sheet()
    letters = [get_column_letter(col + 1) for col in xrange(20)]
    expected_rows = []
    # 20 rows of cell-reference strings ('A1', 'B1', ..., 'A2', ...)
    for row_idx in xrange(20):
        expected_rows.append(['%s%d' % (letter, row_idx + 1)
                              for letter in letters])
    # 20 rows where every cell holds the 1-based row number
    for row_idx in xrange(20):
        expected_rows.append([(row_idx + 1) for _ in letters])
    # 10 rows of dates, cycling through the months across the columns
    for row_idx in xrange(10):
        expected_rows.append([datetime(2010, ((col % 12) + 1), row_idx + 1)
                              for col in range(len(letters))])
    # 20 rows of formula strings; excluded from the comparison below
    for row_idx in xrange(20):
        expected_rows.append(['=%s%d' % (letter, row_idx + 1)
                              for letter in letters])
    for row in expected_rows:
        sheet.append(row)
    workbook.save(filename)
    reloaded = load_workbook(filename)
    sheet = reloaded.worksheets[0]
    # compare everything except the trailing 20 formula rows
    for expected_row, loaded_row in zip(expected_rows[:-20], sheet.rows):
        for expected_cell, loaded_cell in zip(expected_row, loaded_row):
            eq_(expected_cell, loaded_cell.value)
    os.remove(filename)
def test_table_builder():
    """Repeated additions of a string must map to a single, stable index."""
    builder = StringTableBuilder()
    expected = {'a': 0, 'b': 1, 'c': 2, 'd': 3}
    for letter in sorted(expected):
        # add each string several times; the index must not change
        for _ in range(5):
            builder.add(letter)
    table = dict(builder.get_table())
    for key, index in expected.items():
        eq_(index, table[key])
def test_open_too_many_files():
    """Creating hundreds of sheets must not exhaust OS file handles."""
    filename = _get_test_filename()
    workbook = Workbook(optimized_write=True)
    # over 200 worksheets should raise an OSError ('too many open files')
    for _ in range(200):
        workbook.create_sheet()
    workbook.save(filename)
    os.remove(filename)
def test_create_temp_file():
    """create_temporary_file() must return the path of an existing file.

    Fix: the original version leaked the created temporary file; every other
    test in this module removes its temp files, so do the same here.
    """
    f = dump_worksheet.create_temporary_file()
    if not os.path.isfile(f):
        raise Exception("The file %s does not exist" % f)
    # clean up, consistent with the other tests in this module
    os.remove(f)
@raises(WorkbookAlreadySaved)
def test_dump_twice():
    """Saving an optimized-write workbook a second time must raise."""
    filename = _get_test_filename()
    workbook = Workbook(optimized_write=True)
    sheet = workbook.create_sheet()
    sheet.append(['hello'])
    workbook.save(filename)
    os.remove(filename)
    # second save on an already-saved workbook -> WorkbookAlreadySaved
    workbook.save(filename)
@raises(WorkbookAlreadySaved)
def test_append_after_save():
    """Appending rows after the workbook was saved must raise."""
    filename = _get_test_filename()
    workbook = Workbook(optimized_write=True)
    sheet = workbook.create_sheet()
    sheet.append(['hello'])
    workbook.save(filename)
    os.remove(filename)
    # the optimized writer is single-shot: appending now -> WorkbookAlreadySaved
    sheet.append(['hello'])
|
from __future__ import print_function, division, absolute_import
import itertools
import sys
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import skimage
import skimage.data
import skimage.morphology
import scipy
import scipy.special
import imgaug as ia
import imgaug.random as iarandom
from imgaug import parameters as iap
from imgaug.testutils import reseed
def _eps(arr):
    """Return a float-comparison tolerance suited to *arr*'s dtype.

    Float numpy arrays get their dtype's machine epsilon; everything else
    (non-arrays, integer arrays) gets a fixed coarse tolerance.
    """
    is_float_arr = ia.is_np_array(arr) and arr.dtype.kind == "f"
    return np.finfo(arr.dtype).eps if is_float_arr else 1e-4
class Test_handle_continuous_param(unittest.TestCase):
    """Tests for iap.handle_continuous_param()."""

    def test_value_range_is_none(self):
        param = iap.handle_continuous_param(
            1, "[test1]", value_range=None,
            tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue(isinstance(param, iap.Deterministic))

    def test_value_range_is_tuple_of_nones(self):
        param = iap.handle_continuous_param(
            1, "[test1b]", value_range=(None, None),
            tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue(isinstance(param, iap.Deterministic))

    def test_param_is_stochastic_parameter(self):
        param = iap.handle_continuous_param(
            iap.Deterministic(1), "[test2]", value_range=None,
            tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue(isinstance(param, iap.Deterministic))

    def test_value_range_is_tuple_of_integers(self):
        param = iap.handle_continuous_param(
            1, "[test3]", value_range=(0, 10),
            tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue(isinstance(param, iap.Deterministic))

    def test_param_is_outside_of_value_range(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_continuous_param(
                1, "[test4]", value_range=(2, 12),
                tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue("[test4]" in str(ctx.exception))

    def test_param_is_inside_value_range_and_no_lower_bound(self):
        # value within value range (without lower bound)
        param = iap.handle_continuous_param(
            1, "[test5]", value_range=(None, 12),
            tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue(isinstance(param, iap.Deterministic))

    def test_param_is_outside_of_value_range_and_no_lower_bound(self):
        # value outside of value range (without lower bound)
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_continuous_param(
                1, "[test6]", value_range=(None, 0),
                tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue("[test6]" in str(ctx.exception))

    def test_param_is_inside_value_range_and_no_upper_bound(self):
        # value within value range (without upper bound)
        param = iap.handle_continuous_param(
            1, "[test7]", value_range=(-1, None),
            tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue(isinstance(param, iap.Deterministic))

    def test_param_is_outside_of_value_range_and_no_upper_bound(self):
        # value outside of value range (without upper bound)
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_continuous_param(
                1, "[test8]", value_range=(2, None),
                tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue("[test8]" in str(ctx.exception))

    def test_tuple_as_value_but_no_tuples_allowed(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_continuous_param(
                (1, 2), "[test9]", value_range=None,
                tuple_to_uniform=False, list_to_choice=True)
        self.assertTrue("[test9]" in str(ctx.exception))

    def test_tuple_as_value_and_tuples_allowed(self):
        param = iap.handle_continuous_param(
            (1, 2), "[test10]", value_range=None,
            tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue(isinstance(param, iap.Uniform))

    def test_tuple_as_value_and_tuples_allowed_and_inside_value_range(self):
        param = iap.handle_continuous_param(
            (1, 2), "[test11]", value_range=(0, 10),
            tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue(isinstance(param, iap.Uniform))

    def test_tuple_value_and_allowed_and_partially_outside_value_range(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_continuous_param(
                (1, 2), "[test12]", value_range=(1.5, 13),
                tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue("[test12]" in str(ctx.exception))

    def test_tuple_value_and_allowed_and_fully_outside_value_range(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_continuous_param(
                (1, 2), "[test13]", value_range=(3, 13),
                tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue("[test13]" in str(ctx.exception))

    def test_list_as_value_but_no_lists_allowed(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_continuous_param(
                [1, 2, 3], "[test14]", value_range=None,
                tuple_to_uniform=True, list_to_choice=False)
        self.assertTrue("[test14]" in str(ctx.exception))

    def test_list_as_value_and_lists_allowed(self):
        param = iap.handle_continuous_param(
            [1, 2, 3], "[test15]", value_range=None,
            tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue(isinstance(param, iap.Choice))

    def test_list_value_and_allowed_and_partially_outside_value_range(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_continuous_param(
                [1, 2], "[test16]", value_range=(1.5, 13),
                tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue("[test16]" in str(ctx.exception))

    def test_list_value_and_allowed_and_fully_outside_of_value_range(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_continuous_param(
                [1, 2], "[test17]", value_range=(3, 13),
                tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue("[test17]" in str(ctx.exception))

    def test_value_inside_value_range_and_value_range_given_as_callable(self):
        def _value_range(x):
            return -1 < x < 1

        param = iap.handle_continuous_param(
            1, "[test18]", value_range=_value_range,
            tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue(isinstance(param, iap.Deterministic))

    def test_bad_datatype_as_value_range(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_continuous_param(
                1, "[test19]", value_range=False,
                tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue(
            "Unexpected input for value_range" in str(ctx.exception))
class Test_handle_discrete_param(unittest.TestCase):
    """Tests for iap.handle_discrete_param()."""

    def test_float_value_inside_value_range_but_no_floats_allowed(self):
        # a float input must be rejected when allow_floats=False
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_discrete_param(
                1.5, "[test0]", value_range=None, tuple_to_uniform=True,
                list_to_choice=True, allow_floats=False)
        self.assertTrue("[test0]" in str(ctx.exception))

    def test_value_range_is_none(self):
        param = iap.handle_discrete_param(
            1, "[test1]", value_range=None, tuple_to_uniform=True,
            list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(param, iap.Deterministic))

    def test_value_range_is_tuple_of_nones(self):
        param = iap.handle_discrete_param(
            1, "[test1b]", value_range=(None, None), tuple_to_uniform=True,
            list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(param, iap.Deterministic))

    def test_value_is_stochastic_parameter(self):
        param = iap.handle_discrete_param(
            iap.Deterministic(1), "[test2]", value_range=None,
            tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(param, iap.Deterministic))

    def test_value_inside_value_range(self):
        param = iap.handle_discrete_param(
            1, "[test3]", value_range=(0, 10), tuple_to_uniform=True,
            list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(param, iap.Deterministic))

    def test_value_outside_value_range(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_discrete_param(
                1, "[test4]", value_range=(2, 12), tuple_to_uniform=True,
                list_to_choice=True, allow_floats=True)
        self.assertTrue("[test4]" in str(ctx.exception))

    def test_value_inside_value_range_no_lower_bound(self):
        param = iap.handle_discrete_param(
            1, "[test5]", value_range=(None, 12), tuple_to_uniform=True,
            list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(param, iap.Deterministic))

    def test_value_outside_value_range_no_lower_bound(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_discrete_param(
                1, "[test6]", value_range=(None, 0), tuple_to_uniform=True,
                list_to_choice=True, allow_floats=True)
        self.assertTrue("[test6]" in str(ctx.exception))

    def test_value_inside_value_range_no_upper_bound(self):
        param = iap.handle_discrete_param(
            1, "[test7]", value_range=(-1, None), tuple_to_uniform=True,
            list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(param, iap.Deterministic))

    def test_value_outside_value_range_no_upper_bound(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_discrete_param(
                1, "[test8]", value_range=(2, None), tuple_to_uniform=True,
                list_to_choice=True, allow_floats=True)
        self.assertTrue("[test8]" in str(ctx.exception))

    def test_value_is_tuple_but_no_tuples_allowed(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_discrete_param(
                (1, 2), "[test9]", value_range=None, tuple_to_uniform=False,
                list_to_choice=True, allow_floats=True)
        self.assertTrue("[test9]" in str(ctx.exception))

    def test_value_is_tuple_and_tuples_allowed(self):
        param = iap.handle_discrete_param(
            (1, 2), "[test10]", value_range=None, tuple_to_uniform=True,
            list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(param, iap.DiscreteUniform))

    def test_value_tuple_and_allowed_and_inside_value_range(self):
        param = iap.handle_discrete_param(
            (1, 2), "[test11]", value_range=(0, 10), tuple_to_uniform=True,
            list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(param, iap.DiscreteUniform))

    def test_value_tuple_and_allowed_and_inside_vr_allow_floats_false(self):
        param = iap.handle_discrete_param(
            (1, 2), "[test11b]", value_range=(0, 10),
            tuple_to_uniform=True, list_to_choice=True, allow_floats=False)
        self.assertTrue(isinstance(param, iap.DiscreteUniform))

    def test_value_tuple_and_allowed_and_partially_outside_value_range(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_discrete_param(
                (1, 3), "[test12]", value_range=(2, 13),
                tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
        self.assertTrue("[test12]" in str(ctx.exception))

    def test_value_tuple_and_allowed_and_fully_outside_value_range(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_discrete_param(
                (1, 2), "[test13]", value_range=(3, 13),
                tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
        self.assertTrue("[test13]" in str(ctx.exception))

    def test_value_list_but_not_allowed(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_discrete_param(
                [1, 2, 3], "[test14]", value_range=None,
                tuple_to_uniform=True, list_to_choice=False, allow_floats=True)
        self.assertTrue("[test14]" in str(ctx.exception))

    def test_value_list_and_allowed(self):
        param = iap.handle_discrete_param(
            [1, 2, 3], "[test15]", value_range=None, tuple_to_uniform=True,
            list_to_choice=True, allow_floats=True)
        self.assertTrue(isinstance(param, iap.Choice))

    def test_value_list_and_allowed_and_partially_outside_value_range(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_discrete_param(
                [1, 3], "[test16]", value_range=(2, 13),
                tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
        self.assertTrue("[test16]" in str(ctx.exception))

    def test_value_list_and_allowed_and_fully_outside_value_range(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_discrete_param(
                [1, 2], "[test17]", value_range=(3, 13),
                tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
        self.assertTrue("[test17]" in str(ctx.exception))

    def test_value_inside_value_range_given_as_callable(self):
        def _value_range(x):
            return -1 < x < 1

        param = iap.handle_discrete_param(
            1, "[test18]", value_range=_value_range,
            tuple_to_uniform=True, list_to_choice=True)
        self.assertTrue(isinstance(param, iap.Deterministic))

    def test_bad_datatype_as_value_range(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_discrete_param(
                1, "[test19]", value_range=False, tuple_to_uniform=True,
                list_to_choice=True)
        self.assertTrue(
            "Unexpected input for value_range" in str(ctx.exception))
class Test_handle_categorical_string_param(unittest.TestCase):
    """Tests for iap.handle_categorical_string_param()."""

    def test_arg_is_all(self):
        allowed = ["class1", "class2"]
        result = iap.handle_categorical_string_param(ia.ALL, "foo", allowed)
        assert isinstance(result, iap.Choice)
        assert result.a == allowed

    def test_arg_is_valid_str(self):
        allowed = ["class1", "class2"]
        result = iap.handle_categorical_string_param("class1", "foo", allowed)
        assert isinstance(result, iap.Deterministic)
        assert result.value == "class1"

    def test_arg_is_invalid_str(self):
        allowed = ["class1", "class2"]
        with self.assertRaises(AssertionError) as ctx:
            _ = iap.handle_categorical_string_param("class3", "foo", allowed)
        expected = ("Expected parameter 'foo' to be one of: class1, class2. "
                    "Got: class3.")
        assert expected == str(ctx.exception)

    def test_arg_is_valid_list(self):
        allowed = ["class1", "class2", "class3"]
        result = iap.handle_categorical_string_param(
            ["class1", "class3"], "foo", allowed)
        assert isinstance(result, iap.Choice)
        assert result.a == ["class1", "class3"]

    def test_arg_is_list_with_invalid_types(self):
        allowed = ["class1", "class2", "class3"]
        with self.assertRaises(AssertionError) as ctx:
            _ = iap.handle_categorical_string_param(
                ["class1", False], "foo", allowed)
        expected = ("Expected list provided for parameter 'foo' to only "
                    "contain strings, got types: str, bool.")
        assert expected in str(ctx.exception)

    def test_arg_is_invalid_list(self):
        allowed = ["class1", "class2", "class3"]
        with self.assertRaises(AssertionError) as ctx:
            _ = iap.handle_categorical_string_param(
                ["class1", "class4"], "foo", allowed)
        expected = ("Expected list provided for parameter 'foo' to only "
                    "contain the following allowed strings: class1, class2, "
                    "class3. Got strings: class1, class4.")
        assert expected in str(ctx.exception)

    def test_arg_is_stochastic_param(self):
        det = iap.Deterministic("class1")
        result = iap.handle_categorical_string_param(det, "foo", ["class1"])
        assert result is det

    def test_arg_is_invalid_datatype(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_categorical_string_param(False, "foo", ["class1"])
        assert "Expected parameter 'foo' to be imgaug.ALL" in str(ctx.exception)
class Test_handle_probability_param(unittest.TestCase):
    """Tests for iap.handle_probability_param()."""

    def test_bool_like_values(self):
        # exact 0/1 values (bool, int or float) become Deterministic
        for value in [True, False, 0, 1, 0.0, 1.0]:
            with self.subTest(param=value):
                result = iap.handle_probability_param(value, "[test1]")
                assert isinstance(result, iap.Deterministic)
                assert result.value == int(value)

    def test_float_probabilities(self):
        # fractional probabilities become a Binomial with deterministic p
        for value in [0.0001, 0.001, 0.01, 0.1, 0.9, 0.99, 0.999, 0.9999]:
            with self.subTest(param=value):
                result = iap.handle_probability_param(value, "[test2]")
                assert isinstance(result, iap.Binomial)
                assert isinstance(result.p, iap.Deterministic)
                assert value - 1e-8 < result.p.value < value + 1e-8

    def test_probability_is_stochastic_parameter(self):
        det = iap.Deterministic(1)
        result = iap.handle_probability_param(det, "[test3]")
        assert result == det

    def test_probability_has_bad_datatype(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.handle_probability_param("test", "[test4]")
        self.assertTrue("Expected " in str(ctx.exception))

    def test_probability_is_negative(self):
        with self.assertRaises(AssertionError):
            _ = iap.handle_probability_param(-0.01, "[test5]")

    def test_probability_is_above_100_percent(self):
        with self.assertRaises(AssertionError):
            _ = iap.handle_probability_param(1.01, "[test6]")
class Test_force_np_float_dtype(unittest.TestCase):
    """Tests for iap.force_np_float_dtype()."""

    def test_common_dtypes(self):
        # (input dtype, expected output dtype) pairs: floats are kept,
        # integer dtypes are promoted to float64
        cases = [
            ("float16", "float16"),
            ("float32", "float32"),
            ("float64", "float64"),
            ("uint8", "float64"),
            ("int32", "float64")
        ]
        for dtype_in, dtype_expected in cases:
            with self.subTest(dtype_in=dtype_in):
                arr = np.zeros((1,), dtype=dtype_in)
                dtype_observed = iap.force_np_float_dtype(arr).dtype
                assert dtype_observed.name == dtype_expected
class Test_both_np_float_if_one_is_float(unittest.TestCase):
    """Tests for iap.both_np_float_if_one_is_float()."""

    def _check(self, dtype_a, dtype_b, expected_a, expected_b):
        # helper: run the conversion on two zero arrays and compare the
        # resulting dtype names
        arr_a = np.zeros((1,), dtype=dtype_a)
        arr_b = np.zeros((1,), dtype=dtype_b)
        out_a, out_b = iap.both_np_float_if_one_is_float(arr_a, arr_b)
        assert out_a.dtype.name == expected_a
        assert out_b.dtype.name == expected_b

    def test_float16_float32(self):
        self._check(np.float16, np.float32, "float16", "float32")

    def test_float16_int32(self):
        self._check(np.float16, np.int32, "float16", "float64")

    def test_int32_float16(self):
        self._check(np.int32, np.float16, "float64", "float16")

    def test_int32_uint8(self):
        self._check(np.int32, np.uint8, "float64", "float64")
class Test_draw_distributions_grid(unittest.TestCase):
    """Tests for iap.draw_distributions_grid()."""

    def setUp(self):
        reseed()

    def test_basic_functionality(self):
        # two fake parameters, each "drawing" a tiny 1x1 RGB graph
        fake_params = [mock.Mock(), mock.Mock()]
        fake_params[0].draw_distribution_graph.return_value = np.zeros(
            (1, 1, 3), dtype=np.uint8)
        fake_params[1].draw_distribution_graph.return_value = np.zeros(
            (1, 1, 3), dtype=np.uint8)
        fake_draw_grid = mock.Mock()
        fake_draw_grid.return_value = np.zeros((4, 3, 2), dtype=np.uint8)
        with mock.patch('imgaug.imgaug.draw_grid', fake_draw_grid):
            grid_observed = iap.draw_distributions_grid(
                fake_params, rows=2, cols=3, graph_sizes=(20, 21),
                sample_sizes=[(1, 2), (3, 4)], titles=["A", "B"])

            assert grid_observed.shape == (4, 3, 2)
            # each parameter was asked exactly once for its graph, with its
            # own sample size and title
            assert fake_params[0].draw_distribution_graph.call_count == 1
            assert fake_params[1].draw_distribution_graph.call_count == 1
            assert fake_params[0].draw_distribution_graph.call_args[1]["size"] == (1, 2)
            assert fake_params[0].draw_distribution_graph.call_args[1]["title"] == "A"
            assert fake_params[1].draw_distribution_graph.call_args[1]["size"] == (3, 4)
            assert fake_params[1].draw_distribution_graph.call_args[1]["title"] == "B"
            # the graphs were brought to graph_sizes and handed to draw_grid
            assert fake_draw_grid.call_count == 1
            assert fake_draw_grid.call_args[0][0][0].shape == (20, 21, 3)
            assert fake_draw_grid.call_args[0][0][1].shape == (20, 21, 3)
            assert fake_draw_grid.call_args[1]["rows"] == 2
            assert fake_draw_grid.call_args[1]["cols"] == 3
class Test_draw_distributions_graph(unittest.TestCase):
    """Tests for StochasticParameter.draw_distribution_graph()."""

    def test_basic_functionality(self):
        # this test is very rough as we get a not-very-well-defined image out
        # of the function
        param = iap.Uniform(0.0, 1.0)
        img_no_title = param.draw_distribution_graph(
            title=None, size=(10000,), bins=100)
        # at least 10% of the image should be white-ish (background)
        nb_white = np.sum(img_no_title[..., :] > [200, 200, 200])
        nb_all = np.prod(img_no_title.shape)
        img_with_title = param.draw_distribution_graph(
            title="test", size=(10000,), bins=100)

        assert img_no_title.ndim == 3
        assert img_no_title.shape[2] == 3
        assert nb_white > 0.1 * nb_all
        assert img_with_title.ndim == 3
        assert img_with_title.shape[2] == 3
        # rendering a title must change the image
        assert not np.array_equal(img_with_title, img_no_title)
class TestStochasticParameter(unittest.TestCase):
    """Tests for StochasticParameter copy()/deepcopy() semantics."""

    def setUp(self):
        reseed()

    def test_copy(self):
        inner = iap.Uniform(1.0, 10.0)
        outer = iap.Discretize(inner)
        inner.a = [1.0]
        outer_copy = outer.copy()
        # shallow copy: mutating the original's nested list is visible in
        # the copy
        outer.other_param.a[0] += 1
        assert isinstance(outer_copy, iap.Discretize)
        assert isinstance(outer_copy.other_param, iap.Uniform)
        assert outer_copy.other_param.a[0] == outer.other_param.a[0]

    def test_deepcopy(self):
        inner = iap.Uniform(1.0, 10.0)
        outer = iap.Discretize(inner)
        inner.a = [1.0]
        outer_copy = outer.deepcopy()
        # deep copy: mutating the original's nested list must NOT affect
        # the copy
        outer.other_param.a[0] += 1
        assert isinstance(outer_copy, iap.Discretize)
        assert isinstance(outer_copy.other_param, iap.Uniform)
        assert outer_copy.other_param.a[0] != outer.other_param.a[0]
class TestStochasticParameterOperators(unittest.TestCase):
    """Tests for the arithmetic operator overloads on StochasticParameter."""

    def setUp(self):
        reseed()

    # -- multiplication ------------------------------------------------

    def test_multiply_stochasic_params(self):
        a = iap.Normal(0, 1)
        b = iap.Uniform(-1.0, 1.0)
        prod = a * b
        assert isinstance(prod, iap.Multiply)
        assert prod.other_param == a
        assert prod.val == b

    def test_multiply_stochastic_param_with_integer(self):
        a = iap.Normal(0, 1)
        prod = a * 2
        assert isinstance(prod, iap.Multiply)
        assert prod.other_param == a
        assert isinstance(prod.val, iap.Deterministic)
        assert prod.val.value == 2

    def test_multiply_integer_with_stochastic_param(self):
        a = iap.Normal(0, 1)
        prod = 2 * a
        assert isinstance(prod, iap.Multiply)
        assert isinstance(prod.other_param, iap.Deterministic)
        assert prod.other_param.value == 2
        assert prod.val == a

    def test_multiply_string_with_stochastic_param_should_fail(self):
        a = iap.Normal(0, 1)
        with self.assertRaises(Exception) as ctx:
            _ = "test" * a
        self.assertTrue("Invalid datatypes" in str(ctx.exception))

    def test_multiply_stochastic_param_with_string_should_fail(self):
        a = iap.Normal(0, 1)
        with self.assertRaises(Exception) as ctx:
            _ = a * "test"
        self.assertTrue("Invalid datatypes" in str(ctx.exception))

    # -- true division (__truediv__) -----------------------------------

    def test_divide_stochastic_params(self):
        a = iap.Normal(0, 1)
        b = iap.Uniform(-1.0, 1.0)
        quot = a / b
        assert isinstance(quot, iap.Divide)
        assert quot.other_param == a
        assert quot.val == b

    def test_divide_stochastic_param_by_integer(self):
        a = iap.Normal(0, 1)
        quot = a / 2
        assert isinstance(quot, iap.Divide)
        assert quot.other_param == a
        assert isinstance(quot.val, iap.Deterministic)
        assert quot.val.value == 2

    def test_divide_integer_by_stochastic_param(self):
        a = iap.Normal(0, 1)
        quot = 2 / a
        assert isinstance(quot, iap.Divide)
        assert isinstance(quot.other_param, iap.Deterministic)
        assert quot.other_param.value == 2
        assert quot.val == a

    def test_divide_string_by_stochastic_param_should_fail(self):
        a = iap.Normal(0, 1)
        with self.assertRaises(Exception) as ctx:
            _ = "test" / a
        self.assertTrue("Invalid datatypes" in str(ctx.exception))

    def test_divide_stochastic_param_by_string_should_fail(self):
        a = iap.Normal(0, 1)
        with self.assertRaises(Exception) as ctx:
            _ = a / "test"
        self.assertTrue("Invalid datatypes" in str(ctx.exception))

    # -- explicit __div__ / __rdiv__ (python2-style) -------------------

    def test_div_stochastic_params(self):
        a = iap.Normal(0, 1)
        b = iap.Uniform(-1.0, 1.0)
        quot = a.__div__(b)
        assert isinstance(quot, iap.Divide)
        assert quot.other_param == a
        assert quot.val == b

    def test_div_stochastic_param_by_integer(self):
        a = iap.Normal(0, 1)
        quot = a.__div__(2)
        assert isinstance(quot, iap.Divide)
        assert quot.other_param == a
        assert isinstance(quot.val, iap.Deterministic)
        assert quot.val.value == 2

    def test_div_stochastic_param_by_string_should_fail(self):
        a = iap.Normal(0, 1)
        with self.assertRaises(Exception) as ctx:
            _ = a.__div__("test")
        self.assertTrue("Invalid datatypes" in str(ctx.exception))

    def test_rdiv_stochastic_param_by_integer(self):
        a = iap.Normal(0, 1)
        quot = a.__rdiv__(2)
        assert isinstance(quot, iap.Divide)
        assert isinstance(quot.other_param, iap.Deterministic)
        assert quot.other_param.value == 2
        assert quot.val == a

    def test_rdiv_stochastic_param_by_string_should_fail(self):
        a = iap.Normal(0, 1)
        with self.assertRaises(Exception) as ctx:
            _ = a.__rdiv__("test")
        self.assertTrue("Invalid datatypes" in str(ctx.exception))

    # -- floor division (__floordiv__, wraps the quotient in Discretize)

    def test_floordiv_stochastic_params(self):
        a = iap.DiscreteUniform(0, 10)
        b = iap.Choice([1, 2])
        quot = a // b
        assert isinstance(quot, iap.Discretize)
        assert isinstance(quot.other_param, iap.Divide)
        assert quot.other_param.other_param == a
        assert quot.other_param.val == b

    def test_floordiv_symbol_stochastic_param_by_integer(self):
        a = iap.DiscreteUniform(0, 10)
        quot = a // 2
        assert isinstance(quot, iap.Discretize)
        assert isinstance(quot.other_param, iap.Divide)
        assert quot.other_param.other_param == a
        assert isinstance(quot.other_param.val, iap.Deterministic)
        assert quot.other_param.val.value == 2

    def test_floordiv_symbol_integer_by_stochastic_param(self):
        a = iap.DiscreteUniform(0, 10)
        quot = 2 // a
        assert isinstance(quot, iap.Discretize)
        assert isinstance(quot.other_param, iap.Divide)
        assert isinstance(quot.other_param.other_param, iap.Deterministic)
        assert quot.other_param.other_param.value == 2
        assert quot.other_param.val == a

    def test_floordiv_symbol_string_by_stochastic_should_fail(self):
        a = iap.DiscreteUniform(0, 10)
        with self.assertRaises(Exception) as ctx:
            _ = "test" // a
        self.assertTrue("Invalid datatypes" in str(ctx.exception))

    def test_floordiv_symbol_stochastic_param_by_string_should_fail(self):
        a = iap.DiscreteUniform(0, 10)
        with self.assertRaises(Exception) as ctx:
            _ = a // "test"
        self.assertTrue("Invalid datatypes" in str(ctx.exception))

    # -- addition ------------------------------------------------------

    def test_add_stochastic_params(self):
        a = iap.Normal(0, 1)
        b = iap.Uniform(-1.0, 1.0)
        total = a + b
        assert isinstance(total, iap.Add)
        assert total.other_param == a
        assert total.val == b

    def test_add_integer_to_stochastic_param(self):
        a = iap.Normal(0, 1)
        total = a + 2
        assert isinstance(total, iap.Add)
        assert total.other_param == a
        assert isinstance(total.val, iap.Deterministic)
        assert total.val.value == 2

    def test_add_stochastic_param_to_integer(self):
        a = iap.Normal(0, 1)
        total = 2 + a
        assert isinstance(total, iap.Add)
        assert isinstance(total.other_param, iap.Deterministic)
        assert total.other_param.value == 2
        assert total.val == a

    def test_add_stochastic_param_to_string(self):
        a = iap.Normal(0, 1)
        with self.assertRaises(Exception) as ctx:
            _ = "test" + a
        self.assertTrue("Invalid datatypes" in str(ctx.exception))

    def test_add_string_to_stochastic_param(self):
        a = iap.Normal(0, 1)
        with self.assertRaises(Exception) as ctx:
            _ = a + "test"
        self.assertTrue("Invalid datatypes" in str(ctx.exception))

    # -- subtraction ---------------------------------------------------

    def test_subtract_stochastic_params(self):
        a = iap.Normal(0, 1)
        b = iap.Uniform(-1.0, 1.0)
        diff = a - b
        assert isinstance(diff, iap.Subtract)
        assert diff.other_param == a
        assert diff.val == b

    def test_subtract_integer_from_stochastic_param(self):
        a = iap.Normal(0, 1)
        diff = a - 2
        assert isinstance(diff, iap.Subtract)
        assert diff.other_param == a
        assert isinstance(diff.val, iap.Deterministic)
        assert diff.val.value == 2

    def test_subtract_stochastic_param_from_integer(self):
        a = iap.Normal(0, 1)
        diff = 2 - a
        assert isinstance(diff, iap.Subtract)
        assert isinstance(diff.other_param, iap.Deterministic)
        assert diff.other_param.value == 2
        assert diff.val == a

    def test_subtract_stochastic_param_from_string_should_fail(self):
        a = iap.Normal(0, 1)
        with self.assertRaises(Exception) as ctx:
            _ = "test" - a
        self.assertTrue("Invalid datatypes" in str(ctx.exception))

    def test_subtract_string_from_stochastic_param_should_fail(self):
        a = iap.Normal(0, 1)
        with self.assertRaises(Exception) as ctx:
            _ = a - "test"
        self.assertTrue("Invalid datatypes" in str(ctx.exception))

    # -- exponentiation ------------------------------------------------

    def test_exponentiate_stochastic_params(self):
        a = iap.Normal(0, 1)
        b = iap.Uniform(-1.0, 1.0)
        powed = a ** b
        assert isinstance(powed, iap.Power)
        assert powed.other_param == a
        assert powed.val == b

    def test_exponentiate_stochastic_param_by_integer(self):
        a = iap.Normal(0, 1)
        powed = a ** 2
        assert isinstance(powed, iap.Power)
        assert powed.other_param == a
        assert isinstance(powed.val, iap.Deterministic)
        assert powed.val.value == 2

    def test_exponentiate_integer_by_stochastic_param(self):
        a = iap.Normal(0, 1)
        powed = 2 ** a
        assert isinstance(powed, iap.Power)
        assert isinstance(powed.other_param, iap.Deterministic)
        assert powed.other_param.value == 2
        assert powed.val == a

    def test_exponentiate_string_by_stochastic_param(self):
        a = iap.Normal(0, 1)
        with self.assertRaises(Exception) as ctx:
            _ = "test" ** a
        self.assertTrue("Invalid datatypes" in str(ctx.exception))

    def test_exponentiate_stochastic_param_by_string(self):
        a = iap.Normal(0, 1)
        with self.assertRaises(Exception) as ctx:
            _ = a ** "test"
        self.assertTrue("Invalid datatypes" in str(ctx.exception))
class TestBinomial(unittest.TestCase):
    """Tests for the Binomial stochastic parameter."""
    def setUp(self):
        # reset global RNG so each test is reproducible
        reseed()
    def test___init___p_is_zero(self):
        # str()/repr() must expose p wrapped in a Deterministic.
        param = iap.Binomial(0)
        assert (
            param.__str__()
            == param.__repr__()
            == "Binomial(Deterministic(int 0))"
        )
    def test___init___p_is_one(self):
        param = iap.Binomial(1.0)
        assert (
            param.__str__()
            == param.__repr__()
            == "Binomial(Deterministic(float 1.00000000))"
        )
    def test_p_is_zero(self):
        # p=0 -> every draw must be 0
        param = iap.Binomial(0)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == 0
        assert np.all(samples == 0)
    def test_p_is_one(self):
        # p=1 -> every draw must be 1
        param = iap.Binomial(1.0)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == 1
        assert np.all(samples == 1)
    def test_p_is_50_percent(self):
        # p=0.5 -> roughly half of 10k draws per class (+-10%)
        param = iap.Binomial(0.5)
        sample = param.draw_sample()
        samples = param.draw_samples((10000,))
        unique, counts = np.unique(samples, return_counts=True)
        assert sample.shape == tuple()
        assert samples.shape == (10000,)
        assert sample in [0, 1]
        assert len(unique) == 2
        for val, count in zip(unique, counts):
            if val == 0:
                assert 5000 - 500 < count < 5000 + 500
            elif val == 1:
                assert 5000 - 500 < count < 5000 + 500
            else:
                # only 0 and 1 may ever be sampled
                assert False
    def test_p_is_list(self):
        # p given as Choice -> empirical rate must match one of the choices
        param = iap.Binomial(iap.Choice([0.25, 0.75]))
        for _ in sm.xrange(10):
            samples = param.draw_samples((1000,))
            p = np.sum(samples) / samples.size
            assert (
                (0.25 - 0.05 < p < 0.25 + 0.05)
                or (0.75 - 0.05 < p < 0.75 + 0.05)
            )
    def test_p_is_tuple(self):
        # p drawn from a continuous range -> rate should vary between calls
        param = iap.Binomial((0.0, 1.0))
        last_p = 0.5
        diffs = []
        for _ in sm.xrange(30):
            samples = param.draw_samples((1000,))
            p = np.sum(samples).astype(np.float32) / samples.size
            diffs.append(abs(p - last_p))
            last_p = p
        nb_p_changed = sum([diff > 0.05 for diff in diffs])
        assert nb_p_changed > 15
    def test_samples_same_values_for_same_seeds(self):
        # identical RNG seeds must reproduce identical samples
        param = iap.Binomial(0.5)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.array_equal(samples1, samples2)
class TestChoice(unittest.TestCase):
    """Tests for the Choice parameter (sampling from a fixed value set)."""
    def setUp(self):
        # reset global RNG so each test is reproducible
        reseed()
    def test___init__(self):
        param = iap.Choice([0, 1, 2])
        assert (
            param.__str__()
            == param.__repr__()
            == "Choice(a=[0, 1, 2], replace=True, p=None)"
        )
    def test_value_is_list(self):
        param = iap.Choice([0, 1, 2])
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [0, 1, 2]
        assert np.all(
            np.logical_or(
                np.logical_or(samples == 0, samples == 1),
                samples == 2
            )
        )
    def test_sampled_values_match_expected_counts(self):
        # uniform choice: each value ~1/3 of 10k draws (+-5%)
        param = iap.Choice([0, 1, 2])
        samples = param.draw_samples((10000,))
        expected = 10000/3
        expected_tolerance = expected * 0.05
        for v in [0, 1, 2]:
            count = np.sum(samples == v)
            assert (
                expected - expected_tolerance
                < count <
                expected + expected_tolerance
            )
    def test_value_is_list_containing_negative_number(self):
        param = iap.Choice([-1, 1])
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [-1, 1]
        assert np.all(np.logical_or(samples == -1, samples == 1))
    def test_value_is_list_of_floats(self):
        # float equality is checked within machine epsilon via _eps()
        param = iap.Choice([-1.2, 1.7])
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert (
            (
                -1.2 - _eps(sample)
                < sample <
                -1.2 + _eps(sample)
            )
            or
            (
                1.7 - _eps(sample)
                < sample <
                1.7 + _eps(sample)
            )
        )
        assert np.all(
            np.logical_or(
                np.logical_and(
                    -1.2 - _eps(sample) < samples,
                    samples < -1.2 + _eps(sample)
                ),
                np.logical_and(
                    1.7 - _eps(sample) < samples,
                    samples < 1.7 + _eps(sample)
                )
            )
        )
    def test_value_is_list_of_strings(self):
        param = iap.Choice(["first", "second", "third"])
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in ["first", "second", "third"]
        assert np.all(
            np.logical_or(
                np.logical_or(
                    samples == "first",
                    samples == "second"
                ),
                samples == "third"
            )
        )
    def test_sample_without_replacing(self):
        # replace=False: no value may be drawn more than once
        param = iap.Choice([1+i for i in sm.xrange(100)], replace=False)
        samples = param.draw_samples((50,))
        seen = [0 for _ in sm.xrange(100)]
        for sample in samples:
            seen[sample-1] += 1
        assert all([count in [0, 1] for count in seen])
    def test_non_uniform_probabilities_over_elements(self):
        # p=[0.25, 0.75]: observed counts should match the weights (+-5%)
        param = iap.Choice([0, 1], p=[0.25, 0.75])
        samples = param.draw_samples((10000,))
        unique, counts = np.unique(samples, return_counts=True)
        assert len(unique) == 2
        for val, count in zip(unique, counts):
            if val == 0:
                assert 2500 - 500 < count < 2500 + 500
            elif val == 1:
                assert 7500 - 500 < count < 7500 + 500
            else:
                assert False
    def test_list_contains_stochastic_parameter(self):
        # a nested parameter in the list is itself sampled, so 0 and 1
        # each get half of the nested Choice's 50% share
        param = iap.Choice([iap.Choice([0, 1]), 2])
        samples = param.draw_samples((10000,))
        unique, counts = np.unique(samples, return_counts=True)
        assert len(unique) == 3
        for val, count in zip(unique, counts):
            if val in [0, 1]:
                assert 2500 - 500 < count < 2500 + 500
            elif val == 2:
                assert 5000 - 500 < count < 5000 + 500
            else:
                assert False
    def test_samples_same_values_for_same_seeds(self):
        # identical RNG seeds must reproduce identical samples
        param = iap.Choice([-1, 0, 1, 2, 3])
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.array_equal(samples1, samples2)
    def test_value_is_bad_datatype(self):
        with self.assertRaises(Exception) as context:
            _ = iap.Choice(123)
        self.assertTrue(
            "Expected a to be an iterable" in str(context.exception))
    def test_p_is_bad_datatype(self):
        with self.assertRaises(Exception) as context:
            _ = iap.Choice([1, 2], p=123)
        self.assertTrue("Expected p to be" in str(context.exception))
    def test_value_and_p_have_unequal_lengths(self):
        with self.assertRaises(Exception) as context:
            _ = iap.Choice([1, 2], p=[1])
        self.assertTrue("Expected lengths of" in str(context.exception))
class TestDiscreteUniform(unittest.TestCase):
    """Tests for the DiscreteUniform stochastic parameter.

    Covers repr formatting, integer/float/swapped bounds, degenerate
    bounds (a == b) and determinism under fixed seeds.
    """

    def setUp(self):
        # reset global RNG so each test is reproducible
        reseed()

    def test___init__(self):
        # repr/str must expose both bounds wrapped in Deterministic.
        param = iap.DiscreteUniform(0, 2)
        assert (
            param.__str__()
            == param.__repr__()
            == "DiscreteUniform(Deterministic(int 0), Deterministic(int 2))"
        )

    def test_bounds_are_ints(self):
        param = iap.DiscreteUniform(0, 2)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [0, 1, 2]
        assert np.all(
            np.logical_or(
                np.logical_or(samples == 0, samples == 1),
                samples == 2
            )
        )

    def test_samples_match_expected_counts(self):
        # each of the three values should appear ~1/3 of the time (+-5%)
        param = iap.DiscreteUniform(0, 2)
        samples = param.draw_samples((10000,))
        expected = 10000/3
        expected_tolerance = expected * 0.05
        for v in [0, 1, 2]:
            count = np.sum(samples == v)
            assert (
                expected - expected_tolerance
                < count <
                expected + expected_tolerance
            )

    def test_lower_bound_is_negative(self):
        param = iap.DiscreteUniform(-1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [-1, 0, 1]
        assert np.all(
            np.logical_or(
                np.logical_or(samples == -1, samples == 0),
                samples == 1
            )
        )

    def test_bounds_are_floats(self):
        # float bounds must still produce integer samples in {-1, 0, 1}
        param = iap.DiscreteUniform(-1.2, 1.2)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [-1, 0, 1]
        assert np.all(
            np.logical_or(
                np.logical_or(
                    samples == -1, samples == 0
                ),
                samples == 1
            )
        )

    def test_lower_and_upper_bound_have_wrong_order(self):
        # swapped bounds must behave like the correctly ordered pair
        param = iap.DiscreteUniform(1, -1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [-1, 0, 1]
        assert np.all(
            np.logical_or(
                np.logical_or(
                    samples == -1, samples == 0
                ),
                samples == 1
            )
        )

    def test_lower_and_upper_bound_are_the_same(self):
        # degenerate interval [1, 1] -> constant output
        param = iap.DiscreteUniform(1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((100,))
        assert sample == 1
        assert np.all(samples == 1)

    def test_samples_same_values_for_same_seeds(self):
        # BUG FIX: this previously instantiated iap.Uniform, so the
        # determinism of DiscreteUniform itself was never tested.
        param = iap.DiscreteUniform(-1, 1)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.array_equal(samples1, samples2)
class TestPoisson(unittest.TestCase):
    """Tests for the Poisson stochastic parameter."""

    def setUp(self):
        # reset global RNG so each test is reproducible
        reseed()

    def test___init__(self):
        param = iap.Poisson(1)
        expected = "Poisson(Deterministic(int 1))"
        assert param.__str__() == param.__repr__() == expected

    def test_draw_sample(self):
        # single draws are scalar and never negative
        sample = iap.Poisson(1).draw_sample()
        assert sample.shape == tuple()
        assert sample >= 0

    def test_via_comparison_to_np_poisson(self):
        # per-count histogram should roughly match numpy's poisson sampler
        samples = iap.Poisson(1).draw_samples((100, 1000))
        samples_direct = iarandom.RNG(1234).poisson(lam=1, size=(100, 1000))
        assert samples.shape == (100, 1000)
        for i in range(10):
            count_direct = int(np.sum(samples_direct == i))
            count = np.sum(samples == i)
            tolerance = max(count_direct * 0.1, 250)
            assert count_direct - tolerance < count < count_direct + tolerance

    def test_samples_same_values_for_same_seeds(self):
        # identical RNG seeds must reproduce identical samples
        param = iap.Poisson(1)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.array_equal(samples1, samples2)
class TestNormal(unittest.TestCase):
    """Tests for the Normal (gaussian) stochastic parameter."""
    def setUp(self):
        # reset global RNG so each test is reproducible
        reseed()
    def test___init__(self):
        param = iap.Normal(0, 1)
        assert (
            param.__str__()
            == param.__repr__()
            == "Normal(loc=Deterministic(int 0), scale=Deterministic(int 1))"
        )
    def test_draw_sample(self):
        param = iap.Normal(0, 1)
        sample = param.draw_sample()
        assert sample.shape == tuple()
    def test_via_comparison_to_np_normal(self):
        # clipped histogram densities should match numpy's normal sampler
        param = iap.Normal(0, 1)
        samples = param.draw_samples((100, 1000))
        samples_direct = iarandom.RNG(1234).normal(loc=0, scale=1,
                                                   size=(100, 1000))
        samples = np.clip(samples, -1, 1)
        samples_direct = np.clip(samples_direct, -1, 1)
        nb_bins = 10
        hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0),
                               density=False)
        hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
                                      range=(-1.0, 1.0), density=False)
        tolerance = 0.05
        for nb_samples, nb_samples_direct in zip(hist, hist_direct):
            density = nb_samples / samples.size
            density_direct = nb_samples_direct / samples_direct.size
            assert (
                density_direct - tolerance
                < density <
                density_direct + tolerance
            )
    def test_loc_is_stochastic_parameter(self):
        # loc drawn from Choice -> sample mean near -100 or +100
        param = iap.Normal(iap.Choice([-100, 100]), 1)
        seen = [0, 0]
        for _ in sm.xrange(1000):
            samples = param.draw_samples((100,))
            exp = np.mean(samples)
            if -100 - 10 < exp < -100 + 10:
                seen[0] += 1
            elif 100 - 10 < exp < 100 + 10:
                seen[1] += 1
            else:
                assert False
        assert 500 - 100 < seen[0] < 500 + 100
        assert 500 - 100 < seen[1] < 500 + 100
    def test_scale(self):
        # larger scale -> larger spread; std of scale=100 near 100
        param1 = iap.Normal(0, 1)
        param2 = iap.Normal(0, 100)
        samples1 = param1.draw_samples((1000,))
        samples2 = param2.draw_samples((1000,))
        assert np.std(samples1) < np.std(samples2)
        assert 100 - 10 < np.std(samples2) < 100 + 10
    def test_samples_same_values_for_same_seeds(self):
        # identical RNG seeds must reproduce identical samples
        param = iap.Normal(0, 1)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.allclose(samples1, samples2)
class TestTruncatedNormal(unittest.TestCase):
    """Tests for the TruncatedNormal parameter (gaussian with hard bounds)."""
    def setUp(self):
        # reset global RNG so each test is reproducible
        reseed()
    def test___init__(self):
        # default bounds are -inf/+inf, i.e. effectively unbounded
        param = iap.TruncatedNormal(0, 1)
        expected = (
            "TruncatedNormal("
            "loc=Deterministic(int 0), "
            "scale=Deterministic(int 1), "
            "low=Deterministic(float -inf), "
            "high=Deterministic(float inf)"
            ")"
        )
        assert (
            param.__str__()
            == param.__repr__()
            == expected
        )
    def test___init___custom_range(self):
        param = iap.TruncatedNormal(0, 1, low=-100, high=50.0)
        expected = (
            "TruncatedNormal("
            "loc=Deterministic(int 0), "
            "scale=Deterministic(int 1), "
            "low=Deterministic(int -100), "
            "high=Deterministic(float 50.00000000)"
            ")"
        )
        assert (
            param.__str__()
            == param.__repr__()
            == expected
        )
    def test_scale_is_zero(self):
        # scale=0 collapses the distribution to the loc value
        param = iap.TruncatedNormal(0.5, 0, low=-10, high=10)
        samples = param.draw_samples((100,))
        assert np.allclose(samples, 0.5)
    def test_scale(self):
        # larger scale -> larger spread, both stds near nominal values
        param1 = iap.TruncatedNormal(0.0, 0.1, low=-100, high=100)
        param2 = iap.TruncatedNormal(0.0, 5.0, low=-100, high=100)
        samples1 = param1.draw_samples((1000,))
        samples2 = param2.draw_samples((1000,))
        assert np.std(samples1) < np.std(samples2)
        assert np.isclose(np.std(samples1), 0.1, rtol=0, atol=0.20)
        assert np.isclose(np.std(samples2), 5.0, rtol=0, atol=0.40)
    def test_loc_is_stochastic_parameter(self):
        # loc drawn from Choice -> mean near -100 or +100 per draw batch
        param = iap.TruncatedNormal(iap.Choice([-100, 100]), 0.01,
                                    low=-1000, high=1000)
        seen = [0, 0]
        for _ in sm.xrange(200):
            samples = param.draw_samples((5,))
            observed = np.mean(samples)
            dist1 = np.abs(-100 - observed)
            dist2 = np.abs(100 - observed)
            if dist1 < 1:
                seen[0] += 1
            elif dist2 < 1:
                seen[1] += 1
            else:
                assert False
        assert np.isclose(seen[0], 100, rtol=0, atol=20)
        assert np.isclose(seen[1], 100, rtol=0, atol=20)
    def test_samples_are_within_bounds(self):
        param = iap.TruncatedNormal(0, 10.0, low=-5, high=7.5)
        samples = param.draw_samples((1000,))
        # are all within bounds
        assert np.all(samples >= -5.0 - 1e-4)
        assert np.all(samples <= 7.5 + 1e-4)
        # at least some samples close to bounds
        assert np.any(samples <= -4.5)
        assert np.any(samples >= 7.0)
        # at least some samples close to loc
        assert np.any(np.abs(samples) < 0.5)
    def test_samples_same_values_for_same_seeds(self):
        # identical integer seeds must reproduce identical samples
        param = iap.TruncatedNormal(0, 1)
        samples1 = param.draw_samples((10, 5), random_state=1234)
        samples2 = param.draw_samples((10, 5), random_state=1234)
        assert np.allclose(samples1, samples2)
    def test_samples_different_values_for_different_seeds(self):
        param = iap.TruncatedNormal(0, 1)
        samples1 = param.draw_samples((10, 5), random_state=1234)
        samples2 = param.draw_samples((10, 5), random_state=2345)
        assert not np.allclose(samples1, samples2)
class TestLaplace(unittest.TestCase):
    """Tests for the Laplace stochastic parameter."""
    def setUp(self):
        # reset global RNG so each test is reproducible
        reseed()
    def test___init__(self):
        param = iap.Laplace(0, 1)
        assert (
            param.__str__()
            == param.__repr__()
            == "Laplace(loc=Deterministic(int 0), scale=Deterministic(int 1))"
        )
    def test_draw_sample(self):
        param = iap.Laplace(0, 1)
        sample = param.draw_sample()
        assert sample.shape == tuple()
    def test_via_comparison_to_np_laplace(self):
        # clipped histogram densities should match numpy's laplace sampler
        param = iap.Laplace(0, 1)
        samples = param.draw_samples((100, 1000))
        samples_direct = iarandom.RNG(1234).laplace(loc=0, scale=1,
                                                    size=(100, 1000))
        assert samples.shape == (100, 1000)
        samples = np.clip(samples, -1, 1)
        samples_direct = np.clip(samples_direct, -1, 1)
        nb_bins = 10
        hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0),
                               density=False)
        hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
                                      range=(-1.0, 1.0), density=False)
        tolerance = 0.05
        for nb_samples, nb_samples_direct in zip(hist, hist_direct):
            density = nb_samples / samples.size
            density_direct = nb_samples_direct / samples_direct.size
            assert (
                density_direct - tolerance
                < density <
                density_direct + tolerance
            )
    def test_loc_is_stochastic_parameter(self):
        # loc drawn from Choice -> sample mean near -100 or +100
        param = iap.Laplace(iap.Choice([-100, 100]), 1)
        seen = [0, 0]
        for _ in sm.xrange(1000):
            samples = param.draw_samples((100,))
            exp = np.mean(samples)
            if -100 - 10 < exp < -100 + 10:
                seen[0] += 1
            elif 100 - 10 < exp < 100 + 10:
                seen[1] += 1
            else:
                assert False
        assert 500 - 100 < seen[0] < 500 + 100
        assert 500 - 100 < seen[1] < 500 + 100
    def test_scale(self):
        # larger scale -> larger variance
        param1 = iap.Laplace(0, 1)
        param2 = iap.Laplace(0, 100)
        samples1 = param1.draw_samples((1000,))
        samples2 = param2.draw_samples((1000,))
        assert np.var(samples1) < np.var(samples2)
    def test_scale_is_zero(self):
        # scale=0 collapses the distribution to the loc value
        param1 = iap.Laplace(1, 0)
        samples = param1.draw_samples((100,))
        assert np.all(np.logical_and(
            samples > 1 - _eps(samples),
            samples < 1 + _eps(samples)
        ))
    def test_samples_same_values_for_same_seeds(self):
        # identical RNG seeds must reproduce identical samples
        param = iap.Laplace(0, 1)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.allclose(samples1, samples2)
class TestChiSquare(unittest.TestCase):
    """Tests for the ChiSquare stochastic parameter."""
    def setUp(self):
        # reset global RNG so each test is reproducible
        reseed()
    def test___init__(self):
        param = iap.ChiSquare(1)
        assert (
            param.__str__()
            == param.__repr__()
            == "ChiSquare(df=Deterministic(int 1))"
        )
    def test_draw_sample(self):
        # chi-square samples are scalar and never negative
        param = iap.ChiSquare(1)
        sample = param.draw_sample()
        assert sample.shape == tuple()
        assert 0 <= sample
    def test_via_comparison_to_np_chisquare(self):
        # clipped histogram densities should match numpy's chisquare sampler
        param = iap.ChiSquare(1)
        samples = param.draw_samples((100, 1000))
        samples_direct = iarandom.RNG(1234).chisquare(df=1,
                                                      size=(100, 1000))
        assert samples.shape == (100, 1000)
        assert np.all(0 <= samples)
        samples = np.clip(samples, 0, 3)
        samples_direct = np.clip(samples_direct, 0, 3)
        nb_bins = 10
        hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 3.0),
                               density=False)
        hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
                                      range=(0, 3.0), density=False)
        tolerance = 0.05
        for nb_samples, nb_samples_direct in zip(hist, hist_direct):
            density = nb_samples / samples.size
            density_direct = nb_samples_direct / samples_direct.size
            assert (
                density_direct - tolerance
                < density <
                density_direct + tolerance
            )
    def test_df_is_stochastic_parameter(self):
        # df drawn from Choice -> sample mean near df (1 or 10)
        param = iap.ChiSquare(iap.Choice([1, 10]))
        seen = [0, 0]
        for _ in sm.xrange(1000):
            samples = param.draw_samples((100,))
            exp = np.mean(samples)
            if 1 - 1.0 < exp < 1 + 1.0:
                seen[0] += 1
            elif 10 - 4.0 < exp < 10 + 4.0:
                seen[1] += 1
            else:
                assert False
        assert 500 - 100 < seen[0] < 500 + 100
        assert 500 - 100 < seen[1] < 500 + 100
    def test_larger_df_leads_to_more_variance(self):
        # variance of chi-square is 2*df; check both empirically
        param1 = iap.ChiSquare(1)
        param2 = iap.ChiSquare(10)
        samples1 = param1.draw_samples((1000,))
        samples2 = param2.draw_samples((1000,))
        assert np.var(samples1) < np.var(samples2)
        assert 2*1 - 1.0 < np.var(samples1) < 2*1 + 1.0
        assert 2*10 - 5.0 < np.var(samples2) < 2*10 + 5.0
    def test_samples_same_values_for_same_seeds(self):
        # identical RNG seeds must reproduce identical samples
        param = iap.ChiSquare(1)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.allclose(samples1, samples2)
class TestWeibull(unittest.TestCase):
    """Tests for the Weibull stochastic parameter."""
    def setUp(self):
        # reset global RNG so each test is reproducible
        reseed()
    def test___init__(self):
        param = iap.Weibull(1)
        assert (
            param.__str__()
            == param.__repr__()
            == "Weibull(a=Deterministic(int 1))"
        )
    def test_draw_sample(self):
        # weibull samples are scalar and never negative
        param = iap.Weibull(1)
        sample = param.draw_sample()
        assert sample.shape == tuple()
        assert 0 <= sample
    def test_via_comparison_to_np_weibull(self):
        # clipped histogram densities should match numpy's weibull sampler
        param = iap.Weibull(1)
        samples = param.draw_samples((100, 1000))
        samples_direct = iarandom.RNG(1234).weibull(a=1,
                                                    size=(100, 1000))
        assert samples.shape == (100, 1000)
        assert np.all(0 <= samples)
        samples = np.clip(samples, 0, 2)
        samples_direct = np.clip(samples_direct, 0, 2)
        nb_bins = 10
        hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 2.0),
                               density=False)
        hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
                                      range=(0, 2.0), density=False)
        tolerance = 0.05
        for nb_samples, nb_samples_direct in zip(hist, hist_direct):
            density = nb_samples / samples.size
            density_direct = nb_samples_direct / samples_direct.size
            assert (
                density_direct - tolerance
                < density <
                density_direct + tolerance
            )
    def test_argument_is_stochastic_parameter(self):
        # theoretical weibull mean is gamma(1 + 1/a); the observed mean
        # must match one of the two candidate shapes per batch
        param = iap.Weibull(iap.Choice([1, 0.5]))
        expected_first = scipy.special.gamma(1 + 1/1)
        expected_second = scipy.special.gamma(1 + 1/0.5)
        seen = [0, 0]
        for _ in sm.xrange(100):
            samples = param.draw_samples((50000,))
            observed = np.mean(samples)
            matches_first = (
                expected_first - 0.2 * expected_first
                < observed <
                expected_first + 0.2 * expected_first
            )
            matches_second = (
                expected_second - 0.2 * expected_second
                < observed <
                expected_second + 0.2 * expected_second
            )
            if matches_first:
                seen[0] += 1
            elif matches_second:
                seen[1] += 1
            else:
                assert False
        assert 50 - 25 < seen[0] < 50 + 25
        assert 50 - 25 < seen[1] < 50 + 25
    def test_different_strengths(self):
        # theoretical weibull variance is gamma(1+2/a) - gamma(1+1/a)^2
        param1 = iap.Weibull(1)
        param2 = iap.Weibull(0.5)
        samples1 = param1.draw_samples((10000,))
        samples2 = param2.draw_samples((10000,))
        expected_first = (
            scipy.special.gamma(1 + 2/1)
            - (scipy.special.gamma(1 + 1/1))**2
        )
        expected_second = (
            scipy.special.gamma(1 + 2/0.5)
            - (scipy.special.gamma(1 + 1/0.5))**2
        )
        assert np.var(samples1) < np.var(samples2)
        assert (
            expected_first - 0.2 * expected_first
            < np.var(samples1) <
            expected_first + 0.2 * expected_first
        )
        assert (
            expected_second - 0.2 * expected_second
            < np.var(samples2) <
            expected_second + 0.2 * expected_second
        )
    def test_samples_same_values_for_same_seeds(self):
        # identical RNG seeds must reproduce identical samples
        param = iap.Weibull(1)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.allclose(samples1, samples2)
class TestUniform(unittest.TestCase):
    """Tests for the (continuous) Uniform stochastic parameter."""
    def setUp(self):
        # reset global RNG so each test is reproducible
        reseed()
    def test___init__(self):
        param = iap.Uniform(0, 1.0)
        assert (
            param.__str__()
            == param.__repr__()
            == "Uniform(Deterministic(int 0), Deterministic(float 1.00000000))"
        )
    def test_draw_sample(self):
        # single draw is scalar and inside [0, 1] (epsilon-tolerant)
        param = iap.Uniform(0, 1.0)
        sample = param.draw_sample()
        assert sample.shape == tuple()
        assert 0 - _eps(sample) < sample < 1.0 + _eps(sample)
    def test_draw_samples(self):
        param = iap.Uniform(0, 1.0)
        samples = param.draw_samples((10, 5))
        assert samples.shape == (10, 5)
        assert np.all(
            np.logical_and(
                0 - _eps(samples) < samples,
                samples < 1.0 + _eps(samples)
            )
        )
    def test_via_density_histogram(self):
        # all 10 bins should hold ~1/10 of the mass (+-5%)
        param = iap.Uniform(0, 1.0)
        samples = param.draw_samples((10000,))
        nb_bins = 10
        hist, _ = np.histogram(samples, bins=nb_bins, range=(0.0, 1.0),
                               density=False)
        density_expected = 1.0/nb_bins
        density_tolerance = 0.05
        for nb_samples in hist:
            density = nb_samples / samples.size
            assert (
                density_expected - density_tolerance
                < density <
                density_expected + density_tolerance
            )
    def test_negative_value(self):
        param = iap.Uniform(-1.0, 1.0)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
        assert np.all(
            np.logical_and(
                -1.0 - _eps(samples) < samples,
                samples < 1.0 + _eps(samples)
            )
        )
    def test_wrong_argument_order(self):
        # swapped bounds must behave like the correctly ordered pair
        param = iap.Uniform(1.0, -1.0)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
        assert np.all(
            np.logical_and(
                -1.0 - _eps(samples) < samples,
                samples < 1.0 + _eps(samples)
            )
        )
    def test_arguments_are_integers(self):
        param = iap.Uniform(-1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
        assert np.all(
            np.logical_and(
                -1.0 - _eps(samples) < samples,
                samples < 1.0 + _eps(samples)
            )
        )
    def test_arguments_are_identical(self):
        # degenerate interval [1, 1] -> constant output
        param = iap.Uniform(1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert 1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
        assert np.all(
            np.logical_and(
                1.0 - _eps(samples) < samples,
                samples < 1.0 + _eps(samples)
            )
        )
    def test_samples_same_values_for_same_seeds(self):
        # identical RNG seeds must reproduce identical samples
        param = iap.Uniform(-1.0, 1.0)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.allclose(samples1, samples2)
class TestBeta(unittest.TestCase):
    """Tests for the Beta stochastic parameter."""
    @classmethod
    def _mean(cls, alpha, beta):
        # theoretical mean of Beta(alpha, beta)
        return alpha / (alpha + beta)
    @classmethod
    def _var(cls, alpha, beta):
        # theoretical variance of Beta(alpha, beta)
        return (alpha * beta) / ((alpha + beta)**2 * (alpha + beta + 1))
    def setUp(self):
        # reset global RNG so each test is reproducible
        reseed()
    def test___init__(self):
        param = iap.Beta(0.5, 0.5)
        assert (
            param.__str__()
            == param.__repr__()
            == "Beta("
               "Deterministic(float 0.50000000), "
               "Deterministic(float 0.50000000)"
               ")"
        )
    def test_draw_sample(self):
        # beta samples live in [0, 1] (epsilon-tolerant)
        param = iap.Beta(0.5, 0.5)
        sample = param.draw_sample()
        assert sample.shape == tuple()
        assert 0 - _eps(sample) < sample < 1.0 + _eps(sample)
    def test_draw_samples(self):
        param = iap.Beta(0.5, 0.5)
        samples = param.draw_samples((100, 1000))
        assert samples.shape == (100, 1000)
        assert np.all(
            np.logical_and(
                0 - _eps(samples) <= samples,
                samples <= 1.0 + _eps(samples)
            )
        )
    def test_via_comparison_to_np_beta(self):
        # histogram densities should match numpy's beta sampler
        param = iap.Beta(0.5, 0.5)
        samples = param.draw_samples((100, 1000))
        samples_direct = iarandom.RNG(1234).beta(
            a=0.5, b=0.5, size=(100, 1000))
        nb_bins = 10
        hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 1.0),
                               density=False)
        hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
                                      range=(0, 1.0), density=False)
        tolerance = 0.05
        for nb_samples, nb_samples_direct in zip(hist, hist_direct):
            density = nb_samples / samples.size
            density_direct = nb_samples_direct / samples_direct.size
            assert (
                density_direct - tolerance
                < density <
                density_direct + tolerance
            )
    def test_argument_is_stochastic_parameter(self):
        # alpha drawn from Choice -> observed mean matches one candidate
        param = iap.Beta(iap.Choice([0.5, 2]), 0.5)
        expected_first = self._mean(0.5, 0.5)
        expected_second = self._mean(2, 0.5)
        seen = [0, 0]
        for _ in sm.xrange(100):
            samples = param.draw_samples((10000,))
            observed = np.mean(samples)
            if expected_first - 0.05 < observed < expected_first + 0.05:
                seen[0] += 1
            elif expected_second - 0.05 < observed < expected_second + 0.05:
                seen[1] += 1
            else:
                assert False
        assert 50 - 25 < seen[0] < 50 + 25
        assert 50 - 25 < seen[1] < 50 + 25
    def test_compare_curves_of_different_arguments(self):
        # Beta(2,2) is more concentrated than Beta(0.5,0.5)
        param1 = iap.Beta(2, 2)
        param2 = iap.Beta(0.5, 0.5)
        samples1 = param1.draw_samples((10000,))
        samples2 = param2.draw_samples((10000,))
        expected_first = self._var(2, 2)
        expected_second = self._var(0.5, 0.5)
        assert np.var(samples1) < np.var(samples2)
        assert (
            expected_first - 0.1 * expected_first
            < np.var(samples1) <
            expected_first + 0.1 * expected_first
        )
        assert (
            expected_second - 0.1 * expected_second
            < np.var(samples2) <
            expected_second + 0.1 * expected_second
        )
    def test_samples_same_values_for_same_seeds(self):
        # identical RNG seeds must reproduce identical samples
        param = iap.Beta(0.5, 0.5)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.allclose(samples1, samples2)
class TestDeterministic(unittest.TestCase):
    """Tests for the Deterministic (constant-valued) parameter."""

    def setUp(self):
        # reset global RNG so each test is reproducible
        reseed()

    def test___init__(self):
        # repr/str formatting differs per value type (int/float/str)
        pairs = [
            (0, "Deterministic(int 0)"),
            (1.0, "Deterministic(float 1.00000000)"),
            ("test", "Deterministic(test)")
        ]
        for value, expected in pairs:
            with self.subTest(value=value):
                param = iap.Deterministic(value)
                assert (
                    param.__str__()
                    == param.__repr__()
                    == expected
                )

    def test_samples_same_values_for_same_seeds(self):
        values = [
            -100, -54, -1, 0, 1, 54, 100,
            # BUG FIX: this list previously contained 0.1 twice; the
            # negative counterpart -0.1 was missing from the otherwise
            # symmetric value set.
            -100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0
        ]
        for value in values:
            with self.subTest(value=value):
                param = iap.Deterministic(value)
                rs1 = iarandom.RNG(123456)
                rs2 = iarandom.RNG(123456)
                samples1 = param.draw_samples(20, random_state=rs1)
                samples2 = param.draw_samples(20, random_state=rs2)
                assert np.array_equal(samples1, samples2)

    def test_draw_sample_int(self):
        # constant parameter: repeated draws return the same scalar
        values = [-100, -54, -1, 0, 1, 54, 100]
        for value in values:
            with self.subTest(value=value):
                param = iap.Deterministic(value)
                sample1 = param.draw_sample()
                sample2 = param.draw_sample()
                assert sample1.shape == tuple()
                assert sample1 == sample2

    def test_draw_sample_float(self):
        # BUG FIX: -0.1 restored (0.1 was listed twice before).
        values = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for value in values:
            with self.subTest(value=value):
                param = iap.Deterministic(value)
                sample1 = param.draw_sample()
                sample2 = param.draw_sample()
                assert sample1.shape == tuple()
                assert np.isclose(
                    sample1, sample2, rtol=0, atol=_eps(sample1))

    def test_draw_samples_int(self):
        values = [-100, -54, -1, 0, 1, 54, 100]
        shapes = [10, 10, (5, 3), (5, 3), (4, 5, 3), (4, 5, 3)]
        for value, shape in itertools.product(values, shapes):
            with self.subTest(value=value, shape=shape):
                param = iap.Deterministic(value)
                samples = param.draw_samples(shape)
                # an int shape request is normalized to a 1-tuple
                shape_expected = (
                    shape
                    if isinstance(shape, tuple)
                    else tuple([shape]))
                assert samples.shape == shape_expected
                assert np.all(samples == value)

    def test_draw_samples_float(self):
        # BUG FIX: -0.1 restored (0.1 was listed twice before).
        values = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        shapes = [10, 10, (5, 3), (5, 3), (4, 5, 3), (4, 5, 3)]
        for value, shape in itertools.product(values, shapes):
            with self.subTest(value=value, shape=shape):
                param = iap.Deterministic(value)
                samples = param.draw_samples(shape)
                shape_expected = (
                    shape
                    if isinstance(shape, tuple)
                    else tuple([shape]))
                assert samples.shape == shape_expected
                assert np.allclose(samples, value, rtol=0, atol=_eps(samples))

    def test_argument_is_stochastic_parameter(self):
        # a StochasticParameter argument is resolved to a fixed value at
        # construction time (value distribution ~50/50 over 200 builds)
        seen = [0, 0]
        for _ in sm.xrange(200):
            param = iap.Deterministic(iap.Choice([0, 1]))
            seen[param.value] += 1
        assert 100 - 50 < seen[0] < 100 + 50
        assert 100 - 50 < seen[1] < 100 + 50

    def test_argument_has_invalid_type(self):
        with self.assertRaises(Exception) as context:
            _ = iap.Deterministic([1, 2, 3])
        self.assertTrue(
            "Expected StochasticParameter object or number or string"
            in str(context.exception))
class TestFromLowerResolution(unittest.TestCase):
    def setUp(self):
        # reset global RNG so each test is reproducible
        reseed()
    def test___init___size_percent(self):
        # repr/str must reflect the size_percent configuration
        param = iap.FromLowerResolution(other_param=iap.Deterministic(0),
                                        size_percent=1, method="nearest")
        assert (
            param.__str__()
            == param.__repr__()
            == "FromLowerResolution("
               "size_percent=Deterministic(int 1), "
               "method=Deterministic(nearest), "
               "other_param=Deterministic(int 0)"
               ")"
        )
    def test___init___size_px(self):
        # repr/str must reflect the size_px configuration
        param = iap.FromLowerResolution(other_param=iap.Deterministic(0),
                                        size_px=1, method="nearest")
        assert (
            param.__str__()
            == param.__repr__()
            == "FromLowerResolution("
               "size_px=Deterministic(int 1), "
               "method=Deterministic(nearest), "
               "other_param=Deterministic(int 0)"
               ")"
        )
    def test_binomial_hwc(self):
        # (H, W, C) request: output keeps shape and contains both classes
        param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
        samples = param.draw_samples((8, 8, 1))
        uq = np.unique(samples)
        assert samples.shape == (8, 8, 1)
        assert len(uq) == 2
        assert 0 in uq
        assert 1 in uq
    def test_binomial_nhwc(self):
        # (N, H, W, C) request: output keeps shape and contains both classes
        param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
        samples_nhwc = param.draw_samples((1, 8, 8, 1))
        uq = np.unique(samples_nhwc)
        assert samples_nhwc.shape == (1, 8, 8, 1)
        assert len(uq) == 2
        assert 0 in uq
        assert 1 in uq
    def test_draw_samples_with_too_many_dimensions(self):
        # (N, H, W, C, something) causing error
        param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
        with self.assertRaises(Exception) as context:
            _ = param.draw_samples((1, 8, 8, 1, 1))
        self.assertTrue(
            "FromLowerResolution can only generate samples of shape"
            in str(context.exception)
        )
    def test_binomial_hw3(self):
        # C=3
        param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
        samples = param.draw_samples((8, 8, 3))
        uq = np.unique(samples)
        assert samples.shape == (8, 8, 3)
        assert len(uq) == 2
        assert 0 in uq
        assert 1 in uq
def test_different_size_px_arguments(self):
# different sizes in px
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=16)
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_different_size_px_arguments_with_tuple(self):
# different sizes in px, one given as tuple (a, b)
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=(2, 16))
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(400):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_different_size_px_argument_with_stochastic_parameters(self):
# different sizes in px, given as StochasticParameter
param1 = iap.FromLowerResolution(iap.Binomial(0.5),
size_px=iap.Deterministic(1))
param2 = iap.FromLowerResolution(iap.Binomial(0.5),
size_px=iap.Choice([8, 16]))
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_px_has_invalid_datatype(self):
# bad datatype for size_px
with self.assertRaises(Exception) as context:
_ = iap.FromLowerResolution(iap.Binomial(0.5), size_px=False)
self.assertTrue("Expected " in str(context.exception))
def test_min_size(self):
# min_size
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=1,
min_size=16)
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_percent(self):
# different sizes in percent
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=0.01)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=0.8)
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_percent_as_stochastic_parameters(self):
# different sizes in percent, given as StochasticParameter
param1 = iap.FromLowerResolution(iap.Binomial(0.5),
size_percent=iap.Deterministic(0.01))
param2 = iap.FromLowerResolution(iap.Binomial(0.5),
size_percent=iap.Choice([0.4, 0.8]))
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_percent_has_invalid_datatype(self):
# bad datatype for size_percent
with self.assertRaises(Exception) as context:
_ = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=False)
self.assertTrue("Expected " in str(context.exception))
def test_method(self):
# method given as StochasticParameter
param = iap.FromLowerResolution(
iap.Binomial(0.5), size_px=4,
method=iap.Choice(["nearest", "linear"]))
seen = [0, 0]
for _ in sm.xrange(200):
samples = param.draw_samples((16, 16, 1))
nb_in_between = np.sum(
np.logical_and(0.05 < samples, samples < 0.95))
if nb_in_between == 0:
seen[0] += 1
else:
seen[1] += 1
assert 100 - 50 < seen[0] < 100 + 50
assert 100 - 50 < seen[1] < 100 + 50
def test_method_has_invalid_datatype(self):
# bad datatype for method
with self.assertRaises(Exception) as context:
_ = iap.FromLowerResolution(iap.Binomial(0.5), size_px=4,
method=False)
self.assertTrue("Expected " in str(context.exception))
def test_samples_same_values_for_same_seeds(self):
# multiple calls with same random_state
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
samples1 = param.draw_samples((10, 5, 1),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5, 1),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestClip(unittest.TestCase):
    """Tests for ``iap.Clip``: clamps samples of another parameter to
    ``[lower, upper]``; ``None`` on either side means unbounded."""
    def setUp(self):
        # Reset global RNG state so each test is deterministic in isolation.
        reseed()
    def test___init__(self):
        param = iap.Clip(iap.Deterministic(0), -1, 1)
        assert (
            param.__str__()
            == param.__repr__()
            == "Clip(Deterministic(int 0), -1.000000, 1.000000)"
        )
    def test_value_within_bounds(self):
        # A value strictly inside the bounds passes through unchanged.
        param = iap.Clip(iap.Deterministic(0), -1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == 0
        assert np.all(samples == 0)
    def test_value_exactly_at_upper_bound(self):
        param = iap.Clip(iap.Deterministic(1), -1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == 1
        assert np.all(samples == 1)
    def test_value_exactly_at_lower_bound(self):
        param = iap.Clip(iap.Deterministic(-1), -1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == -1
        assert np.all(samples == -1)
    def test_value_is_within_bounds_and_float(self):
        # Floats inside the bounds are compared with an epsilon tolerance.
        param = iap.Clip(iap.Deterministic(0.5), -1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert 0.5 - _eps(sample) < sample < 0.5 + _eps(sample)
        assert np.all(
            np.logical_and(
                0.5 - _eps(sample) <= samples,
                samples <= 0.5 + _eps(sample)
            )
        )
    def test_value_is_above_upper_bound(self):
        # 2 gets clipped down to the upper bound 1.
        param = iap.Clip(iap.Deterministic(2), -1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == 1
        assert np.all(samples == 1)
    def test_value_is_below_lower_bound(self):
        # -2 gets clipped up to the lower bound -1.
        param = iap.Clip(iap.Deterministic(-2), -1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample == -1
        assert np.all(samples == -1)
    def test_value_is_sometimes_without_bounds_sometimes_beyond(self):
        # Choice([0, 2]) clipped to [-1, 1] can only produce 0 or 1.
        param = iap.Clip(iap.Choice([0, 2]), -1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [0, 1]
        assert np.all(np.logical_or(samples == 0, samples == 1))
    def test_samples_same_values_for_same_seeds(self):
        param = iap.Clip(iap.Choice([0, 2]), -1, 1)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.array_equal(samples1, samples2)
    def test_lower_bound_is_none(self):
        # None lower bound: only the upper bound applies; repr shows "None".
        param = iap.Clip(iap.Deterministic(0), None, 1)
        sample = param.draw_sample()
        assert sample == 0
        assert (
            param.__str__()
            == param.__repr__()
            == "Clip(Deterministic(int 0), None, 1.000000)"
        )
    def test_upper_bound_is_none(self):
        param = iap.Clip(iap.Deterministic(0), 0, None)
        sample = param.draw_sample()
        assert sample == 0
        assert (
            param.__str__()
            == param.__repr__()
            == "Clip(Deterministic(int 0), 0.000000, None)"
        )
    def test_both_bounds_are_none(self):
        # No bounds at all: Clip acts as a pass-through.
        param = iap.Clip(iap.Deterministic(0), None, None)
        sample = param.draw_sample()
        assert sample == 0
        assert (
            param.__str__()
            == param.__repr__()
            == "Clip(Deterministic(int 0), None, None)"
        )
class TestDiscretize(unittest.TestCase):
    """Tests for ``iap.Discretize``: rounds samples of another parameter to
    the nearest integer."""
    def setUp(self):
        # Reset global RNG state so each test is deterministic in isolation.
        reseed()
    def test___init__(self):
        param = iap.Discretize(iap.Deterministic(0))
        assert (
            param.__str__()
            == param.__repr__()
            == "Discretize(Deterministic(int 0))"
        )
    def test_applied_to_deterministic(self):
        # For each fixed value, the expected result is np.round() cast to
        # int32 — i.e. banker's rounding, matching numpy semantics.
        values = [-100.2, -54.3, -1.0, -1, -0.7, -0.00043,
                  0,
                  0.00043, 0.7, 1.0, 1, 54.3, 100.2]
        for value in values:
            with self.subTest(value=value):
                param = iap.Discretize(iap.Deterministic(value))
                value_expected = np.round(
                    np.float64([value])
                ).astype(np.int32)[0]
                sample = param.draw_sample()
                samples = param.draw_samples((10, 5))
                assert sample.shape == tuple()
                assert samples.shape == (10, 5)
                assert sample == value_expected
                assert np.all(samples == value_expected)
    # TODO why are these tests applied to DiscreteUniform instead of Uniform?
    def test_applied_to_discrete_uniform(self):
        # Discretizing an already-discrete parameter keeps its value set.
        param_orig = iap.DiscreteUniform(0, 1)
        param = iap.Discretize(param_orig)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [0, 1]
        assert np.all(np.logical_or(samples == 0, samples == 1))
    def test_applied_to_discrete_uniform_with_wider_range(self):
        # NOTE(review): samples1 and samples2 are independent draws, so this
        # only checks that per-element differences stay below a loose bound
        # (0.2*(10000/3)); it looks like a weak statistical test — confirm
        # the intended tolerance.
        param_orig = iap.DiscreteUniform(0, 2)
        param = iap.Discretize(param_orig)
        samples1 = param_orig.draw_samples((10000,))
        samples2 = param.draw_samples((10000,))
        assert np.all(np.abs(samples1 - samples2) < 0.2*(10000/3))
    def test_samples_same_values_for_same_seeds(self):
        param_orig = iap.DiscreteUniform(0, 2)
        param = iap.Discretize(param_orig)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.array_equal(samples1, samples2)
class TestMultiply(unittest.TestCase):
    """Tests for ``iap.Multiply``: multiplies samples of a parameter by a
    value or another parameter, optionally elementwise."""
    def setUp(self):
        # Reset global RNG state so each test is deterministic in isolation.
        reseed()
    def test___init__(self):
        param = iap.Multiply(iap.Deterministic(0), 1, elementwise=False)
        assert (
            param.__str__()
            == param.__repr__()
            == "Multiply(Deterministic(int 0), Deterministic(int 1), False)"
        )
    def test_multiply_example_integer_values(self):
        values_int = [-100, -54, -1, 0, 1, 54, 100]
        for v1, v2 in itertools.product(values_int, values_int):
            with self.subTest(left=v1, right=v2):
                p = iap.Multiply(iap.Deterministic(v1), v2)
                samples = p.draw_samples((2, 3))
                assert p.draw_sample() == v1 * v2
                assert samples.dtype.kind == "i"
                assert np.array_equal(
                    samples,
                    np.zeros((2, 3), dtype=np.int64) + v1 * v2
                )
    def test_multiply_example_integer_values_both_deterministic(self):
        values_int = [-100, -54, -1, 0, 1, 54, 100]
        for v1, v2 in itertools.product(values_int, values_int):
            with self.subTest(left=v1, right=v2):
                p = iap.Multiply(iap.Deterministic(v1), iap.Deterministic(v2))
                samples = p.draw_samples((2, 3))
                assert p.draw_sample() == v1 * v2
                # NOTE(review): checking the exact dtype name "int32" (vs the
                # kind check "i" above) is platform-sensitive — confirm this
                # is intended.
                assert samples.dtype.name == "int32"
                assert np.array_equal(
                    samples,
                    np.zeros((2, 3), dtype=np.int32) + v1 * v2
                )
    def test_multiply_example_float_values(self):
        # Fixed: list previously contained 0.1 twice; -0.1 was clearly
        # intended (symmetric positive/negative values).
        values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for v1, v2 in itertools.product(values_float, values_float):
            with self.subTest(left=v1, right=v2):
                p = iap.Multiply(iap.Deterministic(v1), v2)
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert np.isclose(sample, v1 * v2, atol=1e-3, rtol=0)
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float32) + v1 * v2
                )
    def test_multiply_example_float_values_both_deterministic(self):
        # Fixed: same -0.1 typo as above.
        values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for v1, v2 in itertools.product(values_float, values_float):
            with self.subTest(left=v1, right=v2):
                p = iap.Multiply(iap.Deterministic(v1), iap.Deterministic(v2))
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert np.isclose(sample, v1 * v2, atol=1e-3, rtol=0)
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float32) + v1 * v2
                )
    def test_multiply_by_stochastic_parameter(self):
        # Non-elementwise: the multiplier is drawn once per call, so all
        # samples in one call must be (nearly) identical.
        param = iap.Multiply(iap.Deterministic(1.0),
                             (1.0, 2.0),
                             elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 * 1.0 - _eps(samples))
        assert np.all(samples < 1.0 * 2.0 + _eps(samples))
        assert (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_multiply_by_stochastic_parameter_elementwise(self):
        # Elementwise: the multiplier is drawn per element, so min and max
        # of the samples must differ.
        param = iap.Multiply(iap.Deterministic(1.0),
                             (1.0, 2.0),
                             elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 * 1.0 - _eps(samples))
        assert np.all(samples < 1.0 * 2.0 + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_multiply_stochastic_parameter_by_fixed_value(self):
        # The left side is stochastic per element, so samples vary even
        # though elementwise=False.
        param = iap.Multiply(iap.Uniform(1.0, 2.0),
                             1.0,
                             elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 * 1.0 - _eps(samples))
        assert np.all(samples < 2.0 * 1.0 + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_multiply_stochastic_parameter_by_fixed_value_elementwise(self):
        param = iap.Multiply(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 * 1.0 - _eps(samples))
        assert np.all(samples < 2.0 * 1.0 + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
class TestDivide(unittest.TestCase):
    """Tests for ``iap.Divide``: divides samples of a parameter by a value or
    another parameter; division by zero is converted to division by one."""
    def setUp(self):
        # Reset global RNG state so each test is deterministic in isolation.
        reseed()
    def test___init__(self):
        param = iap.Divide(iap.Deterministic(0), 1, elementwise=False)
        assert (
            param.__str__()
            == param.__repr__()
            == "Divide(Deterministic(int 0), Deterministic(int 1), False)"
        )
    def test_divide_integers(self):
        values_int = [-100, -54, -1, 0, 1, 54, 100]
        for v1, v2 in itertools.product(values_int, values_int):
            if v2 == 0:
                # avoid zero division; the zero-divisor case is tested
                # separately in test_divide_by_zero.
                v2 = 1
            with self.subTest(left=v1, right=v2):
                p = iap.Divide(iap.Deterministic(v1), v2)
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert sample == (v1 / v2)
                assert samples.dtype.kind == "f"
                assert np.array_equal(
                    samples,
                    np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
                )
    def test_divide_integers_both_deterministic(self):
        values_int = [-100, -54, -1, 0, 1, 54, 100]
        for v1, v2 in itertools.product(values_int, values_int):
            if v2 == 0:
                v2 = 1
            with self.subTest(left=v1, right=v2):
                p = iap.Divide(iap.Deterministic(v1), iap.Deterministic(v2))
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert sample == (v1 / v2)
                assert samples.dtype.kind == "f"
                assert np.array_equal(
                    samples,
                    np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
                )
    def test_divide_floats(self):
        # Fixed: list previously contained 0.1 twice; -0.1 was clearly
        # intended (symmetric positive/negative values).
        values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for v1, v2 in itertools.product(values_float, values_float):
            if v2 == 0:
                v2 = 1
            with self.subTest(left=v1, right=v2):
                p = iap.Divide(iap.Deterministic(v1), v2)
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert (
                    (v1 / v2) - _eps(sample)
                    <= sample <=
                    (v1 / v2) + _eps(sample)
                )
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
                )
    def test_divide_floats_both_deterministic(self):
        # Fixed: same -0.1 typo as above.
        values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for v1, v2 in itertools.product(values_float, values_float):
            if v2 == 0:
                v2 = 1
            with self.subTest(left=v1, right=v2):
                p = iap.Divide(iap.Deterministic(v1), iap.Deterministic(v2))
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert (
                    (v1 / v2) - _eps(sample)
                    <= sample <=
                    (v1 / v2) + _eps(sample)
                )
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
                )
    def test_divide_by_stochastic_parameter(self):
        # Non-elementwise: the divisor is drawn once per call, so all samples
        # in one call must be (nearly) identical.
        param = iap.Divide(iap.Deterministic(1.0),
                           (1.0, 2.0),
                           elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > (1.0 / 2.0) - _eps(samples))
        assert np.all(samples < (1.0 / 1.0) + _eps(samples))
        assert (
            samples_sorted[0] - _eps(samples)
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples)
        )
    def test_divide_by_stochastic_parameter_elementwise(self):
        # Elementwise: the divisor is drawn per element, so min and max of
        # the samples must differ.
        param = iap.Divide(iap.Deterministic(1.0),
                           (1.0, 2.0),
                           elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > (1.0 / 2.0) - _eps(samples))
        assert np.all(samples < (1.0 / 1.0) + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples)
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples)
        )
    def test_divide_stochastic_parameter_by_float(self):
        param = iap.Divide(iap.Uniform(1.0, 2.0),
                           1.0,
                           elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > (1.0 / 1.0) - _eps(samples))
        assert np.all(samples < (2.0 / 1.0) + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples)
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples)
        )
    def test_divide_stochastic_parameter_by_float_elementwise(self):
        param = iap.Divide(iap.Uniform(1.0, 2.0),
                           1.0,
                           elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > (1.0 / 1.0) - _eps(samples))
        assert np.all(samples < (2.0 / 1.0) + _eps(samples))
        # Fixed: the chained comparison previously contained a duplicated
        # "< samples_sorted[-1]" term ("x < x" is always False), which made
        # "assert not (...)" vacuously True and the assertion meaningless.
        assert not (
            samples_sorted[0] - _eps(samples_sorted)
            < samples_sorted[-1]
            < samples_sorted[0] + _eps(samples_sorted)
        )
    def test_divide_by_stochastic_parameter_that_can_by_zero(self):
        # test division by zero automatically being converted to division by 1
        param = iap.Divide(2,
                           iap.Choice([0, 2]),
                           elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_unique = np.sort(np.unique(samples.flatten()))
        assert samples_unique[0] == 1 and samples_unique[1] == 2
    def test_divide_by_zero(self):
        # A constant zero divisor is replaced by 1, so 1/0 yields 1.
        param = iap.Divide(iap.Deterministic(1), 0, elementwise=False)
        sample = param.draw_sample()
        assert sample == 1
class TestAdd(unittest.TestCase):
    """Tests for ``iap.Add``: adds a value or another parameter to samples of
    a parameter, optionally elementwise."""
    def setUp(self):
        # Reset global RNG state so each test is deterministic in isolation.
        reseed()
    def test___init__(self):
        param = iap.Add(iap.Deterministic(0), 1, elementwise=False)
        assert (
            param.__str__()
            == param.__repr__()
            == "Add(Deterministic(int 0), Deterministic(int 1), False)"
        )
    def test_add_integers(self):
        values_int = [-100, -54, -1, 0, 1, 54, 100]
        for v1, v2 in itertools.product(values_int, values_int):
            with self.subTest(left=v1, right=v2):
                p = iap.Add(iap.Deterministic(v1), v2)
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert sample == v1 + v2
                assert samples.dtype.kind == "i"
                assert np.array_equal(
                    samples,
                    np.zeros((2, 3), dtype=np.int32) + v1 + v2
                )
    def test_add_integers_both_deterministic(self):
        values_int = [-100, -54, -1, 0, 1, 54, 100]
        for v1, v2 in itertools.product(values_int, values_int):
            with self.subTest(left=v1, right=v2):
                p = iap.Add(iap.Deterministic(v1), iap.Deterministic(v2))
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert sample == v1 + v2
                assert samples.dtype.kind == "i"
                assert np.array_equal(
                    samples,
                    np.zeros((2, 3), dtype=np.int32) + v1 + v2
                )
    def test_add_floats(self):
        # Fixed: list previously contained 0.1 twice; -0.1 was clearly
        # intended (symmetric positive/negative values).
        values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for v1, v2 in itertools.product(values_float, values_float):
            with self.subTest(left=v1, right=v2):
                p = iap.Add(iap.Deterministic(v1), v2)
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert np.isclose(sample, v1 + v2, atol=1e-3, rtol=0)
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float32) + v1 + v2
                )
    def test_add_floats_both_deterministic(self):
        # Fixed: same -0.1 typo as above.
        values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for v1, v2 in itertools.product(values_float, values_float):
            with self.subTest(left=v1, right=v2):
                p = iap.Add(iap.Deterministic(v1), iap.Deterministic(v2))
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert np.isclose(sample, v1 + v2, atol=1e-3, rtol=0)
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float32) + v1 + v2
                )
    def test_add_stochastic_parameter(self):
        # Non-elementwise: the addend is drawn once per call, so all samples
        # in one call must be (nearly) identical.
        param = iap.Add(iap.Deterministic(1.0), (1.0, 2.0), elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
        assert np.all(samples <= 1.0 + 2.0 + _eps(samples))
        assert (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1]
            < samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_add_stochastic_parameter_elementwise(self):
        # Elementwise: the addend is drawn per element, so min and max of
        # the samples must differ.
        param = iap.Add(iap.Deterministic(1.0), (1.0, 2.0), elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
        assert np.all(samples <= 1.0 + 2.0 + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1]
            < samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_add_to_stochastic_parameter(self):
        param = iap.Add(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
        assert np.all(samples <= 2.0 + 1.0 + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1]
            < samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_add_to_stochastic_parameter_elementwise(self):
        param = iap.Add(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
        assert np.all(samples <= 2.0 + 1.0 + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1]
            < samples_sorted[0] + _eps(samples_sorted[0])
        )
class TestSubtract(unittest.TestCase):
    """Tests for ``iap.Subtract``: subtracts a value or another parameter
    from samples of a parameter, optionally elementwise."""
    def setUp(self):
        # Reset global RNG state so each test is deterministic in isolation.
        reseed()
    def test___init__(self):
        param = iap.Subtract(iap.Deterministic(0), 1, elementwise=False)
        assert (
            param.__str__()
            == param.__repr__()
            == "Subtract(Deterministic(int 0), Deterministic(int 1), False)"
        )
    def test_subtract_integers(self):
        values_int = [-100, -54, -1, 0, 1, 54, 100]
        for v1, v2 in itertools.product(values_int, values_int):
            with self.subTest(left=v1, right=v2):
                p = iap.Subtract(iap.Deterministic(v1), v2)
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert sample == v1 - v2
                assert samples.dtype.kind == "i"
                assert np.array_equal(
                    samples,
                    np.zeros((2, 3), dtype=np.int64) + v1 - v2
                )
    def test_subtract_integers_both_deterministic(self):
        values_int = [-100, -54, -1, 0, 1, 54, 100]
        for v1, v2 in itertools.product(values_int, values_int):
            with self.subTest(left=v1, right=v2):
                p = iap.Subtract(iap.Deterministic(v1), iap.Deterministic(v2))
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert sample == v1 - v2
                assert samples.dtype.kind == "i"
                assert np.array_equal(
                    samples,
                    np.zeros((2, 3), dtype=np.int64) + v1 - v2
                )
    def test_subtract_floats(self):
        # Fixed: list previously contained 0.1 twice; -0.1 was clearly
        # intended (symmetric positive/negative values).
        values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for v1, v2 in itertools.product(values_float, values_float):
            with self.subTest(left=v1, right=v2):
                p = iap.Subtract(iap.Deterministic(v1), v2)
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert v1 - v2 - _eps(sample) < sample < v1 - v2 + _eps(sample)
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float64) + v1 - v2
                )
    def test_subtract_floats_both_deterministic(self):
        # Fixed: same -0.1 typo as above.
        values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
        for v1, v2 in itertools.product(values_float, values_float):
            with self.subTest(left=v1, right=v2):
                p = iap.Subtract(iap.Deterministic(v1), iap.Deterministic(v2))
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert v1 - v2 - _eps(sample) < sample < v1 - v2 + _eps(sample)
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float64) + v1 - v2
                )
    def test_subtract_stochastic_parameter(self):
        # Non-elementwise: the subtrahend is drawn once per call, so all
        # samples in one call must be (nearly) identical.
        param = iap.Subtract(iap.Deterministic(1.0),
                             (1.0, 2.0),
                             elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 - 2.0 - _eps(samples))
        assert np.all(samples < 1.0 - 1.0 + _eps(samples))
        assert (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_subtract_stochastic_parameter_elementwise(self):
        # Elementwise: the subtrahend is drawn per element, so min and max
        # of the samples must differ.
        param = iap.Subtract(iap.Deterministic(1.0),
                             (1.0, 2.0),
                             elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 - 2.0 - _eps(samples))
        assert np.all(samples < 1.0 - 1.0 + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_subtract_from_stochastic_parameter(self):
        param = iap.Subtract(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 - 1.0 - _eps(samples))
        assert np.all(samples < 2.0 - 1.0 + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_subtract_from_stochastic_parameter_elementwise(self):
        param = iap.Subtract(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 - 1.0 - _eps(samples))
        assert np.all(samples < 2.0 - 1.0 + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
class TestPower(unittest.TestCase):
    """Tests for ``iap.Power``: raises samples of a parameter to an exponent
    (value or parameter), optionally elementwise.  Results are floats."""
    def setUp(self):
        # Reset global RNG state so each test is deterministic in isolation.
        reseed()
    def test___init__(self):
        param = iap.Power(iap.Deterministic(0), 1, elementwise=False)
        assert (
            param.__str__()
            == param.__repr__()
            == "Power(Deterministic(int 0), Deterministic(int 1), False)"
        )
    def test_pairs(self):
        values = [
            -100, -54, -1, 0, 1, 54, 100,
            -100.0, -54.0, -1.0, 0.0, 1.0, 54.0, 100.0
        ]
        exponents = [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]
        for base, exponent in itertools.product(values, exponents):
            # Skip mathematically undefined combinations: negative base with
            # fractional exponent (complex result) and 0 to a negative power.
            if base < 0 and ia.is_single_float(exponent):
                continue
            if base == 0 and exponent < 0:
                continue
            with self.subTest(base=base, exponent=exponent):
                p = iap.Power(iap.Deterministic(base), exponent)
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert (
                    base ** exponent - _eps(sample)
                    < sample <
                    base ** exponent + _eps(sample)
                )
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float64) + base ** exponent
                )
    def test_pairs_both_deterministic(self):
        values = [
            -100, -54, -1, 0, 1, 54, 100,
            -100.0, -54.0, -1.0, 0.0, 1.0, 54.0, 100.0
        ]
        exponents = [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]
        for base, exponent in itertools.product(values, exponents):
            if base < 0 and ia.is_single_float(exponent):
                continue
            if base == 0 and exponent < 0:
                continue
            with self.subTest(base=base, exponent=exponent):
                p = iap.Power(iap.Deterministic(base), iap.Deterministic(exponent))
                sample = p.draw_sample()
                samples = p.draw_samples((2, 3))
                assert (
                    base ** exponent - _eps(sample)
                    < sample <
                    base ** exponent + _eps(sample)
                )
                assert samples.dtype.kind == "f"
                assert np.allclose(
                    samples,
                    np.zeros((2, 3), dtype=np.float64) + base ** exponent
                )
    def test_exponent_is_stochastic_parameter(self):
        # Non-elementwise: the exponent is drawn once per call, so all
        # samples in one call must be (nearly) identical.
        param = iap.Power(iap.Deterministic(1.5),
                          (1.0, 2.0),
                          elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.5 ** 1.0 - 2 * _eps(samples))
        assert np.all(samples < 1.5 ** 2.0 + 2 * _eps(samples))
        assert (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_exponent_is_stochastic_parameter_elementwise(self):
        # Elementwise: the exponent is drawn per element, so min and max of
        # the samples must differ.
        param = iap.Power(iap.Deterministic(1.5),
                          (1.0, 2.0),
                          elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.5 ** 1.0 - 2 * _eps(samples))
        assert np.all(samples < 1.5 ** 2.0 + 2 * _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_value_is_uniform(self):
        param = iap.Power(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 ** 1.0 - 2 * _eps(samples))
        assert np.all(samples < 2.0 ** 1.0 + 2 * _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
    def test_value_is_uniform_elementwise(self):
        param = iap.Power(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
        samples = param.draw_samples((10, 20))
        samples_sorted = np.sort(samples.flatten())
        assert samples.shape == (10, 20)
        assert np.all(samples > 1.0 ** 1.0 - 2 * _eps(samples))
        assert np.all(samples < 2.0 ** 1.0 + 2 * _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted[0])
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted[0])
        )
class TestAbsolute(unittest.TestCase):
    """Tests for ``iap.Absolute``: takes the absolute value of samples of
    another parameter."""
    def setUp(self):
        # Reset global RNG state so each test is deterministic in isolation.
        reseed()
    def test___init__(self):
        param = iap.Absolute(iap.Deterministic(0))
        assert (
            param.__str__()
            == param.__repr__()
            == "Absolute(Deterministic(int 0))"
        )
    def test_fixed_values(self):
        # Mix of ints and floats; floats are compared with epsilon tolerance,
        # ints exactly.
        simple_values = [-1.5, -1, -1.0, -0.1, 0, 0.0, 0.1, 1, 1.0, 1.5]
        for value in simple_values:
            with self.subTest(value=value):
                param = iap.Absolute(iap.Deterministic(value))
                sample = param.draw_sample()
                samples = param.draw_samples((10, 5))
                assert sample.shape == tuple()
                assert samples.shape == (10, 5)
                if ia.is_single_float(value):
                    assert (
                        abs(value) - _eps(sample)
                        < sample <
                        abs(value) + _eps(sample)
                    )
                    assert np.all(abs(value) - _eps(samples) < samples)
                    assert np.all(samples < abs(value) + _eps(samples))
                else:
                    assert sample == abs(value)
                    assert np.all(samples == abs(value))
    def test_value_is_stochastic_parameter(self):
        # Choice([-3, -1, 1, 3]) under abs() collapses to the set {1, 3}.
        param = iap.Absolute(iap.Choice([-3, -1, 1, 3]))
        sample = param.draw_sample()
        samples = param.draw_samples((10, 10))
        samples_uq = np.sort(np.unique(samples))
        assert sample.shape == tuple()
        assert sample in [3, 1]
        assert samples.shape == (10, 10)
        assert len(samples_uq) == 2
        assert samples_uq[0] == 1 and samples_uq[1] == 3
class TestRandomSign(unittest.TestCase):
    """Tests for iap.RandomSign, which flips the sign of samples with a
    configurable probability."""
    def setUp(self):
        # Reset global RNG state so each test is deterministic.
        reseed()
    def test___init__(self):
        param = iap.RandomSign(iap.Deterministic(0), 0.5)
        assert (
            param.__str__()
            == param.__repr__()
            == "RandomSign(Deterministic(int 0), 0.50)"
        )
    def test_value_is_deterministic(self):
        # With the default sign probability, roughly half of 1000 samples
        # should be positive; bounds are loose to keep the test stable.
        param = iap.RandomSign(iap.Deterministic(1))
        samples = param.draw_samples((1000,))
        n_positive = np.sum(samples == 1)
        n_negative = np.sum(samples == -1)
        assert samples.shape == (1000,)
        assert n_positive + n_negative == 1000
        assert 350 < n_positive < 750
    def test_value_is_deterministic_many_samples(self):
        # Same expectation as above, but drawing single samples in a loop.
        param = iap.RandomSign(iap.Deterministic(1))
        seen = [0, 0]
        for _ in sm.xrange(1000):
            sample = param.draw_sample()
            assert sample.shape == tuple()
            if sample == 1:
                seen[1] += 1
            else:
                seen[0] += 1
        n_negative, n_positive = seen
        assert n_positive + n_negative == 1000
        assert 350 < n_positive < 750
    def test_value_is_stochastic_parameter(self):
        # Choice([1, 2]) with random sign: all four signed values should
        # appear roughly equally often (~1000 each out of 4000).
        param = iap.RandomSign(iap.Choice([1, 2]))
        samples = param.draw_samples((4000,))
        seen = [0, 0, 0, 0]
        seen[0] = np.sum(samples == -2)
        seen[1] = np.sum(samples == -1)
        seen[2] = np.sum(samples == 1)
        seen[3] = np.sum(samples == 2)
        assert np.sum(seen) == 4000
        assert all([700 < v < 1300 for v in seen])
    def test_samples_same_values_for_same_seeds(self):
        # Identical RNG seeds must reproduce identical sample arrays.
        param = iap.RandomSign(iap.Choice([1, 2]))
        samples1 = param.draw_samples((100, 10),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((100, 10),
                                      random_state=iarandom.RNG(1234))
        assert samples1.shape == (100, 10)
        assert samples2.shape == (100, 10)
        assert np.array_equal(samples1, samples2)
        assert np.sum(samples1 == -2) > 50
        assert np.sum(samples1 == -1) > 50
        assert np.sum(samples1 == 1) > 50
        assert np.sum(samples1 == 2) > 50
class TestForceSign(unittest.TestCase):
    """Tests for iap.ForceSign, which coerces samples to a requested sign
    either by inverting them or by re-rolling (redrawing)."""
    def setUp(self):
        # Reset global RNG state so each test is deterministic.
        reseed()
    def test___init__(self):
        param = iap.ForceSign(iap.Deterministic(0), True, "invert", 1)
        assert (
            param.__str__()
            == param.__repr__()
            == "ForceSign(Deterministic(int 0), True, invert, 1)"
        )
    def test_single_sample_positive(self):
        # A positive value stays untouched when a positive sign is requested.
        param = iap.ForceSign(iap.Deterministic(1), positive=True,
                              mode="invert")
        sample = param.draw_sample()
        assert sample.shape == tuple()
        assert sample == 1
    def test_single_sample_negative(self):
        # Invert mode flips a positive value when negative sign is requested.
        param = iap.ForceSign(iap.Deterministic(1), positive=False,
                              mode="invert")
        sample = param.draw_sample()
        assert sample.shape == tuple()
        assert sample == -1
    def test_many_samples_positive(self):
        param = iap.ForceSign(iap.Deterministic(1), positive=True,
                              mode="invert")
        samples = param.draw_samples(100)
        assert samples.shape == (100,)
        assert np.all(samples == 1)
    def test_many_samples_negative(self):
        param = iap.ForceSign(iap.Deterministic(1), positive=False,
                              mode="invert")
        samples = param.draw_samples(100)
        assert samples.shape == (100,)
        assert np.all(samples == -1)
    def test_many_samples_negative_value_to_positive(self):
        param = iap.ForceSign(iap.Deterministic(-1), positive=True,
                              mode="invert")
        samples = param.draw_samples(100)
        assert samples.shape == (100,)
        assert np.all(samples == 1)
    def test_many_samples_negative_value_to_negative(self):
        param = iap.ForceSign(iap.Deterministic(-1), positive=False,
                              mode="invert")
        samples = param.draw_samples(100)
        assert samples.shape == (100,)
        assert np.all(samples == -1)
    def test_many_samples_stochastic_value_to_positive(self):
        # Invert mode maps -2 to 2 and keeps 1, each roughly half the time.
        param = iap.ForceSign(iap.Choice([-2, 1]), positive=True,
                              mode="invert")
        samples = param.draw_samples(1000)
        n_twos = np.sum(samples == 2)
        n_ones = np.sum(samples == 1)
        assert samples.shape == (1000,)
        assert n_twos + n_ones == 1000
        assert 200 < n_twos < 700
        assert 200 < n_ones < 700
    def test_many_samples_stochastic_value_to_positive_reroll(self):
        # Reroll mode redraws negatives; both positive outcomes must occur.
        param = iap.ForceSign(iap.Choice([-2, 1]), positive=True,
                              mode="reroll")
        samples = param.draw_samples(1000)
        n_twos = np.sum(samples == 2)
        n_ones = np.sum(samples == 1)
        assert samples.shape == (1000,)
        assert n_twos + n_ones == 1000
        assert n_twos > 0
        assert n_ones > 0
    def test_many_samples_stochastic_value_to_positive_reroll_max_count(self):
        # With a large reroll budget, -2 draws get redrawn and the only
        # positive value of Choice([-2, 1]) is 1 — so 2s must be rare.
        param = iap.ForceSign(iap.Choice([-2, 1]), positive=True,
                              mode="reroll", reroll_count_max=100)
        samples = param.draw_samples(100)
        n_twos = np.sum(samples == 2)
        n_ones = np.sum(samples == 1)
        assert samples.shape == (100,)
        assert n_twos + n_ones == 100
        assert n_twos < 5
    def test_samples_same_values_for_same_seeds(self):
        # Identical RNG seeds must reproduce identical sample arrays.
        param = iap.ForceSign(iap.Choice([-2, 1]),
                              positive=True,
                              mode="invert")
        samples1 = param.draw_samples((100, 10),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((100, 10),
                                      random_state=iarandom.RNG(1234))
        assert samples1.shape == (100, 10)
        assert samples2.shape == (100, 10)
        assert np.array_equal(samples1, samples2)
class TestPositive(unittest.TestCase):
    """Tests for iap.Positive (ForceSign with a positive target sign)."""
    def setUp(self):
        # Reset global RNG state so each test is deterministic.
        reseed()
    def test_many_samples_reroll(self):
        # Deterministic(-1) can never reroll to a positive value, so after
        # the single allowed reroll every sample must end up inverted to 1.
        param = iap.Positive(iap.Deterministic(-1),
                             mode="reroll",
                             reroll_count_max=1)
        samples = param.draw_samples((100,))
        assert samples.shape == (100,)
        assert np.all(samples == 1)
class TestNegative(unittest.TestCase):
    """Tests for iap.Negative (ForceSign with a negative target sign)."""
    def setUp(self):
        # Reset global RNG state so each test is deterministic.
        reseed()
    def test_many_samples_reroll(self):
        # Deterministic(1) can never reroll to a negative value, so after
        # the single allowed reroll every sample must end up inverted to -1.
        param = iap.Negative(iap.Deterministic(1),
                             mode="reroll",
                             reroll_count_max=1)
        samples = param.draw_samples((100,))
        assert samples.shape == (100,)
        assert np.all(samples == -1)
class TestIterativeNoiseAggregator(unittest.TestCase):
    """Tests for iap.IterativeNoiseAggregator, which draws another parameter
    several times and aggregates the iterations (min/avg/max)."""
    def setUp(self):
        # Reset global RNG state so each test is deterministic.
        reseed()
    def test___init__(self):
        param = iap.IterativeNoiseAggregator(iap.Deterministic(0),
                                             iterations=(1, 3),
                                             aggregation_method="max")
        assert (
            param.__str__()
            == param.__repr__()
            == (
                "IterativeNoiseAggregator("
                "Deterministic(int 0), "
                "DiscreteUniform(Deterministic(int 1), "
                "Deterministic(int 3)"
                "), "
                "Deterministic(max)"
                ")"
            )
        )
    def test_value_is_deterministic_max_1_iter(self):
        # One iteration over a deterministic value must return that value.
        param = iap.IterativeNoiseAggregator(iap.Deterministic(1),
                                             iterations=1,
                                             aggregation_method="max")
        sample = param.draw_sample()
        samples = param.draw_samples((2, 4))
        assert sample.shape == tuple()
        assert samples.shape == (2, 4)
        assert sample == 1
        assert np.all(samples == 1)
    def test_value_is_stochastic_avg_200_iter(self):
        # Averaging 200 draws of Choice([0, 50]) should land near the
        # expected mean of 25 (+/-10 band keeps the test stable).
        param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]),
                                             iterations=200,
                                             aggregation_method="avg")
        sample = param.draw_sample()
        samples = param.draw_samples((2, 4))
        assert sample.shape == tuple()
        assert samples.shape == (2, 4)
        assert 25 - 10 < sample < 25 + 10
        assert np.all(np.logical_and(25 - 10 < samples, samples < 25 + 10))
    def test_value_is_stochastic_max_100_iter(self):
        # The max of 100 draws of {0, 50} is 50 with overwhelming probability.
        param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]),
                                             iterations=100,
                                             aggregation_method="max")
        sample = param.draw_sample()
        samples = param.draw_samples((2, 4))
        assert sample.shape == tuple()
        assert samples.shape == (2, 4)
        assert sample == 50
        assert np.all(samples == 50)
    def test_value_is_stochastic_min_100_iter(self):
        # Symmetric to the max case: the min is 0 with overwhelming
        # probability after 100 draws.
        param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]),
                                             iterations=100,
                                             aggregation_method="min")
        sample = param.draw_sample()
        samples = param.draw_samples((2, 4))
        assert sample.shape == tuple()
        assert samples.shape == (2, 4)
        assert sample == 0
        assert np.all(samples == 0)
    def test_value_is_stochastic_avg_or_max_100_iter_evaluate_counts(self):
        # With aggregation_method=["avg", "max"], each parameter instance
        # picks one method: "avg" yields values near 25, "max" yields 50.
        # Each method should be chosen about half of the 100 runs.
        seen = [0, 0, 0, 0]
        for _ in sm.xrange(100):
            param = iap.IterativeNoiseAggregator(
                iap.Choice([0, 50]),
                iterations=100,
                aggregation_method=["avg", "max"])
            samples = param.draw_samples((1, 1))
            diff_0 = abs(0 - samples[0, 0])
            diff_25 = abs(25 - samples[0, 0])
            diff_50 = abs(50 - samples[0, 0])
            if diff_25 < 10.0:
                seen[0] += 1
            elif diff_50 < _eps(samples):
                seen[1] += 1
            elif diff_0 < _eps(samples):
                seen[2] += 1
            else:
                seen[3] += 1
        assert seen[2] <= 2  # around 0.0
        assert seen[3] <= 2  # 0.0+eps <= x < 15.0 or 35.0 < x < 50.0 or >50.0
        assert 50 - 20 < seen[0] < 50 + 20
        assert 50 - 20 < seen[1] < 50 + 20
    def test_value_is_stochastic_avg_tuple_as_iter_evaluate_histograms(self):
        # iterations as tuple
        # Averaging between 1 and 100 uniform draws concentrates the result
        # around 0, so the central histogram bin must dominate both edges.
        param = iap.IterativeNoiseAggregator(
            iap.Uniform(-1.0, 1.0),
            iterations=(1, 100),
            aggregation_method="avg")
        diffs = []
        for _ in sm.xrange(100):
            samples = param.draw_samples((1, 1))
            diff = abs(samples[0, 0] - 0.0)
            diffs.append(diff)
        nb_bins = 3
        hist, _ = np.histogram(diffs, bins=nb_bins, range=(-1.0, 1.0),
                               density=False)
        assert hist[1] > hist[0]
        assert hist[1] > hist[2]
    def test_value_is_stochastic_max_list_as_iter_evaluate_counts(self):
        # iterations as list
        # 100 iterations of "max" almost surely yield 50; a single iteration
        # yields 0 with p=0.5 — hence roughly 300 vs 100 out of 400 runs.
        seen = [0, 0]
        for _ in sm.xrange(400):
            param = iap.IterativeNoiseAggregator(
                iap.Choice([0, 50]),
                iterations=[1, 100],
                aggregation_method=["max"])
            samples = param.draw_samples((1, 1))
            diff_0 = abs(0 - samples[0, 0])
            diff_50 = abs(50 - samples[0, 0])
            if diff_50 < _eps(samples):
                seen[0] += 1
            elif diff_0 < _eps(samples):
                seen[1] += 1
            else:
                assert False
        assert 300 - 50 < seen[0] < 300 + 50
        assert 100 - 50 < seen[1] < 100 + 50
    def test_value_is_stochastic_all_100_iter(self):
        # test ia.ALL as aggregation_method
        # note that each method individually and list of methods are already
        # tested, so no in depth test is needed here
        param = iap.IterativeNoiseAggregator(
            iap.Choice([0, 50]), iterations=100, aggregation_method=ia.ALL)
        assert isinstance(param.aggregation_method, iap.Choice)
        assert len(param.aggregation_method.a) == 3
        assert [v in param.aggregation_method.a for v in ["min", "avg", "max"]]
    def test_value_is_stochastic_max_2_iter(self):
        # max of two draws of {0, 50} is 0 only when both draws are 0,
        # i.e. with probability 0.25.
        param = iap.IterativeNoiseAggregator(
            iap.Choice([0, 50]), iterations=2, aggregation_method="max")
        samples = param.draw_samples((2, 1000))
        nb_0 = np.sum(samples == 0)
        nb_50 = np.sum(samples == 50)
        assert nb_0 + nb_50 == 2 * 1000
        assert 0.25 - 0.05 < nb_0 / (2 * 1000) < 0.25 + 0.05
    def test_samples_same_values_for_same_seeds(self):
        # Identical RNG seeds must reproduce identical sample arrays.
        param = iap.IterativeNoiseAggregator(
            iap.Choice([0, 50]), iterations=5, aggregation_method="avg")
        samples1 = param.draw_samples((100, 10),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((100, 10),
                                      random_state=iarandom.RNG(1234))
        assert samples1.shape == (100, 10)
        assert samples2.shape == (100, 10)
        assert np.allclose(samples1, samples2)
    def test_stochastic_param_as_aggregation_method(self):
        # A StochasticParameter may be passed directly as the method.
        param = iap.IterativeNoiseAggregator(
            iap.Choice([0, 50]),
            iterations=5,
            aggregation_method=iap.Deterministic("max"))
        assert isinstance(param.aggregation_method, iap.Deterministic)
        assert param.aggregation_method.value == "max"
    def test_bad_datatype_for_aggregation_method(self):
        # Invalid aggregation_method types must raise with a clear message.
        with self.assertRaises(Exception) as context:
            _ = iap.IterativeNoiseAggregator(
                iap.Choice([0, 50]), iterations=5, aggregation_method=False)
        self.assertTrue(
            "Expected aggregation_method to be" in str(context.exception))
    def test_bad_datatype_for_iterations(self):
        # Invalid iterations types must raise with a clear message.
        with self.assertRaises(Exception) as context:
            _ = iap.IterativeNoiseAggregator(
                iap.Choice([0, 50]),
                iterations=False,
                aggregation_method="max")
        self.assertTrue("Expected iterations to be" in str(context.exception))
class TestSigmoid(unittest.TestCase):
    """Tests for iap.Sigmoid, which (optionally) squashes another parameter's
    samples through sigmoid(val * mul + add - threshold)."""
    def setUp(self):
        # Reset global RNG state so each test is deterministic.
        reseed()
    def test___init__(self):
        param = iap.Sigmoid(
            iap.Deterministic(0),
            threshold=(-10, 10),
            activated=True,
            mul=1,
            add=0)
        assert (
            param.__str__()
            == param.__repr__()
            == (
                "Sigmoid("
                "Deterministic(int 0), "
                "Uniform("
                "Deterministic(int -10), "
                "Deterministic(int 10)"
                "), "
                "Deterministic(int 1), "
                "1, "
                "0)"
            )
        )
    def test_activated_is_true(self):
        # activated=True: the sigmoid is applied, so every sample equals
        # sigmoid(5*1 + 0 - 0.5) up to float tolerance.
        param = iap.Sigmoid(
            iap.Deterministic(5),
            add=0,
            mul=1,
            threshold=0.5,
            activated=True)
        expected = 1 / (1 + np.exp(-(5 * 1 + 0 - 0.5)))
        sample = param.draw_sample()
        samples = param.draw_samples((5, 10))
        assert sample.shape == tuple()
        assert samples.shape == (5, 10)
        assert expected - _eps(sample) < sample < expected + _eps(sample)
        assert np.all(
            np.logical_and(
                expected - _eps(samples) < samples,
                samples < expected + _eps(samples)
            )
        )
    def test_activated_is_false(self):
        # activated=False: samples pass through unchanged (still 5).
        param = iap.Sigmoid(
            iap.Deterministic(5),
            add=0,
            mul=1,
            threshold=0.5,
            activated=False)
        expected = 5
        sample = param.draw_sample()
        samples = param.draw_samples((5, 10))
        assert sample.shape == tuple()
        assert samples.shape == (5, 10)
        assert expected - _eps(sample) < sample < expected + _eps(sample)
        assert np.all(
            np.logical_and(
                expected - _eps(sample) < samples,
                samples < expected + _eps(sample)
            )
        )
    def test_activated_is_probabilistic(self):
        # activated=0.5: roughly half of the draws pass through unchanged
        # (5), the other half are squashed through the sigmoid.
        param = iap.Sigmoid(
            iap.Deterministic(5),
            add=0,
            mul=1,
            threshold=0.5,
            activated=0.5)
        expected_first = 5
        expected_second = 1 / (1 + np.exp(-(5 * 1 + 0 - 0.5)))
        seen = [0, 0]
        for _ in sm.xrange(1000):
            sample = param.draw_sample()
            diff_first = abs(sample - expected_first)
            diff_second = abs(sample - expected_second)
            if diff_first < _eps(sample):
                seen[0] += 1
            elif diff_second < _eps(sample):
                seen[1] += 1
            else:
                # Every sample must match one of the two expected values.
                assert False
        assert 500 - 150 < seen[0] < 500 + 150
        assert 500 - 150 < seen[1] < 500 + 150
    def test_value_is_stochastic_param(self):
        # The wrapped parameter is Choice([1, 10]); each draw must equal the
        # sigmoid of one of the two choices, roughly equally often.
        param = iap.Sigmoid(
            iap.Choice([1, 10]),
            add=0,
            mul=1,
            threshold=0.5,
            activated=True)
        expected_first = 1 / (1 + np.exp(-(1 * 1 + 0 - 0.5)))
        expected_second = 1 / (1 + np.exp(-(10 * 1 + 0 - 0.5)))
        seen = [0, 0]
        for _ in sm.xrange(1000):
            sample = param.draw_sample()
            diff_first = abs(sample - expected_first)
            diff_second = abs(sample - expected_second)
            if diff_first < _eps(sample):
                seen[0] += 1
            elif diff_second < _eps(sample):
                seen[1] += 1
            else:
                assert False
        assert 500 - 150 < seen[0] < 500 + 150
        assert 500 - 150 < seen[1] < 500 + 150
    def test_mul_add_threshold_with_various_fixed_values(self):
        # Cross-check sigmoid(val*mul + add - thresh) over a grid of
        # parameter combinations. The expected value is computed in the
        # sample's own dtype; the 5*eps tolerance absorbs float error.
        muls = [0.1, 1, 10.3]
        adds = [-5.7, -0.0734, 0, 0.0734, 5.7]
        vals = [-1, -0.7, 0, 0.7, 1]
        threshs = [-5.7, -0.0734, 0, 0.0734, 5.7]
        for mul, add, val, thresh in itertools.product(muls, adds, vals,
                                                       threshs):
            with self.subTest(mul=mul, add=add, val=val, threshold=thresh):
                param = iap.Sigmoid(
                    iap.Deterministic(val),
                    add=add,
                    mul=mul,
                    threshold=thresh)
                sample = param.draw_sample()
                samples = param.draw_samples((2, 3))
                dt = sample.dtype
                val_ = np.array([val], dtype=dt)
                mul_ = np.array([mul], dtype=dt)
                add_ = np.array([add], dtype=dt)
                thresh_ = np.array([thresh], dtype=dt)
                expected = (
                    1 / (
                        1 + np.exp(
                            -(val_ * mul_ + add_ - thresh_)
                        )
                    )
                )
                assert sample.shape == tuple()
                assert samples.shape == (2, 3)
                assert (
                    expected - 5*_eps(sample)
                    < sample <
                    expected + 5*_eps(sample)
                )
                assert np.all(
                    np.logical_and(
                        expected - 5*_eps(sample) < samples,
                        samples < expected + 5*_eps(sample)
                    )
                )
    def test_samples_same_values_for_same_seeds(self):
        # Identical RNG seeds must reproduce identical sample arrays.
        param = iap.Sigmoid(
            iap.Choice([1, 10]),
            add=0,
            mul=1,
            threshold=0.5,
            activated=True)
        samples1 = param.draw_samples((100, 10),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((100, 10),
                                      random_state=iarandom.RNG(1234))
        assert samples1.shape == (100, 10)
        assert samples2.shape == (100, 10)
        assert np.array_equal(samples1, samples2)
|
import time
import logging
from healthtools.scrapers.base_scraper import Scraper
from healthtools.config import SITES, SMALL_BATCH_NHIF
log = logging.getLogger(__name__)
class NhifInpatientScraper(Scraper):
    """Scraper for the NHIF accredited inpatient facilities"""
    def __init__(self):
        super(NhifInpatientScraper, self).__init__()
        self.site_url = SITES["NHIF_INPATIENT"]
        # Column order matches the <td> order of the site's tables; doc_id
        # is appended last so it zips onto the "id" field.
        self.fields = ["hospital", "postal_addr", "beds", "branch", "category", "id"]
        self.es_doc = "nhif-inpatient"
        self.data_key = "nhif_inpatient.json"
        self.data_archive_key = "archive/nhif_inpatient-{}.json"
    def scrape_page(self, tab_num, page_retries):
        """
        Get entries from each tab panel.

        :param tab_num: the tab number (used only in error reporting)
        :param page_retries: number of retries already used (retries stop at 5)
        :return: tuple of (records, elasticsearch-formatted records) on
                 success; the exception instance after five failed attempts
        """
        try:
            soup = self.make_soup(self.site_url)
            regions = soup.findAll("a", {"data-toggle": "tab"})
            # Each tab is (panel-element id, region display name).
            tabs = [(region["href"].split("#")[1], str(region.getText())) for region in regions]
            results = []
            results_es = []
            for tab in tabs:
                table = soup.find("div", {"id": tab[0]}).tbody
                # small_batch limits rows per tab (useful for test runs).
                if self.small_batch:
                    rows = table.find_all("tr")[:SMALL_BATCH_NHIF]
                else:
                    rows = table.find_all("tr")
                for row in rows:
                    columns = row.find_all("td")
                    columns = [str(text.get_text()) for text in columns]
                    columns.append(self.doc_id)
                    entry = dict(zip(self.fields, columns))
                    # Nairobi region isn't included correctly
                    if tab[1] == "":
                        entry["region"] = "Nairobi Region"
                    else:
                        entry["region"] = tab[1]
                    meta, entry = self.elasticsearch_format(entry)
                    results_es.append(meta)
                    results_es.append(entry)
                    results.append(entry)
                    self.doc_id += 1
            return results, results_es
        except Exception as err:
            if page_retries >= 5:
                error = {
                    "ERROR": "Failed to scrape data from NHIF Inpatient page.",
                    "SOURCE": "scrape_page() url: %s" % tab_num,
                    "MESSAGE": str(err)
                }
                self.print_error(error)
                return err
            else:
                page_retries += 1
                log.warning("Try %d/5 has failed... \n%s \nGoing to sleep for %d seconds.",
                            page_retries, err, page_retries*5)
                time.sleep(page_retries*5)
                # Bug fix: propagate the retry's result. Previously the
                # recursive call's return value was discarded, so even a
                # successful retry returned None to the caller.
                return self.scrape_page(tab_num, page_retries)
    def set_site_pages_no(self):
        """
        Get the total number of pages (here: the number of region tabs).
        """
        try:
            soup = self.make_soup(self.site_url)
            # get number of tabs to scrape
            self.site_pages_no = len(
                [tag.name for tag in soup.find("div", {"class": "tab-content"}) if tag.name == 'div'])
        except Exception as err:
            error = {
                "ERROR": "NHIF Inpatient: set_site_pages_no()",
                "SOURCE": "url: %s" % self.site_url,
                "MESSAGE": str(err)
            }
            self.print_error(error)
            return
|
from fpdf import FPDF
import os
import re
from scipy.signal.spectral import spectrogram
class PDF(FPDF):
    """A4 report document with a custom header, footer and plot pages."""
    def __init__(self):
        super().__init__()
        # A4 page dimensions in millimetres.
        self.WIDTH = 210
        self.HEIGHT = 297
    def header(self):
        # Custom logo and positioning.
        # Create an `assets` folder and put any wide and short image inside;
        # name the image `logo.png`.
        # image(name, x-from-left, y-from-top, width)
        self.image('.\\assets/logo.png', 10, 8, 35)
        self.image('.\\assets/CUFE.png', 170, 6, 25)
        self.set_font('helvetica', 'B', 15)
        # Shift the cursor right so the title lands roughly centred.
        self.cell(self.WIDTH - 142)
        self.cell(60, 1, 'Sound Equalizer', 0, 0, 'C')
        self.ln(20)
    def footer(self):
        # Page number centred in the footer.
        self.set_y(-15)
        self.set_font('helvetica', 'I', 8)
        self.set_text_color(128)
        self.cell(0, 10, 'Page ' + str(self.page_no()), 0, 0, 'C')
    def print_page(self, images, PLOT_DIR):
        """Add one page containing two stacked plot images.

        :param images: pair of image file names located inside PLOT_DIR
        :param PLOT_DIR: directory holding the rendered plot images
        """
        self.add_page()
        # 15 mm from the left; width spans the page minus the margins.
        self.image(PLOT_DIR + '/' + images[0], 15, 30, self.WIDTH - 30)
        self.image(PLOT_DIR + '/' +
                   images[1], 15, self.WIDTH / 2 + 20, self.WIDTH - 30)
    def construct(self, PLOT_DIR):
        """Pair up the plot files in PLOT_DIR, two images per report page.

        Files are sorted by the part of their name before the first dot and
        split into two halves: page i shows file i of the first half and
        file i of the second half.

        Bug fix: the directory was listed twice (the first, unsorted listing
        was immediately discarded); it is now listed and sorted once.
        """
        files = sorted(os.listdir(PLOT_DIR), key=lambda x: x.split('.')[0])
        pages = len(files) // 2
        pages_data = []
        for i in range(pages):
            pages_data.append([files[i], files[pages + i]])
        return pages_data
|
from typing import Optional
import colorful as cf
from kolga.utils.models import SubprocessResult
class Logger:
    """
    Class for logging of events in the DevOps pipeline
    """
    def _create_message(self, message: str, icon: Optional[str] = None) -> str:
        """Prepend the icon (plus a space) to the message when one is given."""
        icon_string = f"{icon} " if icon else ""
        return f"{icon_string}{message}"
    def error(
        self,
        message: str = "",
        icon: Optional[str] = None,
        error: Optional[Exception] = None,
        raise_exception: bool = True,
    ) -> None:
        """
        Log formatted errors to stdout and optionally raise them

        Args:
            message: Verbose/Custom error message of the exception
            icon: Icon to place as before the output
            error: Exception should be logged and optionally raised
            raise_exception: If True, raise `error` if passed, otherwise raise `Exception`

        Raises:
            Exception: `error` when given, otherwise an Exception built from
                the message — only when `raise_exception` is True.
        """
        message_string = message if message else "An error occured"
        _message = self._create_message(message_string, icon)
        # Embed the exception text only when it will not be raised below;
        # a raised exception carries its own details in the traceback.
        if error and not raise_exception:
            _message += f"{error}"
        print(f"{cf.red}{_message}{cf.reset}")  # noqa: T001
        if raise_exception:
            error = error or Exception(message_string)
            raise error
    def warning(self, message: str, icon: Optional[str] = None) -> None:
        """
        Log formatted warnings to stdout

        Args:
            message: Verbose/Custom error message of the exception
            icon: Icon to place as before the output
        """
        _message = self._create_message(message, icon)
        print(f"{cf.yellow}{_message}{cf.reset}")  # noqa: T001
    def success(self, message: str = "", icon: Optional[str] = None) -> None:
        """
        Log formatted successful events to stdout

        Args:
            message: Verbose/Custom error message of the exception
            icon: Icon to place as before the output
        """
        message_string = message if message else "Done"
        _message = self._create_message(message_string, icon)
        print(f"{cf.green}{_message}{cf.reset}")  # noqa: T001
    def info(
        self,
        message: str = "",
        title: str = "",
        icon: Optional[str] = None,
        end: str = "\n",
    ) -> None:
        """
        Log formatted info events to stdout

        Args:
            title: Title of the message, printed in bold
            message: Verbose/Custom error message of the exception
            icon: Icon to place as before the output
            end: Ending char of the message, for controlling new line for instance
        """
        message_string = (
            f"{cf.bold}{title}{cf.reset}{message}" if title else f"{message}"
        )
        _message = self._create_message(message_string, icon)
        print(f"{_message}", end=end, flush=True)  # noqa: T001
    def std(
        self,
        std: SubprocessResult,
        raise_exception: bool = False,
        log_error: bool = True,
    ) -> None:
        """
        Log results of :class:`SubprocessResult` warnings to stdout

        Args:
            std: Result from a subprocess call
            raise_exception: If True, raise `Exception`
            log_error: If True, log the error part of the result with :func:`~Logger.error`
        """
        if log_error:
            # Bug fix: call this instance's error() instead of the
            # module-level `logger` singleton, so independently created
            # Logger instances (and subclasses) log through themselves.
            self.error(message=std.err, raise_exception=False)
        output_string = f"\n{cf.green}stdout:\n{cf.reset}{std.out}\n{cf.red}stderr:\n{cf.reset}{std.err}"
        if raise_exception:
            raise Exception(output_string)
        else:
            print(output_string)  # noqa: T001
# Module-level singleton used throughout the pipeline.
logger = Logger()
|
# This contains all helper functions for the project
import re
# -------------------------------------------------- USERS -------------------------------------------------------------
def find_special_chars(string):
    """Return True if `string` contains at least one special character.

    Bug fix: the previous implementation contained a dead
    ``elif string == '': return True`` branch — an empty string can never
    match the pattern, so ``regex.search`` returned None and the function
    already returned False on the first branch. Behavior is preserved:
    empty strings and strings without special characters yield False.
    """
    # Raw string avoids the invalid-escape warning for "\|" while keeping
    # the pattern's characters identical.
    regex = re.compile(r'[@_!#$%^&*()<>?/\|}{~:]')
    return regex.search(string) is not None
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2013 PAL Robotics SL.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# * Siegfried-A. Gevatter <[email protected]>
import roslib; roslib.load_manifest('hector_exploration_node')
import rospy
from actionlib import SimpleActionClient, GoalStatus
from move_base_msgs.msg import *
from hector_nav_msgs.srv import GetRobotTrajectory
class ExplorationController:
    """Periodically requests an exploration path from hector and sends the
    path's final pose to move_base as a navigation goal."""
    def __init__(self):
        # Service returning a trajectory towards an unexplored frontier.
        self._plan_service = rospy.ServiceProxy('get_exploration_path', GetRobotTrajectory)
        self._move_base = SimpleActionClient('planner/move_base', MoveBaseAction)
    def run(self):
        # Re-plan roughly every 7 seconds until the node shuts down.
        r = rospy.Rate(1 / 7.0)
        while not rospy.is_shutdown():
            self.run_once()
            r.sleep()
    def run_once(self):
        # Request a fresh exploration path; an empty pose list means there
        # are no frontiers left to explore.
        path = self._plan_service().trajectory
        poses = path.poses
        if not path.poses:
            rospy.loginfo('No frontiers left.')
            return
        rospy.loginfo('Moving to frontier...')
        # Navigate to the final pose of the planned trajectory.
        self.move_to_pose(poses[-1])
    def move_to_pose(self, pose_stamped, timeout=20.0):
        """Send `pose_stamped` to move_base and wait for the outcome.

        :param pose_stamped: goal pose; its frame_id is used as-is,
            only the timestamp is refreshed
        :param timeout: seconds to wait for the action server and the result
        :return: True if the goal succeeded (GoalStatus.SUCCEEDED)
        """
        rospy.loginfo('move_to_pose')
        # NOTE(review): the boolean result of wait_for_server is ignored —
        # if the server never comes up, send_goal below is a no-op.
        self._move_base.wait_for_server(rospy.Duration(timeout))
        rospy.loginfo('server up/timeout')
        goal = MoveBaseGoal()
        #pose_stamped.header.frame_id = "base_link";
        pose_stamped.header.stamp = rospy.Time.now()
        #pose_stamped.pose.position.x = pose_stamped.pose.position.x + 1
        #pose_stamped.pose.position.y = pose_stamped.pose.position.y + 1
        goal.target_pose = pose_stamped
        rospy.loginfo(goal)
        self._move_base.send_goal(goal)
        rospy.loginfo('goal sent')
        self._move_base.wait_for_result(rospy.Duration(timeout))
        rospy.loginfo('result')
        return self._move_base.get_state() == GoalStatus.SUCCEEDED
if __name__ == '__main__':
    # Entry point: start the ROS node and loop over exploration goals
    # until shutdown.
    rospy.init_node('hector_to_move_base')
    controller = ExplorationController()
    controller.run()
|
# Generated by Django 3.1.7 on 2021-04-22 15:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: introduces a standalone Wallet model,
    # removes the old CarletUser.wallet field, and adds a rating counter and
    # a trip payment flag.
    dependencies = [
        ('Carletapp', '0015_auto_20210421_1451'),
    ]
    operations = [
        migrations.CreateModel(
            name='Wallet',
            fields=[
                # Wallet shares its primary key with the owning CarletUser.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='Carletapp.carletuser')),
                ('amount', models.PositiveIntegerField(default=0)),
                ('proof_of_payment', models.ImageField(blank=True, null=True, upload_to='proof_of_payment/')),
                ('payment_amount', models.PositiveIntegerField(default=0)),
                ('payment_approved', models.BooleanField(default=False)),
            ],
        ),
        # The wallet is now its own model, so drop the old field.
        migrations.RemoveField(
            model_name='carletuser',
            name='wallet',
        ),
        migrations.AddField(
            model_name='carletuser',
            name='rating_counter',
            field=models.PositiveIntegerField(default=1),
        ),
        migrations.AddField(
            model_name='tripdetail',
            name='payment',
            field=models.BooleanField(default=False),
        ),
    ]
|
#!/usr/bin/env python
"""
Artesanal example Pipe without Pipe class.
"""
__author__ = "Rafael García Cuéllar"
__email__ = "[email protected]"
__copyright__ = "Copyright (c) 2018 Rafael García Cuéllar"
__license__ = "MIT"
from concurrent.futures import ProcessPoolExecutor
import time
import random
def worker(arg):
    """Simulate work by sleeping a random sub-second interval, then echo arg."""
    delay = random.random()
    time.sleep(delay)
    return arg
def pipeline(future):
    # Stage-1 done-callback: feed the finished result into the second pool
    # and chain the printer stage.
    # NOTE(review): relies on the module-global `pools` assigned under
    # __main__ — confirm callbacks only fire after it exists.
    pools[1].submit(worker, future.result()).add_done_callback(printer)
def printer(future):
    # Stage-2 done-callback: despite the name it does not print; it submits
    # the result to the third pool and chains the spout stage.
    pools[2].submit(worker, future.result()).add_done_callback(spout)
def spout(future):
    # Final stage: emit the fully piped-through value to stdout.
    print(future.result())
def instanceProcessPool():
    """Create and return three process pools with two workers each."""
    return [ProcessPoolExecutor(2) for _ in range(3)]
def shutdownPools(pools):
    """Shut down every pool, blocking until pending work completes."""
    for executor in pools:
        executor.shutdown()
def runThreadsInPipeline(pools):
    # Seed every pool with one random-valued task; each completion triggers
    # `pipeline`, which pushes the result to the next stage.
    # NOTE(review): seeding pools[1] and pools[2] here also routes their
    # results back through `pipeline` — confirm this fan-out is intentional.
    for pool in pools:
        pool.submit(worker, random.random()).add_done_callback(pipeline)
if __name__ == "__main__":
    __spec__ = None  # Fix multiprocessing in Spyder's IPython
    # Build the 3-stage pool chain, seed it, then wait for completion.
    pools = instanceProcessPool()  # pool = ProcessPoolExecutor([max_workers])
    runThreadsInPipeline(pools)  # pools[0].submit(worker, random.random()).add_done_callback(pipeline)
    shutdownPools(pools)  # pool.shutdown() blocks until pending work is done
import os
# Runtime configuration read from the environment.
# Bug fix: os.getenv always returns a *string* when the variable is set, so
# DEBUG could end up as the truthy string "False" and PORT as a string.
# Normalize both to proper bool/int values.
DEBUG = os.getenv('DEBUG', 'False').strip().lower() in ('1', 'true', 'yes', 'on')
PORT = int(os.getenv('PORT', 80))
|
"""
Test model creation with custom fields
"""
from django.test import TestCase
from django_any.models import any_model
from testapp.models import ModelWithCustomField
class CustomFieldsTest(TestCase):
    """Verify that any_model can populate a model using a custom field."""
    def test_created_model_with_custom_field(self):
        instance = any_model(ModelWithCustomField)
        # The factory must return an instance of exactly the requested model.
        self.assertEqual(type(instance), ModelWithCustomField)
        # Every locally declared field must be present on the instance.
        expected_field_count = len(ModelWithCustomField._meta.local_fields)
        self.assertEqual(len(instance._meta.fields), expected_field_count)
        # The custom slug field must be filled with a non-empty string.
        self.assertTrue(instance.slug)
        self.assertTrue(isinstance(instance.slug, str))
|