# =======================================================================
# noahwaterfieldprice/diffraction | tests/unit/lattice_test.py | GPL-2.0
# =======================================================================
from collections import OrderedDict
from numpy import add, array, array_equal, ndarray, pi, sqrt
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import pytest
from diffraction.cif.helpers import NUMERICAL_DATA_VALUE
from diffraction.lattice import (Lattice, DirectLattice, DirectLatticeVector,
_to_radians, _to_degrees, metric_tensor,
ReciprocalLattice, ReciprocalLatticeVector,
reciprocalise)
CALCITE_CIF = OrderedDict([("cell_length_a", "4.9900(2)"),
("cell_length_b", "4.9900(2)"),
("cell_length_c", "17.002(1)"),
("cell_angle_alpha", "90."),
("cell_angle_beta", "90."),
("cell_angle_gamma", "90.")])
CALCITE_LATTICE = OrderedDict(
[("a", 4.99), ("b", 4.99), ("c", 17.002),
("alpha", 90), ("beta", 90), ("gamma", 120)])
CALCITE_DIRECT_METRIC = array([[24.9001, -12.45005, 0],
[-12.45005, 24.9001, 0],
[0, 0, 289.068004]])
CALCITE_DIRECT_CELL_VOLUME = 366.6331539
CALCITE_RECIPROCAL_LATTICE = OrderedDict(
[("a_star", 1.4539), ("b_star", 1.4539), ("c_star", 0.3696),
("alpha_star", 90), ("beta_star", 90), ("gamma_star", 60)])
CALCITE_RECIPROCAL_METRIC = array([[2.1138, 1.0569, 0],
[1.0569, 2.1138, 0],
[0, 0, 0.1366]])
CALCITE_RECIPROCAL_CELL_VOLUME = 0.6766
class FakeAbstractLattice(Lattice):
"""Fake concrete AbstractLattice class for testing"""
lattice_parameter_keys = ("k1", "k2", "k3", "k4", "k5", "k6")
@classmethod
def from_cif(cls, filepath, data_block=None):
super().from_cif(filepath, data_block)
class TestUtilityFunctions:
def test_converting_lattice_parameters_to_radians(self):
lattice_parameters_deg = [1, 2, 3, 90, 120, 45]
expected = (1, 2, 3, pi / 2, 2 * pi / 3, pi / 4)
lattice_parameters_rad = _to_radians(lattice_parameters_deg)
assert_array_almost_equal(lattice_parameters_rad, expected)
def test_converting_lattice_parameters_to_degrees(self):
lattice_parameters_rad = [1, 2, 3, pi / 2, 2 * pi / 3, pi / 4]
expected = (1, 2, 3, 90, 120, 45)
lattice_parameters_deg = _to_degrees(lattice_parameters_rad)
assert_array_almost_equal(lattice_parameters_deg, expected)
def test_calculating_metric_tensor(self):
lattice_parameters = CALCITE_LATTICE.values()
assert_array_almost_equal(metric_tensor(lattice_parameters),
CALCITE_DIRECT_METRIC)
def test_transforming_to_reciprocal_basis(self):
lattice_parameters = CALCITE_LATTICE.values()
reciprocal_lattice_parameters = reciprocalise(lattice_parameters)
assert_array_almost_equal(reciprocal_lattice_parameters,
tuple(CALCITE_RECIPROCAL_LATTICE.values()),
decimal=4)
class TestCreatingAbstractLattice:
cls = FakeAbstractLattice
test_dict = OrderedDict([("k1", 2), ("k2", 5), ("k3", 10),
("k4", 90), ("k5", 90), ("k6", 120)])
def test_error_if_lattice_parameter_missing_from_input_list(self, mocker):
lattice_parameters_missing_one = list(self.test_dict.values())[:5]
mock = mocker.MagicMock()
mock.lattice_parameter_keys = self.test_dict.keys()
with pytest.raises(ValueError) as exception_info:
self.cls.check_lattice_parameters(mock, lattice_parameters_missing_one)
assert str(
exception_info.value) == "Missing lattice parameter from input"
def test_error_if_parameter_missing_from_input_dict(self):
for missing_parameter in self.test_dict.keys():
dict_with_missing_parameter = self.test_dict.copy()
del dict_with_missing_parameter[missing_parameter]
with pytest.raises(ValueError) as exception_info:
self.cls.from_dict(dict_with_missing_parameter)
assert str(exception_info.value) == \
"Parameter: '{}' missing from input dictionary".format(
missing_parameter)
def test_parameters_are_assigned_with_values_read_from_dict(self, mocker):
mock = mocker.patch("diffraction.lattice.Lattice.__init__",
return_value=None)
self.cls.from_dict(self.test_dict)
mock.assert_called_once_with(list(self.test_dict.values()))
@pytest.mark.parametrize("invalid_value", ["abc", "123@%£", "1232.433.21"])
@pytest.mark.parametrize("position", range(6))
def test_error_if_invalid_lattice_parameter_given(self, invalid_value,
position, mocker):
invalid_lattice_parameters = list(self.test_dict.values())
invalid_lattice_parameters[position] = invalid_value
mock = mocker.MagicMock()
mock.lattice_parameter_keys = tuple(self.test_dict.keys())
with pytest.raises(ValueError) as exception_info:
self.cls.check_lattice_parameters(mock, invalid_lattice_parameters)
assert str(exception_info.value) == \
"Invalid lattice parameter {}: {}".format(
mock.lattice_parameter_keys[position], invalid_value)
def test_parameters_are_assigned_with_correct_type(self, mocker):
lattice_parameters = self.test_dict.values()
lattice = self.cls(lattice_parameters)
mocker.patch("diffraction.lattice.Lattice.check_lattice_parameters",
return_value=self.test_dict.values())
# test lattice parameters are assigned as floats
for parameter, value in self.test_dict.items():
assert getattr(lattice, parameter) == value
assert isinstance(getattr(lattice, parameter), float)
def test_string_representation_of_lattice(self):
lattice_parameters = self.test_dict.values()
lattice = self.cls(lattice_parameters)
assert repr(lattice) == "{0}({1})".format(
lattice.__class__.__name__,
[float(parameter) for parameter in lattice_parameters])
assert str(lattice) == "{0}({1})".format(
lattice.__class__.__name__,
[float(parameter) for parameter in lattice_parameters])
def test_loading_from_cif(self):
        with pytest.raises(NotImplementedError):
self.cls.from_cif("some/file/path.cif")
class TestCreatingDirectLattice(TestCreatingAbstractLattice):
cls = DirectLattice
test_dict = CALCITE_LATTICE
def test_loading_from_cif(self, mocker):
load_data_block_mock = mocker.patch(
"diffraction.lattice.load_data_block",
return_value="data_items")
get_cif_data_mock = mocker.patch("diffraction.lattice.get_cif_data",
return_value=list(
self.test_dict.values()))
mock = mocker.patch("diffraction.lattice.Lattice.__init__",
return_value=None)
self.cls.from_cif("some/single/data/block/cif")
load_data_block_mock.assert_called_with("some/single/data/block/cif",
None)
get_cif_data_mock.assert_called_with("data_items", *CALCITE_CIF.keys())
assert_almost_equal(mock.call_args[0][0],
tuple(self.test_dict.values()))
def test_creating_from_reciprocal_lattice(self, mocker):
mock = mocker.MagicMock()
mock.lattice_parameters = "reciprocal_lattice_parameters"
m1 = mocker.patch("diffraction.lattice.reciprocalise",
return_value="direct_lattice_parameters")
m2 = mocker.patch("diffraction.lattice.DirectLattice")
ReciprocalLattice.direct(mock)
m1.assert_called_once_with("reciprocal_lattice_parameters")
m2.assert_called_once_with("direct_lattice_parameters")
class TestCreatingReciprocalLattice(TestCreatingAbstractLattice):
cls = ReciprocalLattice
test_dict = CALCITE_RECIPROCAL_LATTICE
def test_loading_from_cif(self, mocker):
load_data_block_mock = mocker.patch(
"diffraction.lattice.load_data_block",
return_value="data_items")
get_cif_data_mock = mocker.patch("diffraction.lattice.get_cif_data",
return_value=list(
CALCITE_LATTICE.values()))
mock = mocker.patch("diffraction.lattice.Lattice.__init__",
return_value=None)
self.cls.from_cif("some/single/data/block/cif")
load_data_block_mock.assert_called_with("some/single/data/block/cif",
None)
get_cif_data_mock.assert_called_with("data_items", *CALCITE_CIF.keys())
assert_almost_equal(mock.call_args[0][0],
tuple(self.test_dict.values()),
decimal=4)
def test_creating_from_direct_lattice(self, mocker):
mock = mocker.MagicMock()
mock.lattice_parameters = "direct_lattice_parameters"
m1 = mocker.patch("diffraction.lattice.reciprocalise",
return_value="reciprocal_lattice_parameters")
m2 = mocker.patch("diffraction.lattice.ReciprocalLattice")
DirectLattice.reciprocal(mock)
m1.assert_called_once_with("direct_lattice_parameters")
m2.assert_called_once_with("reciprocal_lattice_parameters")
class TestAccessingComputedProperties:
# tests both DirectLattice and ReciprocalLattice objects
@pytest.mark.parametrize("lattice, lattice_class", [
(CALCITE_LATTICE, DirectLattice),
(CALCITE_RECIPROCAL_LATTICE, ReciprocalLattice)])
def test_can_get_lattice_parameters_as_a_tuple(self, mocker, lattice,
lattice_class):
mock = mocker.MagicMock(**lattice)
mock.lattice_parameter_keys = lattice_class.lattice_parameter_keys
mock.lattice_parameters = lattice_class.lattice_parameters
assert mock.lattice_parameters.fget(mock) == tuple(lattice.values())
@pytest.mark.parametrize("lattice, lattice_class, parameter", [
(CALCITE_LATTICE, DirectLattice, 'a'),
(CALCITE_RECIPROCAL_LATTICE, ReciprocalLattice, 'a_star')])
def test_lattice_parameters_updated_if_lattice_parameter_changed(
self, mocker, lattice, lattice_class, parameter):
mock = mocker.MagicMock(**lattice)
mock.lattice_parameter_keys = lattice_class.lattice_parameter_keys
mock.lattice_parameters = lattice_class.lattice_parameters
expected_lattice_parameters = (10,) + tuple(lattice.values())[1:]
setattr(mock, parameter, 10)
assert mock.lattice_parameters.fget(
mock) == expected_lattice_parameters
@pytest.mark.parametrize("lattice, lattice_class", [
(CALCITE_LATTICE, DirectLattice),
(CALCITE_RECIPROCAL_LATTICE, ReciprocalLattice)])
def test_lattice_metric_is_calculated_with_correct_input(self, mocker,
lattice,
lattice_class):
lattice_parameters = tuple(lattice.values())
mock = mocker.MagicMock(lattice_parameters=lattice_parameters)
m = mocker.patch("diffraction.lattice.metric_tensor")
mock.metric = lattice_class.metric
mock.metric.fget(mock)
m.assert_called_once_with(lattice_parameters)
@pytest.mark.parametrize("lattice, lattice_class, metric, cell_volume", [
(CALCITE_LATTICE, DirectLattice,
CALCITE_DIRECT_METRIC, CALCITE_DIRECT_CELL_VOLUME),
(CALCITE_RECIPROCAL_LATTICE, ReciprocalLattice,
CALCITE_RECIPROCAL_METRIC, CALCITE_RECIPROCAL_CELL_VOLUME)])
def test_unit_cell_volume_is_calculated_correctly(self, mocker, lattice,
lattice_class,
metric, cell_volume):
mock = mocker.MagicMock(**lattice)
mock.unit_cell_volume = lattice_class.unit_cell_volume
mock.metric = metric
assert_almost_equal(mock.unit_cell_volume.fget(mock),
cell_volume, decimal=4)
class TestDirectLatticeVectorCreationAndMagicMethods:
lattice_cls = DirectLattice
cls = DirectLatticeVector
def test_creating_lattice_vector_directly(self, mocker):
lattice = mocker.MagicMock()
vector = self.cls([1, 0, 0], lattice)
assert vector.lattice == lattice
def test_creating_lattice_vector_from_lattice(self, mocker):
lattice = mocker.MagicMock()
v1 = self.cls([1, 2, 3], lattice)
v2 = self.lattice_cls.vector(lattice, [1, 2, 3])
assert v1 == v2
def test_lattice_attribute_persists_when_new_array_created(self, mocker):
lattice = mocker.MagicMock()
v1 = self.cls([1, 0, 0], lattice)
v2 = 2 * v1
v3 = v1.copy()
assert v2.lattice == lattice
assert v3.lattice == lattice
def test_direct_lattice_vector_equivalence(self, mocker):
lattice_1 = mocker.MagicMock()
lattice_2 = mocker.MagicMock()
v1 = self.cls([1, 0, 0], lattice_1)
v2 = self.cls([1, 0, 0], lattice_1)
v3 = self.cls([1, 0, 0], lattice_2)
v4 = self.cls([0, 1, 0], lattice_1)
assert v1 == v2
assert v1 != v3
assert v1 != v4
def test_adding_and_subtracting_direct_lattice_vectors(self, mocker):
lattice = mocker.MagicMock()
v1 = self.cls([1, 0, 0], lattice)
v2 = self.cls([0, 2, 3], lattice)
v3 = self.cls([1, 2, 3], lattice)
assert v1 + v2 == v3
assert v3 - v2 == v1
def test_error_if_adding_or_subtracting_with_different_lattices(self,
mocker):
lattice_1 = mocker.MagicMock()
lattice_2 = mocker.MagicMock()
v1 = self.cls([1, 0, 0], lattice_1)
v2 = self.cls([0, 2, 3], lattice_2)
with pytest.raises(TypeError) as exception_info:
v1 + v2
assert str(exception_info.value) == (
"lattice must be the same for both {:s}s".format(self.cls.__name__))
with pytest.raises(TypeError) as exception_info:
v1 - v2
assert str(exception_info.value) == (
"lattice must be the same for both {:s}s".format(self.cls.__name__))
def test_string_representation_of_lattice_vectors(self, mocker):
lattice = mocker.MagicMock()
components = [1, 2, 3]
v1 = self.cls([1, 2, 3], lattice)
assert repr(v1) == "{0}({1}, {2})".format(
self.cls.__name__, components, lattice)
assert str(v1) == "{0}({1})".format(
self.cls.__name__, components)
class TestReciprocalLatticeVectorCreationAndMagicMethods(
TestDirectLatticeVectorCreationAndMagicMethods):
lattice_cls = ReciprocalLattice
cls = ReciprocalLatticeVector
class TestDirectLatticeVectorCalculations:
def test_calculating_norm_of_direct_lattice_vector(self, mocker):
lattice = mocker.MagicMock(metric=CALCITE_DIRECT_METRIC)
v1 = DirectLatticeVector([1, 1, 0], lattice)
v2 = DirectLatticeVector([1, 2, 3], lattice)
assert_almost_equal(v1.norm(), 4.99)
assert_almost_equal(v2.norm(), 51.7330874)
def test_error_if_calculating_inner_product_or_angle_with_different_lattices(self, mocker):
lattice_1 = mocker.MagicMock()
lattice_2 = mocker.MagicMock()
v1 = ReciprocalLatticeVector([1, 0, 0], lattice_1)
v2 = ReciprocalLatticeVector([0, 2, 3], lattice_2)
with pytest.raises(TypeError) as exception_info:
v1.inner(v2)
assert str(exception_info.value) == "lattice must be the same " \
"for both ReciprocalLatticeVectors"
with pytest.raises(TypeError) as exception_info:
v1.angle(v2)
assert str(exception_info.value) == "lattice must be the same " \
"for both ReciprocalLatticeVectors"
@pytest.mark.parametrize("uvw,result", [
([0, 1, 0], 12.45005),
([0, 0, 1], 289.068004),
([1, -1, 0], 0,),
([1, 2, 3], 904.554162)])
def test_calculating_inner_product_of_vectors(self, mocker, uvw, result):
lattice = mocker.MagicMock(metric=CALCITE_DIRECT_METRIC)
v1 = DirectLatticeVector([1, 1, 1], lattice)
v2 = DirectLatticeVector(uvw, lattice)
assert_almost_equal(v1.inner(v2), result)
@pytest.mark.parametrize("uvw,result", [
([0, 1, 0], 81.90538705),
([0, 0, 1], 16.3566939),
([1, -1, 0], 90),
([1, 2, 3], 9.324336578)])
def test_calculating_angle_between_two_vectors(self, mocker, uvw, result):
lattice = mocker.MagicMock(metric=CALCITE_DIRECT_METRIC)
v1 = DirectLatticeVector([1, 1, 1], lattice)
v2 = DirectLatticeVector(uvw, lattice)
assert_almost_equal(v1.angle(v2), result)
class TestReciprocalLatticeVectorCalculations:
def test_calculating_norm_of_reciprocal_lattice_vector(self, mocker):
lattice = mocker.MagicMock(metric=CALCITE_RECIPROCAL_METRIC)
v1 = ReciprocalLatticeVector([1, 1, 0], lattice)
v2 = ReciprocalLatticeVector([1, 2, 3], lattice)
assert_almost_equal(v1.norm(), 2.5182, decimal=4)
assert_almost_equal(v2.norm(), 4.0032, decimal=4)
def test_error_if_calculating_inner_product_or_angle_with_different_lattices(self, mocker):
lattice_1 = mocker.MagicMock()
lattice_2 = mocker.MagicMock()
v1 = ReciprocalLatticeVector([1, 0, 0], lattice_1)
v2 = ReciprocalLatticeVector([0, 2, 3], lattice_2)
with pytest.raises(TypeError) as exception_info:
v1.inner(v2)
assert str(exception_info.value) == "lattice must be the same " \
"for both ReciprocalLatticeVectors"
with pytest.raises(TypeError) as exception_info:
v1.angle(v2)
assert str(exception_info.value) == "lattice must be the same " \
"for both ReciprocalLatticeVectors"
@pytest.mark.parametrize("hkl,result", [
([0, 1, 0], 3.1707),
([0, 0, 1], 0.1366),
([1, -1, 0], 0,),
([1, 2, 3], 9.9219)])
def test_calculating_inner_product_of_vectors(self, mocker, hkl, result):
lattice = mocker.MagicMock(metric=CALCITE_RECIPROCAL_METRIC)
v1 = ReciprocalLatticeVector([1, 1, 1], lattice)
v2 = ReciprocalLatticeVector(hkl, lattice)
assert_almost_equal(v1.inner(v2), result, decimal=4)
@pytest.mark.parametrize("hkl,result", [
([0, 1, 0], 31.0357),
([0, 0, 1], 81.6504),
([1, -1, 0], 90),
([1, 2, 3], 13.1489)])
def test_calculating_angle_between_two_vectors(self, mocker, hkl, result):
lattice = mocker.MagicMock(metric=CALCITE_RECIPROCAL_METRIC)
v1 = ReciprocalLatticeVector([1, 1, 1], lattice)
v2 = ReciprocalLatticeVector(hkl, lattice)
assert_almost_equal(v1.angle(v2), result, decimal=4)
class TestDirectAndReciprocalLatticeVectorCalculations:
def test_error_if_calculating_inner_product_or_angle_with_unreciprocal_lattices(self, mocker):
direct_lattice = mocker.MagicMock(metric=CALCITE_DIRECT_METRIC)
reciprocal_lattice = mocker.MagicMock(metric=CALCITE_RECIPROCAL_METRIC * 1.02)
direct_vector = DirectLatticeVector([1, 0, 0], direct_lattice)
reciprocal_vector = ReciprocalLatticeVector([0, 2, 3], reciprocal_lattice)
with pytest.raises(TypeError) as exception_info:
direct_vector.inner(reciprocal_vector)
assert str(exception_info.value) == "DirectLatticeVector and ReciprocalLatticeVector" \
" lattices must be reciprocally related."
with pytest.raises(TypeError) as exception_info:
direct_vector.angle(reciprocal_vector)
assert str(exception_info.value) == "DirectLatticeVector and ReciprocalLatticeVector" \
" lattices must be reciprocally related."
@pytest.mark.parametrize("uvw,hkl,result", [
([1, 0, 0], [0, 0, 1], 0),
([1, 0, 0], [1, 0, 0], 2 * pi),
([1, -1, 0], [1, 2, 3], -2 * pi),
([1, 2, 3], [0, 0, 1], 6 * pi)])
def test_calculating_inner_product_of_direct_and_reciprocal_lattice_vectors(
self, mocker, uvw, hkl, result):
direct_lattice = mocker.MagicMock(metric=CALCITE_DIRECT_METRIC)
reciprocal_lattice = mocker.MagicMock(metric=CALCITE_RECIPROCAL_METRIC)
direct_vector = DirectLatticeVector(uvw, direct_lattice)
reciprocal_vector = ReciprocalLatticeVector(hkl, reciprocal_lattice)
assert_almost_equal(direct_vector.inner(reciprocal_vector), result)
assert_almost_equal(reciprocal_vector.inner(direct_vector), result)
@pytest.mark.parametrize("uvw,hkl,result", [
([1, 0, 0], [0, 0, 1], 90),
([1, 0, 0], [1, 0, 0], 30),
([1, -1, 0], [0, 0, 1], 90),
([1, 2, 3], [0, 0, 1], 9.6527)])
def test_calculating_angle_between_direct_and_reciprocal_lattice_vectors(
self, mocker, uvw, hkl, result):
direct_lattice = mocker.MagicMock(metric=CALCITE_DIRECT_METRIC)
reciprocal_lattice = mocker.MagicMock(metric=CALCITE_RECIPROCAL_METRIC)
direct_vector = DirectLatticeVector(uvw, direct_lattice)
reciprocal_vector = ReciprocalLatticeVector(hkl, reciprocal_lattice)
assert_almost_equal(direct_vector.angle(reciprocal_vector), result, decimal=2)
assert_almost_equal(reciprocal_vector.angle(direct_vector), result, decimal=2)
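if __name__ == '__main__':
    # Illustrative check, not part of the original test suite: the direct
    # metric tensor entries follow G[i][j] = a_i . a_j, so for calcite
    # (a = b = 4.99, c = 17.002, gamma = 120 degrees)
    #   G[0][0] = a*a = 24.9001
    #   G[0][1] = a*b*cos(120 deg) = -12.45005
    #   G[2][2] = c*c = 289.068004
    # and the length of [1, 1, 0] is sqrt(v . G . v) = 4.99, matching
    # test_calculating_norm_of_direct_lattice_vector above.
    v = array([1, 1, 0])
    norm = sqrt(v.dot(CALCITE_DIRECT_METRIC).dot(v))
    assert_almost_equal(norm, 4.99)
    print("|[1, 1, 0]| in the calcite direct lattice:", norm)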
# =======================================================================
# ondrik/libvata | python_interface/test1.py | GPL-3.0
# =======================================================================
#!/usr/bin/env python3
AUT1 = """
Ops
Automaton A6
States q5:0 q4:0 q3:0 q2:0 q1:0 q0:0
Final States q5
Transitions
bot0 -> q0
black -> q1
bot2(q0,q0) -> q0
bot2(q0,q0) -> q1
black(q1,q1) -> q3
black(q3,q3) -> q2
black(q3,q3) -> q4
bot1(q4) -> q5
red(q3,q3) -> q5
"""
AUT2 = """
Automaton A7
States q6:0 q5:0 q4:0 q3:0 q2:0 q1:0 q0:0
Final States q5
Transitions
bot0 -> q0
black -> q1
bot2(q0,q0) -> q1
black(q1,q1) -> q3
black(q3,q3) -> q4
bot1(q4) -> q5
red(q3,q3) -> q5
bot1(q5) -> q6
red(q3,q6) -> q5
"""
if __name__ == '__main__':
aut1 = vata_load_string(AUT1)
aut2 = vata_load_string(AUT2)
# union of aut1 and aut2
aut_union = vata_union(aut1, aut2)
# assert invariants
# TODO: maybe keep only one direction of inclusion?
assert aut1.is_included(aut_union)
assert aut_union.includes(aut2)
assert vata_inclusion(aut1, aut_union)
assert vata_inclusion(aut2, aut_union)
# complement of aut1
aut1_cmpl = vata_complement(aut1)
assert vata_intersection(aut1, aut1_cmpl).is_lang_empty()
assert vata_union(aut1, aut1_cmpl).is_lang_universal()
# =======================================================================
# chengsoonong/crowdastro | crowdastro/experiment/experiment_rgz_raykar_class_balance.py | MIT
# =======================================================================
"""Tests the effect of class imbalance on the Raykar algorithm applied to RGZ.
Matthew Alger
The Australian National University
2016
"""
import argparse
import collections
import logging
import h5py
import matplotlib
import matplotlib.pyplot as plt
import numpy
import sklearn
import sklearn.decomposition
import sklearn.metrics
from . import runners
from .experiment_rgz_raykar import top_n_accurate_targets
from .results import Results
from .. import __version__
from ..crowd.util import majority_vote
from ..crowd.raykar import RaykarClassifier
from ..plot import vertical_scatter_ba
def main(crowdastro_h5_path, training_h5_path, results_h5_path,
overwrite=False, plot=False, n_annotators=10):
with h5py.File(crowdastro_h5_path, 'r') as crowdastro_h5, \
h5py.File(training_h5_path, 'r') as training_h5:
n_splits = crowdastro_h5['/wise/cdfs/test_sets'].shape[0]
n_examples, n_params = training_h5['features'].shape
n_params += 1 # Bias term.
n_params += 1 # Number of annotators.
n_params += crowdastro_h5['/wise/cdfs/rgz_raw_labels'].shape[0] * 2
methods = [
'Downsampled negatives',
'No resampling',
]
model = '{} crowdastro.crowd.raykar.RaykarClassifier, '.format(
__version__)
results = Results(results_h5_path, methods, n_splits, n_examples,
n_params, model)
features = training_h5['features'].value
targets = top_n_accurate_targets(crowdastro_h5,
n_annotators=n_annotators)
alphas_all_trials = {
'Downsampled negatives': [],
'No resampling': [],
}
betas_all_trials = {
'Downsampled negatives': [],
'No resampling': [],
}
for split_id, test_set in enumerate(
crowdastro_h5['/wise/cdfs/test_sets']):
logging.info('Test {}/{}'.format(split_id + 1, n_splits))
for method_id, method in enumerate(methods):
logging.info('Method {} ({}/{})'.format(method, method_id + 1,
len(methods)))
downsample = method == 'Downsampled negatives'
runners.raykar(results, method, split_id, features,
targets, list(test_set),
overwrite=overwrite, n_restarts=1,
downsample=downsample)
model = results.get_model(method, split_id)
rc = RaykarClassifier.unserialise(model)
logging.info('{} alpha: {}'.format(method, rc.a_))
logging.info('{} beta: {}'.format(method, rc.b_))
alphas_all_trials[method].append(rc.a_)
betas_all_trials[method].append(rc.b_)
for method in methods:
alphas = numpy.mean(alphas_all_trials[method], axis=0)
betas = numpy.mean(betas_all_trials[method], axis=0)
logging.info('Average alphas for {}: {}'.format(method, alphas))
logging.info('Average betas for {}: {}'.format(method, betas))
if plot:
matplotlib.rcParams['font.family'] = 'serif'
matplotlib.rcParams['font.serif'] = ['Palatino Linotype']
vertical_scatter_ba(
results,
crowdastro_h5['/wise/cdfs/norris_labels'].value,
rotation='horizontal')
plt.ylim((0, 1))
plt.show()
to_hist = []
for method in methods:
alphas = numpy.mean(alphas_all_trials[method], axis=0)
to_hist.append(alphas)
to_hist = numpy.vstack(to_hist).T
plt.hist(to_hist)
plt.legend(methods)
plt.xlabel('$\\alpha$')
plt.ylabel('Number of labellers')
plt.show()
to_hist = []
for method in methods:
betas = numpy.mean(betas_all_trials[method], axis=0)
to_hist.append(betas)
to_hist = numpy.vstack(to_hist).T
plt.hist(to_hist)
plt.legend(methods)
plt.xlabel('$\\beta$')
plt.ylabel('Number of labellers')
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--crowdastro', default='data/crowdastro.h5',
help='HDF5 crowdastro data file')
parser.add_argument('--training', default='data/training.h5',
help='HDF5 training data file')
parser.add_argument(
'--results', default='data/results_rgz_raykar_class_balance.h5',
help='HDF5 results data file')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite existing results')
parser.add_argument('--verbose', '-v', action='store_true',
help='Verbose output')
parser.add_argument('--annotators', type=int, help='Number of annotators',
default=10)
parser.add_argument('--plot', action='store_true', help='Generate a plot')
args = parser.parse_args()
if args.verbose:
logging.root.setLevel(logging.DEBUG)
else:
logging.root.setLevel(logging.INFO)
main(args.crowdastro, args.training, args.results, overwrite=args.overwrite,
plot=args.plot, n_annotators=args.annotators)
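# Example invocation (a sketch; it relies on the argparse defaults above and
# on running the file as a module so the package-relative imports resolve):
#   python -m crowdastro.experiment.experiment_rgz_raykar_class_balance \
#       --crowdastro data/crowdastro.h5 --training data/training.h5 \
#       --annotators 10 --plot -v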
# =======================================================================
# kjung/scikit-learn | examples/ensemble/test_dsf.py | BSD-3-Clause
# =======================================================================
import numpy as np
import sklearn as sk
from sklearn.ensemble.forest import DoubleSampleForest
################################################################################
# Sim2 from Wager & Athey
################################################################################
train_file = "/Users/kjung/Dropbox/personalized-predictions/data/sim2_train.txt"
train_data = np.loadtxt(fname=train_file,skiprows=1)
test_file = "/Users/kjung/Dropbox/personalized-predictions/data/sim2_test.txt"
test_data = np.loadtxt(fname=test_file,skiprows=1)
tau = train_data[:,0]
y = train_data[:,1]
w = train_data[:,2]
X = train_data[:,3:]
B = 2000
print "Fitting double sample forest with %d base trees..." % (B)
pf = DoubleSampleForest(random_state=0,
n_estimators=B,
min_samples_leaf=1)
pf.fit(X=X, y=y, w=w, subsample_size=2500,)
print "Getting estimates..."
estimates = pf.predict_effect(test_data[:,3:])
fname = "/Users/kjung/Dropbox/personalized-predictions/double_sample_estimates_sim2.txt"
np.savetxt(fname=fname, X=estimates)
# Variance estimates for calculating confidence intervals.
print "Estimating variance..."
variances = pf.estimate_variance(test_data[:,3:])
fname = "/Users/kjung/Dropbox/personalized-predictions/double_sample_variances_sim2.txt"
np.savetxt(fname, X=variances)
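# Hypothetical follow-up, not in the original script: the per-example
# variance estimates support pointwise ~95% normal confidence intervals
# around the estimated treatment effects.
lower = estimates - 1.96 * np.sqrt(variances)
upper = estimates + 1.96 * np.sqrt(variances)
print "Mean 95%% CI width: %f" % np.mean(upper - lower)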
# =======================================================================
# locomatix/locomatix-python | locomatix/responses.py | Apache-2.0
# =======================================================================
###############################################################################
#
# Copyright 2010 Locomatix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import httplib
from response_handlers import *
from exceptions import *
try: import simplejson as json
except ImportError:
try: import json
except ImportError:
raise ImportError("simplejson is not installed. Please download it from http://code.google.com/p/simplejson/")
class LocomatixResponse(object):
"""This is the base Locomatix Response object from which all Responses are derived.
A Response is initialize with an http_response object (from httplib). The LocomatixResponse
gets the status, and body of the http_response. If the request was successful the LocomatixResponse
will try to parse the XML using a handler specific to the request type. Instance variables
for the specific response type will be set using the handler results. Descendant Responses
need only designate a HANDLER class attribute, then do any relevant instance var assigning
as necessary in their constructor."""
HANDLER = None
def __init__(self, http_response):
self.status = http_response.status
self.body = http_response.read()
self.handler = self.__class__.HANDLER.__class__()
self.request_signature = None
self.response_meta = LxResponseMetadata()
if self.status >= httplib.OK:
data = json.loads(self.body)
self.response_meta.message = data['Status']
self.response_meta.response_time = data['ExecutionTime']
if self.response_meta.message == 'Success':
self.handler.handle(data)
self.body = data
else:
self.response_meta.message = http_response.reason
def get_metadata(self):
return self.response_meta
class CreateFeedResponse(LocomatixResponse):
HANDLER = StatusResponseHandler()
class DeleteFeedResponse(LocomatixResponse):
HANDLER = StatusResponseHandler()
class ListFeedsResponse(LocomatixResponse):
HANDLER = ListFeedsResponseHandler()
def __init__(self, http_response):
super(ListFeedsResponse, self).__init__(http_response)
if self.response_meta.message == 'Success':
self.next_key = self.handler.next_key
self.feeds = self.handler.feeds
else:
self.next_key = None
self.feeds = []
class CreateObjectResponse(LocomatixResponse):
HANDLER = StatusResponseHandler()
class DeleteObjectResponse(LocomatixResponse):
HANDLER = StatusResponseHandler()
class ListObjectsResponse(LocomatixResponse):
HANDLER = ListObjectsResponseHandler()
def __init__(self, http_response):
super(ListObjectsResponse, self).__init__(http_response)
if self.response_meta.message == 'Success':
self.next_key = self.handler.next_key
self.objects = self.handler.objects
self.aggrs = self.handler.aggrs
else:
self.next_key = None
self.aggrs = []
self.objects = []
class GetAttributesResponse(LocomatixResponse):
HANDLER = GetAttributesResponseHandler()
def __init__(self, http_response):
super(GetAttributesResponse, self).__init__(http_response)
if self.response_meta.message == 'Success':
self.object = self.handler.object
else:
self.object = None
class UpdateAttributesResponse(LocomatixResponse):
HANDLER = StatusResponseHandler()
class UpdateLocationResponse(LocomatixResponse):
HANDLER = StatusResponseHandler()
class GetLocationResponse(LocomatixResponse):
HANDLER = GetLocationResponseHandler()
def __init__(self, http_response):
super(GetLocationResponse, self).__init__(http_response)
if self.response_meta.message == 'Success':
self.location = self.handler.location
else:
self.location = None
class SearchNearbyResponse(LocomatixResponse):
HANDLER = SearchResponseHandler()
def __init__(self, http_response):
super(SearchNearbyResponse, self).__init__(http_response)
if self.response_meta.message == 'Success':
self.objlocs = self.handler.objlocs
self.aggrs = self.handler.aggrs
self.next_key = self.handler.next_key
else:
self.objlocs = []
self.aggrs = []
self.next_key = None
class SearchRegionResponse(LocomatixResponse):
HANDLER = SearchResponseHandler()
def __init__(self, http_response):
super(SearchRegionResponse, self).__init__(http_response)
if self.response_meta.message == 'Success':
self.objlocs = self.handler.objlocs
self.aggrs = self.handler.aggrs
self.next_key = self.handler.next_key
else:
self.objlocs = []
self.aggrs = []
self.next_key = None
class CreateZoneResponse(LocomatixResponse):
HANDLER = StatusResponseHandler()
class ActivateZoneResponse(LocomatixResponse):
HANDLER = StatusResponseHandler()
class GetZoneResponse(LocomatixResponse):
HANDLER = GetZoneResponseHandler()
def __init__(self, http_response):
super(GetZoneResponse, self).__init__(http_response)
if self.response_meta.message == 'Success':
self.zone = self.handler.zone
else:
self.zone = None
class ListZonesResponse(LocomatixResponse):
HANDLER = ListZonesResponseHandler()
def __init__(self, http_response):
super(ListZonesResponse, self).__init__(http_response)
if self.response_meta.message == 'Success':
self.next_key = self.handler.next_key
self.zones = self.handler.zones
else:
self.next_key = None
self.zones = None
class DeactivateZoneResponse(LocomatixResponse):
HANDLER = StatusResponseHandler()
class DeleteZoneResponse(LocomatixResponse):
HANDLER = StatusResponseHandler()
class CreateFenceResponse(LocomatixResponse):
HANDLER = StatusResponseHandler()
class ActivateFenceResponse(LocomatixResponse):
HANDLER = StatusResponseHandler()
class GetFenceResponse(LocomatixResponse):
HANDLER = GetFenceResponseHandler()
def __init__(self, http_response):
super(GetFenceResponse, self).__init__(http_response)
if self.response_meta.message == 'Success':
self.fence = self.handler.fence
else:
self.fence = None
class ListFencesResponse(LocomatixResponse):
HANDLER = ListFencesResponseHandler()
def __init__(self, http_response):
super(ListFencesResponse, self).__init__(http_response)
if self.response_meta.message == 'Success':
self.next_key = self.handler.next_key
self.fences = self.handler.fences
else:
self.next_key = None
self.fences = []
class DeactivateFenceResponse(LocomatixResponse):
HANDLER = StatusResponseHandler()
class DeleteFenceResponse(LocomatixResponse):
HANDLER = StatusResponseHandler()
class GetLocationHistoryResponse(LocomatixResponse):
HANDLER = GetLocationHistoryResponseHandler()
def __init__(self, http_response):
super(GetLocationHistoryResponse, self).__init__(http_response)
if self.response_meta.message == 'Success':
self.locations = self.handler.locations
self.aggrs = self.handler.aggrs
self.next_key = self.handler.next_key
else:
self.locations = []
self.aggrs = None
self.next_key = None
class GetSpaceActivityResponse(LocomatixResponse):
HANDLER = GetSpaceActivityResponseHandler()
def __init__(self, http_response):
super(GetSpaceActivityResponse, self).__init__(http_response)
if self.response_meta.message == 'Success':
self.objlocs = self.handler.objlocs
self.aggrs = self.handler.aggrs
self.next_key = self.handler.next_key
else:
self.objlocs = None
self.aggrs = None
self.next_key = None
class GetHistogramResponse(LocomatixResponse):
HANDLER = GetHistogramResponseHandler()
def __init__(self, http_response):
super(GetHistogramResponse, self).__init__(http_response)
if self.response_meta.message == 'Success':
self.grid_aggregates = self.handler.grid_aggregates
else:
self.grid_aggregates = []
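# Illustrative sketch only (not part of the Locomatix client): a new
# response type plugs into the pattern described in the LocomatixResponse
# docstring by designating a HANDLER and copying any handler results onto
# the instance. 'EchoResponseHandler' and the 'echo' field are hypothetical.
#
# class EchoResponse(LocomatixResponse):
#   HANDLER = EchoResponseHandler()
#
#   def __init__(self, http_response):
#     super(EchoResponse, self).__init__(http_response)
#     if self.response_meta.message == 'Success':
#       self.echo = self.handler.echo
#     else:
#       self.echo = None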
# =======================================================================
# sony/nnabla | python/test/utils/test_graph_converters/ref_graphs/resnets.py | Apache-2.0
# =======================================================================
# Copyright 2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
from .helper import create_scale_bias, get_channel_axes
# Small Channel First ResNet
def cf_resblock(x, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1),
test=False, channel_last=False, name='cf-convblock'):
axes = get_channel_axes(channel_last)
h = x
with nn.parameter_scope(name):
h = PF.convolution(h, maps, kernel=kernel, pad=pad, stride=stride,
channel_last=channel_last, with_bias=False)
h = PF.batch_normalization(h, axes=axes, batch_stat=not test)
return F.relu(h + x)
def small_cf_resnet(image, test=False, channel_last=False):
axes = get_channel_axes(channel_last)
h = image
h /= 255.0
h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1), channel_last=channel_last,
with_bias=False, name='first-cf-conv')
h = PF.batch_normalization(
h, axes=axes, batch_stat=not test, name='first-cf-bn')
h = F.relu(h)
h = F.max_pooling(h, (2, 2), channel_last=channel_last)
h = cf_resblock(h, maps=16, test=test,
channel_last=channel_last, name='cf-cb1')
h = cf_resblock(h, maps=16, test=test,
channel_last=channel_last, name='cf-cb2')
h = cf_resblock(h, maps=16, test=test,
channel_last=channel_last, name='cf-cb3')
h = cf_resblock(h, maps=16, test=test,
channel_last=channel_last, name='cf-cb4')
h = F.average_pooling(h, (2, 2), channel_last=channel_last)
pred = PF.affine(h, 10, name='cf-fc')
return pred
# Small Channel Last ResNet
def cl_resblock(x, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1), test=False, name='cl_convblock'):
h = x
with nn.parameter_scope(name):
h = PF.convolution(h, maps, kernel=kernel, pad=pad, stride=stride,
channel_last=True, with_bias=False)
h = PF.batch_normalization(h, axes=[3], batch_stat=not test)
return F.relu(h + x)
def small_cl_resnet(image, test=False):
h = image
h /= 255.0
h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1), channel_last=True,
with_bias=False, name='first-cl-conv')
h = PF.batch_normalization(
h, axes=[3], batch_stat=not test, name='first-cl-bn')
h = F.relu(h)
h = F.max_pooling(h, (2, 2), channel_last=True)
h = cl_resblock(h, maps=16, test=test, name='cl-cb1')
h = cl_resblock(h, maps=16, test=test, name='cl-cb2')
h = cl_resblock(h, maps=16, test=test, name='cl-cb3')
h = cl_resblock(h, maps=16, test=test, name='cl-cb4')
h = F.average_pooling(h, (2, 2), channel_last=True)
pred = PF.affine(h, 10, name='cl-fc')
return pred
# BatchNormalization Self-folding Small ResNet
def bn_self_folding_resblock(x, i, maps, kernel=(3, 3), pad=(1, 1),
stride=(1, 1), channel_last=False, name='convblock'):
h = x
with nn.parameter_scope(name):
h = PF.convolution(h, maps, kernel=kernel, pad=pad, stride=stride,
channel_last=channel_last, with_bias=False)
axes = get_channel_axes(channel_last)
a, b = create_scale_bias(1, h.shape, axes=axes)
h = a * h + b
return F.relu(h + x)
def small_bn_self_folding_resnet(image, channel_last=False, name='bn-self-folding-graph-ref'):
h = image
h /= 255.0
h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1), channel_last=channel_last,
with_bias=False, name='first-conv')
axes = get_channel_axes(channel_last)
a, b = create_scale_bias(1, h.shape, axes=axes)
h = a * h + b
h = F.relu(h)
h = F.max_pooling(h, (2, 2), channel_last=channel_last)
h = bn_self_folding_resblock(
h, 2, maps=16, channel_last=channel_last, name='cb1')
h = bn_self_folding_resblock(
h, 3, maps=16, channel_last=channel_last, name='cb2')
h = bn_self_folding_resblock(
h, 4, maps=16, channel_last=channel_last, name='cb3')
h = bn_self_folding_resblock(
h, 5, maps=16, channel_last=channel_last, name='cb4')
h = F.average_pooling(h, (2, 2), channel_last=channel_last)
pred = PF.affine(h, 10, name='fc')
return pred
# BatchNormalization Small ResNet
def bn_resblock(x, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1),
test=False, w_bias=False, channel_last=False, name='convblock'):
axes = get_channel_axes(channel_last)
h = x
with nn.parameter_scope(name):
h = PF.convolution(h, maps, kernel=kernel, pad=pad, stride=stride,
channel_last=channel_last, with_bias=w_bias)
h = PF.batch_normalization(h, axes=axes, batch_stat=not test)
return F.relu(h + x)
def small_bn_resnet(image, test=False, w_bias=False, channel_last=False, name='bn-graph-ref'):
axes = get_channel_axes(channel_last)
h = image
h /= 255.0
h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1), channel_last=channel_last,
with_bias=w_bias, name='first-conv')
h = PF.batch_normalization(
h, axes=axes, batch_stat=not test, name='first-bn')
h = F.relu(h)
h = F.max_pooling(h, (2, 2), channel_last=channel_last)
h = bn_resblock(h, maps=16, test=test, w_bias=w_bias,
channel_last=channel_last, name='cb1')
h = bn_resblock(h, maps=16, test=test, w_bias=w_bias,
channel_last=channel_last, name='cb2')
h = bn_resblock(h, maps=16, test=test, w_bias=w_bias,
channel_last=channel_last, name='cb3')
h = bn_resblock(h, maps=16, test=test, w_bias=w_bias,
channel_last=channel_last, name='cb4')
h = F.average_pooling(h, (2, 2), channel_last=channel_last)
pred = PF.affine(h, 10, name='fc')
return pred
# BatchNormalization Small ResNet Opposite
def bn_opp_resblock(x, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1),
test=False, channel_last=False, name='convblock'):
axes = get_channel_axes(channel_last)
with nn.parameter_scope(name):
h = PF.batch_normalization(x, axes=axes, batch_stat=not test)
z = h
h = PF.convolution(h, maps, kernel=kernel, pad=pad, stride=stride,
channel_last=channel_last, with_bias=True)
return F.relu(z + h)
def small_bn_opp_resnet(image, test=False, w_bias=False, channel_last=False, name='bn-graph-ref'):
axes = get_channel_axes(channel_last)
h = image
h /= 255.0
h = PF.batch_normalization(
h, axes=axes, batch_stat=not test, name='first-bn')
h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1), channel_last=channel_last,
with_bias=w_bias, name='first-conv')
h = F.relu(h)
h = F.max_pooling(h, (2, 2), channel_last=channel_last)
h = bn_opp_resblock(h, maps=16, test=test,
channel_last=channel_last, name='cb1')
h = bn_opp_resblock(h, maps=16, test=test,
channel_last=channel_last, name='cb2')
h = bn_opp_resblock(h, maps=16, test=test,
channel_last=channel_last, name='cb3')
h = bn_opp_resblock(h, maps=16, test=test,
channel_last=channel_last, name='cb4')
h = F.average_pooling(h, (2, 2), channel_last=channel_last)
pred = PF.affine(h, 10, name='fc')
return pred
# BatchNormalization Folding Small ResNet
def bn_folding_resblock(x, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1),
test=False, channel_last=False, name='convblock'):
h = x
with nn.parameter_scope(name):
h = PF.convolution(h, maps, kernel=kernel, pad=pad, stride=stride,
channel_last=channel_last, with_bias=True)
return F.relu(h + x)
def small_bn_folding_resnet(image, test=False, channel_last=False, name='bn-graph-ref'):
h = image
h /= 255.0
h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1), channel_last=channel_last,
with_bias=True, name='first-conv')
h = F.relu(h)
h = F.max_pooling(h, (2, 2), channel_last=channel_last)
h = bn_folding_resblock(h, maps=16, test=test,
channel_last=channel_last, name='cb1')
h = bn_folding_resblock(h, maps=16, test=test,
channel_last=channel_last, name='cb2')
h = bn_folding_resblock(h, maps=16, test=test,
channel_last=channel_last, name='cb3')
h = bn_folding_resblock(h, maps=16, test=test,
channel_last=channel_last, name='cb4')
h = F.average_pooling(h, (2, 2), channel_last=channel_last)
pred = PF.affine(h, 10, name='fc')
return pred
# FusedBatchNormalization Small ResNet
def fbn_resblock(x, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1), test=False, name='fbn-convblock'):
with nn.parameter_scope(name):
h = PF.convolution(x, maps, kernel=kernel, pad=pad,
stride=stride, with_bias=False)
h = PF.fused_batch_normalization(h, x, batch_stat=not test)
return h
def small_fbn_resnet(image, test=False, name='fbn-graph-ref'):
h = image
h /= 255.0
h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1),
with_bias=False, name='first-fbn-conv')
h = PF.batch_normalization(h, batch_stat=not test, name='first-fbn')
h = F.relu(h)
h = F.max_pooling(h, (2, 2))
h = fbn_resblock(h, maps=16, test=test, name='fbn-cb1')
h = fbn_resblock(h, maps=16, test=test, name='fbn-cb2')
h = fbn_resblock(h, maps=16, test=test, name='fbn-cb3')
h = fbn_resblock(h, maps=16, test=test, name='fbn-cb4')
h = F.average_pooling(h, (2, 2))
pred = PF.affine(h, 10, name='fbn-fc')
return pred
# BatchNormalization Small ResNet removed functions
def bn_rm_resblock(x, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1),
test=False, w_bias=False, name='convblock'):
h = x
with nn.parameter_scope(name):
h = PF.convolution(h, maps, kernel=kernel, pad=pad,
stride=stride, with_bias=w_bias)
return F.relu(h + x)
def small_bn_rm_resnet(image, test=False, w_bias=False, name='bn-rm-graph-ref'):
h = image
h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1),
with_bias=w_bias, name='first-conv')
h = F.relu(h)
h = F.max_pooling(h, (2, 2))
h = bn_rm_resblock(h, maps=16, test=test, w_bias=w_bias, name='cb1')
h = bn_rm_resblock(h, maps=16, test=test, w_bias=w_bias, name='cb2')
h = bn_rm_resblock(h, maps=16, test=test, w_bias=w_bias, name='cb3')
h = bn_rm_resblock(h, maps=16, test=test, w_bias=w_bias, name='cb4')
h = F.average_pooling(h, (2, 2))
pred = PF.affine(h, 10, name='bn-rm-fc')
return pred
# BatchNormalization Small ResNet with batch_stat False
def bsf_resblock(x, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1),
test=False, w_bias=False, name='convblock'):
with nn.parameter_scope(name):
h = PF.convolution(x, maps, kernel=kernel, pad=pad,
stride=stride, with_bias=w_bias)
h = PF.batch_normalization(h, batch_stat=False)
return F.relu(h + x)
def small_bsf_resnet(image, w_bias=False, name='bn-graph-ref'):
h = image
h /= 255.0
h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1),
with_bias=w_bias, name='first-conv')
h = PF.batch_normalization(h, batch_stat=False, name='first-bn-bsf')
h = F.relu(h)
h = F.max_pooling(h, (2, 2))
h = bsf_resblock(h, maps=16, test=False, w_bias=w_bias, name='cb1')
h = bsf_resblock(h, maps=16, test=False, w_bias=w_bias, name='cb2')
h = bsf_resblock(h, maps=16, test=False, w_bias=w_bias, name='cb3')
h = bsf_resblock(h, maps=16, test=False, w_bias=w_bias, name='cb4')
h = F.average_pooling(h, (2, 2))
pred = PF.affine(h, 10, name='fc')
return pred
# Small BatchNormalization Multiple Inputs/Outputs ResNet
def multiple_inputs_outputs_resblock(x, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1),
w_bias=False, test=False, name='mo-convblock'):
h = x
with nn.parameter_scope(name):
h = PF.convolution(h, maps, kernel=kernel, pad=pad,
stride=stride, with_bias=w_bias)
h = PF.batch_normalization(h, axes=[1], batch_stat=not test)
return F.relu(h + x)
def small_multiple_inputs_outputs_resnet(images, test=False, w_bias=False):
# Branches
outputs = []
for i, image in enumerate(images):
h = image
h /= 255.0
h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1),
with_bias=w_bias, name='first-mo-conv-{}'.format(i))
h = PF.batch_normalization(
h, axes=[1], batch_stat=not test, name='first-mo-bn-{}'.format(i))
h = F.relu(h)
h = F.max_pooling(h, (2, 2))
outputs.append(h)
# Merge branches
z = sum(outputs)
h = multiple_inputs_outputs_resblock(
z, maps=16, w_bias=w_bias, test=test, name='mo-cb1')
h = F.average_pooling(h, (2, 2))
pred1 = PF.affine(h, 10, name='mo-fc1')
h = multiple_inputs_outputs_resblock(
z, maps=16, w_bias=w_bias, test=test, name='mo-cb2')
h = F.average_pooling(h, (2, 2))
pred2 = PF.affine(h, 10, name='mo-fc2')
return [pred1, pred2]
# Small BatchNormalization Folding Multiple Inputs/Outputs ResNet
def multiple_inputs_outputs_bn_folding_resblock(x, maps, kernel=(3, 3), pad=(1, 1),
stride=(1, 1), test=False, name='mo-convblock'):
h = x
with nn.parameter_scope(name):
h = PF.convolution(h, maps, kernel=kernel, pad=pad,
stride=stride, with_bias=True)
return F.relu(h + x)
def small_multiple_inputs_outputs_bn_folding_resnet(images, test=False):
# Branches
outputs = []
for i, image in enumerate(images):
h = image
h /= 255.0
h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1),
with_bias=True, name='first-mo-conv-{}'.format(i))
h = F.relu(h)
h = F.max_pooling(h, (2, 2))
outputs.append(h)
# Merge branches
z = sum(outputs)
h = multiple_inputs_outputs_bn_folding_resblock(
z, maps=16, test=test, name='mo-cb1')
h = F.average_pooling(h, (2, 2))
pred1 = PF.affine(h, 10, name='mo-fc1')
h = multiple_inputs_outputs_bn_folding_resblock(
z, maps=16, test=test, name='mo-cb2')
h = F.average_pooling(h, (2, 2))
pred2 = PF.affine(h, 10, name='mo-fc2')
return [pred1, pred2]
# ChannelLast BatchNormalization Small ResNet
def clbn_resblock(x, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1), test=False, bias_w=False, name='clbn-convblock'):
h = x
with nn.parameter_scope(name):
h = PF.convolution(x, maps, kernel=kernel, pad=pad, stride=stride,
channel_last=True, with_bias=bias_w)
z = h
h = PF.batch_normalization(h, axes=[3], batch_stat=not test)
return F.relu(h + z)
def small_clbn_resnet(image, test=False, w_bias=False):
h = image
h /= 255.0
h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1), channel_last=True,
with_bias=w_bias, name='first-clbn-conv')
h = PF.batch_normalization(
h, axes=[3], batch_stat=not test, name='first-clbn-bn')
h = F.relu(h)
h = F.max_pooling(h, (2, 2), channel_last=True)
h = clbn_resblock(h, maps=16, test=test, bias_w=w_bias, name='clbn-cb1')
h = clbn_resblock(h, maps=16, test=test, bias_w=w_bias, name='clbn-cb2')
h = clbn_resblock(h, maps=16, test=test, bias_w=w_bias, name='clbn-cb3')
h = clbn_resblock(h, maps=16, test=test, bias_w=w_bias, name='clbn-cb4')
h = F.average_pooling(h, (2, 2), channel_last=True)
pred = PF.affine(h, 10, name='clbn-fc')
return pred
# ChannelLast BatchNormalization Folding Small ResNet
def clbn_folding_resblock(x, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1), test=False, name='clbn-convblock'):
h = x
with nn.parameter_scope(name):
h = PF.convolution(x, maps, kernel=kernel, pad=pad, stride=stride,
channel_last=True, with_bias=True)
z = h
return F.relu(h + z)
def small_clbn_folding_resnet(image, test=False):
h = image
h /= 255.0
h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1), channel_last=True,
with_bias=True, name='first-clbn-conv')
h = F.relu(h)
h = F.max_pooling(h, (2, 2), channel_last=True)
h = clbn_folding_resblock(h, maps=16, test=test, name='clbn-cb1')
h = clbn_folding_resblock(h, maps=16, test=test, name='clbn-cb2')
h = clbn_folding_resblock(h, maps=16, test=test, name='clbn-cb3')
h = clbn_folding_resblock(h, maps=16, test=test, name='clbn-cb4')
h = F.average_pooling(h, (2, 2), channel_last=True)
pred = PF.affine(h, 10, name='clbn-fc')
return pred
# ChannelLast BatchNormalization Self-folding Small ResNet
def clbn_self_folding_resblock(x, i, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name='convblock'):
h = x
with nn.parameter_scope(name):
h = PF.convolution(h, maps, kernel=kernel, pad=pad, stride=stride,
channel_last=True, with_bias=False)
a, b = create_scale_bias(i, h.shape[3], axes=[3])
h = a * h + b
return F.relu(h + x)
def small_clbn_self_folding_resnet(image, name='clbn-self-folding-graph-ref'):
h = image
h /= 255.0
h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1), channel_last=True,
with_bias=False, name='first-conv')
a, b = create_scale_bias(1, h.shape[3], axes=[3])
h = a * h + b
h = F.relu(h)
h = F.max_pooling(h, (2, 2), channel_last=True)
h = clbn_self_folding_resblock(h, 2, maps=16, name='cb1')
h = clbn_self_folding_resblock(h, 3, maps=16, name='cb2')
h = clbn_self_folding_resblock(h, 4, maps=16, name='cb3')
h = clbn_self_folding_resblock(h, 5, maps=16, name='cb4')
h = F.average_pooling(h, (2, 2), channel_last=True)
pred = PF.affine(h, 10, name='fc')
return pred
# Small Identity ResNet
def id_resblock(x, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1), test=False, name='id-convblock'):
h = x
with nn.parameter_scope(name):
h = PF.convolution(h, maps, kernel=kernel, pad=pad,
stride=stride, with_bias=False)
h = PF.batch_normalization(h, axes=[1], batch_stat=not test)
return F.relu(h + x)
def small_id_resnet(image, test=False):
h = image
h /= 255.0
h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1),
with_bias=False, name='first-id-conv')
h = PF.batch_normalization(
h, axes=[1], batch_stat=not test, name='first-id-bn')
h = F.relu(h)
h = F.max_pooling(h, (2, 2))
h = id_resblock(h, maps=16, test=test, name='id-cb1')
h = id_resblock(h, maps=16, test=test, name='id-cb2')
h = id_resblock(h, maps=16, test=test, name='id-cb3')
h = id_resblock(h, maps=16, test=test, name='id-cb4')
h = F.average_pooling(h, (2, 2))
pred = PF.affine(h, 10, name='id-fc')
return pred
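# Illustrative sketch (not part of the original reference graphs): the
# per-channel arithmetic behind batch-normalization folding, which is what
# turns the bn_* graphs above into their *_folding counterparts. At
# inference time, BN(conv(x)) with scale gamma, bias beta, running mean mu
# and running variance var collapses into a single biased convolution:
#   W' = W * gamma / sqrt(var + eps)
#   b' = beta - mu * gamma / sqrt(var + eps)
def fold_bn_into_conv(W, gamma, beta, mu, var, eps=1e-5):
    import numpy
    scale = gamma / numpy.sqrt(var + eps)
    # OIHW weight layout assumed; broadcast the per-output-channel scale.
    return W * scale.reshape(-1, 1, 1, 1), beta - mu * scale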
# =======================================================================
# mdraeger/alarmclock | settingsHandler.py | GPL-3.0
# =======================================================================
## alarmclock (resembles an alarm clock for a raspberry pi with a
## 2.8" LCD touch display)
## Copyright (C) 2014 Marco Draeger
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import xml.dom
from xml.dom import Node
from xml.dom.minidom import parse
import sys
class SettingsHandler(object):
def __init__(self, settingsFile):
self.settingsFile = settingsFile
self.doc = parse(settingsFile)
self.settings = self.__getSettings__(self.doc)
def __getSettings__(self, doc):
settings = {}
for s in self.doc.getElementsByTagName("setting"):
settings[s.getAttribute("name")] = s.getAttribute("value")
return settings
def set(self, key, value):
self.settings[key] = value
for s in self.doc.getElementsByTagName("setting"):
if s.getAttribute("name") == key:
s.setAttribute("value", value)
self.__writeSettings__()
def __writeSettings__(self):
f=open (self.settingsFile, "wb")
f.write(self.doc.toprettyxml(newl="", indent="", encoding="UTF-8"))
f.close()
if __name__ == '__main__':
filename = 'settings.xml'
handler = SettingsHandler(filename)
handler.set('snooze', '05:00')
print (handler.settings)
print (handler.doc.toprettyxml(encoding="UTF-8"))
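# A minimal settings.xml compatible with the parsing above (inferred from
# the getElementsByTagName('setting') / getAttribute calls; the root tag
# name is an assumption -- any root element works):
#
# <?xml version="1.0" encoding="UTF-8"?>
# <settings>
#     <setting name="snooze" value="05:00"/>
#     <setting name="alarmTime" value="07:30"/>
# </settings>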
# =======================================================================
# ucsd-ccbb/Oncolist | src/server/Schema/DrugsSchemaBuilder.py | MIT
# =======================================================================
__author__ = 'guorongxu'
import sys
def build_schema(output_file, prefix):
filewriter = open(output_file, "a")
filewriter.write("curl -XDELETE \'http://localhost:9200/drugs/" + prefix + "\'\n")
filewriter.write("curl -XPUT \'http://localhost:9200/drugs/" + prefix + "/_mapping\' -d \'\n")
filewriter.write("{\n")
filewriter.write("\t\"" + prefix + "\": {\n")
filewriter.write("\t\t\"properties\": {\n")
filewriter.write("\t\t\t\"source\": {\"type\": " + "\"string\"},\n")
filewriter.write("\t\t\t\"version\": {\"type\": " + "\"string\"},\n")
filewriter.write("\t\t\t\"species\": {\"type\": " + "\"string\"},\n")
filewriter.write("\t\t\t\"network_name\": {\"type\": " + "\"string\"},\n")
filewriter.write("\t\t\t\"node_name\": {\"type\": " + "\"string\", \"index\": \"not_analyzed\"},\n")
filewriter.write("\t\t\t\"node_type\": {\"type\": " + "\"string\"},\n")
filewriter.write("\t\t\t\"drugbank_id\": {\"type\": " + "\"string\"},\n")
filewriter.write("\t\t\t\"synonyms\": {\"type\": " + "\"string\", \"index\": \"not_analyzed\"},\n")
filewriter.write("\t\t\t\"degree\": {\"type\": " + "\"integer\"},\n")
filewriter.write("\t\t\t\"node_list\": {\n")
filewriter.write("\t\t\t\t\"properties\": {\n")
filewriter.write("\t\t\t\t\t\"name\": {\"type\": " + "\"string\", \"index\": \"not_analyzed\"}\n")
filewriter.write("\t\t\t\t}\n")
filewriter.write("\t\t\t}\n")
filewriter.write("\t\t}\n")
filewriter.write("\t}\n")
filewriter.write("}\n")
filewriter.write("\'\n")
## Main entry
if __name__ == "__main__":
#output_file = "/Users/guorongxu/Desktop/SearchEngine/Drugbank/json_files/map.sh"
#prefix = "drugs_drugbank"
output_file = sys.argv[1] + "/" + sys.argv[2] + "/json_files/map.sh"
prefix = sys.argv[3]
build_schema(output_file, prefix)
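# Example invocation (inferred from the sys.argv handling and the commented
# defaults above):
#   python DrugsSchemaBuilder.py /Users/guorongxu/Desktop/SearchEngine Drugbank drugs_drugbank
# appends the mapping commands to .../Drugbank/json_files/map.sh; running
# that shell script then recreates the 'drugs_drugbank' mapping in the
# local Elasticsearch instance.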
# =======================================================================
# mdavoodi/konkourse-python | documents/forms.py | MIT
# =======================================================================
# File upload form
from django import forms
from documents.restrictions import RestrictedFileField
class DocumentForm(forms.Form):
types = [
'application/msword',
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'application/vnd.openxmlformats-officedocument.wordprocessingml.template',
'application/vnd.ms-word.document.macroEnabled.12',
'application/vnd.ms-word.template.macroEnabled.12',
'application/vnd.ms-excel',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'application/vnd.openxmlformats-officedocument.spreadsheetml.template',
'application/vnd.ms-excel.sheet.macroEnabled.12',
'application/vnd.ms-excel.template.macroEnabled.12',
'application/vnd.ms-excel.addin.macroEnabled.12',
'application/vnd.ms-excel.sheet.binary.macroEnabled.12',
'application/vnd.ms-powerpoint',
'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'application/vnd.openxmlformats-officedocument.presentationml.template',
'application/vnd.openxmlformats-officedocument.presentationml.slideshow',
'application/vnd.ms-powerpoint.addin.macroEnabled.12',
'application/vnd.ms-powerpoint.presentation.macroEnabled.12',
'application/vnd.ms-powerpoint.template.macroEnabled.12',
'application/vnd.ms-powerpoint.slideshow.macroEnabled.12',
'application/pdf',
'application/zip',
]
file = RestrictedFileField(max_upload_size=20971520, content_types=types)
message_post = forms.CharField(
required=False,
widget=forms.Textarea(
attrs={
"class": "inputConvo view-port",
"rows": "2",
"placeholder": "Describe the document"}))
# =======================================================================
# michelesr/network-monitor-server | src/addresses.py | GPL-3.0
# =======================================================================
#! /usr/bin/env python
"""
Framework di monitoraggio della rete
Modulo per la gestione degli indirizzi di rete
"""
from socket import socket
def _get_ip():
"""
Questa funzione restituisce l'indirizzo ip della macchina ottenendolo
dal nome di una socket verso google.com. Restituisce False se si
verificano eccezioni (es mancanza di connessione a internet).
"""
# inizializziamo la socket
s = socket()
try:
# ci connettiamo a 'google.com'
s.connect(('google.com', 80))
# prendiamo l'indirizzo dal nome della socket
address = s.getsockname()[0]
except:
# restituiamo False in caso di errore
address = False
return address
def get_network_address():
"""
Questa funzione tenta di restituire l'indirizzo di rete a partire
dall'indirizzo ip della macchina... e' basato sul fatto che su una
LAN generica l'indirizzo di rete e' ottenibile sostituendo l'ultima
parte dell'ip con '0/24' (notazione CIDR). In caso l'indirizzo
ottenuto in questa maniera non sia corretto sara' necessario utilizzare
la linea di comando per inserire l'indirizzo manualmente.
"""
    # get the machine's IP address
    address = _get_ip()
    # if the address is False, return False
    if not address:
        return False
    else:
        # split the IP into its four octets
        octets = address.split('.')
        # replace the last octet with '0/24'
        octets[3] = '0/24'
        # reassemble the final address
        return '.'.join(octets)
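# Usage sketch (illustrative): on a host whose IP is 192.168.1.42 this
# returns '192.168.1.0/24'; it returns False when the host is offline.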
| gpl-3.0 | 5,626,488,216,610,293,000 | 25.209677 | 75 | 0.634462 | false |
seap-udea/jSpice | bin/jspice/spicext.py | 1 | 9507 | #!/usr/bin/python
#############################################################
# /###### /## #
# /##__ ## |__/ #
# /##| ## \__/ /###### /## /####### /###### #
# |__/| ###### /##__ ##| ## /##_____/ /##__ ## #
# /## \____ ##| ## \ ##| ##| ## | ######## #
# | ## /## \ ##| ## | ##| ##| ## | ##_____/ #
# | ##| ######/| #######/| ##| #######| ####### #
# | ## \______/ | ##____/ |__/ \_______/ \_______/ #
# /## | ## | ## #
# | ######/ | ## #
# \______/ |__/ #
# #
# Jorge I. Zuluaga (C) 2016 #
#############################################################
#Function: an axtension to SpiceyPy
#############################################################
from spiceypy import wrapper as spy
import spiceypy.support_types as spytypes
#############################################################
#EXTERNAL MODULES
#############################################################
import time,datetime
import numpy as np
from scipy.optimize import brentq as _zero
from scipy.optimize import minimize_scalar as _minim
np.set_printoptions(threshold=np.inf)  # print arrays in full
#############################################################
#EXTEND SPICE
#############################################################
"""
These routines are intended to extend SPICE with new functionality.
Convention:
    def _<jroutine>(*args): Private routine
    spy.j<routine>: Extended routine
In your code, instead of:
    from spiceypy import wrapper as spy
use:
    from spicext import *
SpiceyPy and spicext routines can then be invoked as:
spy.<routine>
spy.j<routine>
"""
#############################################################
#CONSTANTS
#############################################################
spy.IDENTITY=np.identity(3)
spy.RAD=180/np.pi
spy.DEG=1/spy.RAD
#############################################################
#ROUTINES
#############################################################
def _utcnow():
utc=datetime.datetime.utcnow()
now=utc.strftime("%m/%d/%y %H:%M:%S.%f UTC")
return now
spy.jutcnow=_utcnow
def _locnow():
loc=datetime.datetime.now()
now=loc.strftime("%m/%d/%y %H:%M:%S.%f")
return now
spy.jlocnow=_locnow
def _etnow():
return spy.str2et(spy.jlocnow())
spy.jetnow=_etnow
def _et2str(et):
deltet=spy.deltet(et,"ET")
cal=spy.etcal(et-deltet,100)
return cal
spy.jet2str=_et2str
def _dec2sex(dec,sep=None,day=False):
if day:fac=24
else:fac=60
sgn=np.sign(dec)
dec=np.abs(dec)
H=np.floor(dec)
mm=(dec-H)*fac
M=np.floor(mm)
    ss=(mm-M)*60
    S=np.floor(ss)
H=sgn*H
if not sep is None:
return "%02d%s%02d%s%02.3f"%(int(H),sep[0],int(M),sep[1],ss)
return [H,M,ss]
spy.jdec2sex=_dec2sex
def _rad():return 180/np.pi
spy.jrad=_rad
def _deg():return np.pi/180
spy.jdeg=_deg
def _obsini(body,lon,lat,alt):
"""
    lon: longitude in degrees
    lat: latitude in degrees
    alt: altitude in meters
    obs: observer dictionary:
         lat,lon (radians)
         alt (kilometers)
         pos (cartesian position with respect to the ITRF93 ellipsoid)
         norm (normal vector wrt the ellipsoid)
         radii (a, b, c, flattening, mean radius)
         LOCALtoITRF93, ITRF93toLOCAL (transformation matrices)
"""
obs=dict(
ITRF93toLOCAL=np.zeros((3,3)),
LOCALtoITRF93=np.zeros((3,3)),
radii=np.zeros(3),
pos=np.zeros(3),
norm=np.zeros(3),
)
obs["lon"]=lon*spy.DEG
obs["lat"]=lat*spy.DEG
obs["alt"]=alt/1000.0
obs["body"]=body
# Body properties
n,obs["radii"]=spy.bodvrd(body,"RADII",3)
obs["radii"]=np.append(obs["radii"],
[(obs["radii"][0]-obs["radii"][2])/obs["radii"][0]])
obs["radii"]=np.append(obs["radii"],
[(obs["radii"][0]+obs["radii"][2])/2])
# Position in the ellipsoid
obs["pos"]=spy.georec(obs["lon"],obs["lat"],obs["alt"],
obs["radii"][0],obs["radii"][3])
# Normal vector to location
obs["norm"]=spy.surfnm(obs["radii"][0],obs["radii"][1],obs["radii"][2],obs["pos"])
# Vectors
uz=[0,0,1]
uy=spy.ucrss(obs["norm"],uz)
uz=obs["norm"]
ux=spy.ucrss(uy,uz)
# Matrices
obs["ITRF93toLOCAL"]=np.array([ux,uy,uz])
obs["LOCALtoITRF93"]=spy.invert(obs["ITRF93toLOCAL"]);
return obs
spy.jobsini=_obsini
def _rotmat(t):
mat=dict(
ITRF93toEJ2000=np.zeros((3,3)),
EJ2000toJ2000=np.zeros((3,3)),
J2000toEpoch=np.zeros((3,3)),
J2000toITRF93=np.zeros((3,3)),
)
mat["ITRF93toEJ2000"]=spy.pxform("ITRF93","ECLIPJ2000",t)
mat["EJ2000toJ2000"]=spy.pxform("ECLIPJ2000","J2000",t)
mat["J2000toEpoch"]=spy.pxform("J2000","EARTHTRUEEPOCH",t)
mat["J2000toITRF93"]=spy.pxform("J2000","ITRF93",t)
return mat
spy.jrotmat=_rotmat
def _ephem(target,t,obs,mat,depth='epoch'):
"""
Parameters:
body: string for target body
t: ephemeris time
obs: observer dictionary
mat: rotation matrices
Return:
ephem: dictionary with ephemeris
obsSSBEJ2000: Coordinate of the Observer wrt SSB in ELIPJ2000
targetSSBEJ2000: Coordinate of the target wrt SSB in ECLIPJ2000
targetSSBJ2000: Coordinate of the target wrt SSB in J2000
targetOBSEJ2000: Coordinate of the target wrt observer in ECLIPJ2000
targetOBSJ2000: Coordinate of the target wrt observer in J2000
targetOBST: Coordinate of the target wrt observer at Epoch
targetOBSITRF93: Coordinate of the target wrt observer in ITRF93
targetOBSLOCAL: Coordinate of the target wrt observer in Local coordinates
distance: distance from target to observer
RA (radians): J2000
DEC (radians): J2000
RAt (radians): at epoch
DECt (radians): at epoch
az (radians): Azimuth
el (radians): elevation
"""
ephem=dict(
target=target,
        targetSSBEJ2000=np.zeros(3),
        targetOBSEJ2000=np.zeros(3),
        targetOBSJ2000=np.zeros(3),
distance=0,
RAJ2000=0,
DECJ2000=0,
)
bodySSBEJ2000,ltmp=spy.spkezr(obs["body"],t,
"ECLIPJ2000","NONE","SOLAR SYSTEM BARYCENTER")
obsEJ2000=spy.mxv(mat["ITRF93toEJ2000"],obs["pos"])
ephem["obsSSBEJ2000"]=spy.vadd(bodySSBEJ2000[:3],obsEJ2000)
# Position of target corrected by light-time
n,ephem["radii"]=spy.bodvrd(target,"RADII",3)
ephem["radii"]=np.append(ephem["radii"],
[(ephem["radii"][0]-ephem["radii"][2])/ephem["radii"][0]])
ephem["radii"]=np.append(ephem["radii"],
[(ephem["radii"][0]+ephem["radii"][2])/2])
lt=1;ltold=0
while np.abs((lt-ltold)/lt)>=1e-10:
ltold=lt
ephem["targetSSBEJ2000"],ltmp=spy.spkezr(target,t-lt,"ECLIPJ2000","NONE",
"SOLAR SYSTEM BARYCENTER")
ephem["targetOBSEJ2000"]=spy.vsub(ephem["targetSSBEJ2000"][:3],
ephem["obsSSBEJ2000"])
lt=spy.vnorm(ephem["targetOBSEJ2000"])/spy.clight()
# Ecliptic coordinates at J2000
ephem["distance"],ephem["eclon"],ephem["eclat"]=spy.recrad(ephem["targetOBSEJ2000"])
# Equator J2000
ephem["targetOBSJ2000"]=spy.mxv(mat["EJ2000toJ2000"],ephem["targetOBSEJ2000"])
# Coordinates at J2000
ephem["distance"],ephem["RA"],ephem["DEC"]=spy.recrad(ephem["targetOBSJ2000"])
ephem["angsize"]=2*(ephem["radii"][4]/ephem["distance"])*spy.jrad()*3600
# Coordinates at Epoch
ephem["targetOBST"]=spy.mxv(mat["J2000toEpoch"],ephem["targetOBSJ2000"])
d,ephem["RAt"],ephem["DECt"]=spy.recrad(ephem["targetOBST"])
# Topocentric coordinates
ephem["targetOBSITRF93"]=spy.mxv(mat["J2000toITRF93"],ephem["targetOBSJ2000"])
ephem["targetOBSLOCAL"]=spy.mxv(obs["ITRF93toLOCAL"],ephem["targetOBSITRF93"])
udir,mag=spy.unorm(ephem["targetOBSLOCAL"])
udir[1]*=-1
d,az,el=spy.reclat(udir)
if(az<0):az+=2*np.pi
ephem["el"]=el
ephem["z"]=np.pi/2-ephem["el"]
ephem["az"]=az
return ephem
spy.jephem=_ephem
# Find zeros
spy.jzero=_zero
spy.jminim=_minim
# Angular distance
def _gcdist(lam1,lam2,phi1,phi2):
sf=np.sin((phi2-phi1)/2)
sl=np.sin((lam2-lam1)/2)
d=2*np.arcsin((sf*sf+np.cos(phi1)*np.cos(phi2)*sl*sl)**0.5)
return d
spy.jgcdist=_gcdist
def _angdis(body1,body2,t,obs,k=0):
"""Calculate the angular distance of the contact-function (fk) of two
objects as observed from observatory obs
Parameters:
body1: Body 1 string (largest body)
body2: Body 2 string
t: ephemeris time
obs: observer dictionary
k: k-parameter of the contact-function. k=0 (angular distance),
k=+1 (external contact), k=-1 (internal contact)
Returns:
if k==0: Angular distance
if k!=0: angdist-rad1-k*rad2
"""
mat=spy.jrotmat(t)
ephem1=spy.jephem(body1,t,obs,mat)
ephem2=spy.jephem(body2,t,obs,mat)
angdist=spy.jgcdist(ephem1["RA"],ephem2["RA"],ephem1["DEC"],ephem2["DEC"])
if k==0:
return angdist
else:
rad1=ephem1["angsize"]/2
rad2=ephem2["angsize"]/2
fk=angdist*spy.jrad()*3600.0-rad1-k*rad2
return fk
spy.jangdis=_angdis
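# Usage sketch (illustrative; assumes the required SPICE kernels are already
# loaded and the body names are valid):
#   obs = spy.jobsini("EARTH", -75.0, 6.2, 2200.0)
#   fk = spy.jangdis("MOON", "SUN", spy.jetnow(), obs, k=1)  # external contact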
| apache-2.0 | -5,815,242,042,989,479,000 | 30.068627 | 88 | 0.535185 | false |
Anaconda-Platform/anaconda-client | binstar_client/mixins/package.py | 1 | 1070 | '''
Created on May 23, 2014
@author: sean
'''
from binstar_client.utils import jencode
from binstar_client.errors import Conflict
class PackageMixin(object):
def copy(self, owner, package, version, basename=None,
to_owner=None, from_label='main', to_label='main', replace=False, update=False):
copy_path = "/".join((owner, package, version, basename or ''))
url = '{}/copy/package/{}'.format(self.domain, copy_path)
payload = dict(to_owner=to_owner, from_channel=from_label, to_channel=to_label)
data, headers = jencode(payload)
if replace:
res = self.session.put(url, data=data, headers=headers)
elif update:
res = self.session.patch(url, data=data, headers=headers)
else:
res = self.session.post(url, data=data, headers=headers)
try:
self._check_response(res)
except Conflict:
            raise Conflict('File conflict while copying! Try the --replace or --update options to force copying')
return res.json()
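# Usage sketch (illustrative; the client object and argument values are
# hypothetical -- any class mixing in PackageMixin with a `session`,
# `domain` and `_check_response` works):
#   client.copy('owner', 'pkg', '1.0', from_label='dev', to_label='main')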
| bsd-3-clause | 1,313,989,696,757,145,000 | 30.470588 | 117 | 0.628037 | false |
colloquium/spacewalk | backend/server/test/unit-test/rhnSQL/test_executemany.py | 1 | 1833 | #!/usr/bin/python
# Copyright (c) 2005--2010 Red Hat, Inc.
#
#
#
# $Id$
raise Exception("""
This test is no longer valid; see the bug
https://bugzilla.redhat.com/show_bug.cgi?id=423351
""")
import os
import unittest
from spacewalk.server import rhnSQL
DB = 'rhnuser/rhnuser@webdev'
class ExecutemanyTest(unittest.TestCase):
def setUp(self):
self.table_name = "misatest_%d" % os.getpid()
rhnSQL.initDB(DB)
self._cleanup()
rhnSQL.execute("create table %s (id int, val varchar2(10))" %
self.table_name)
def _cleanup(self):
try:
rhnSQL.execute("drop table %s" % self.table_name)
except rhnSQL.SQLStatementPrepareError:
pass
def tearDown(self):
self._cleanup()
rhnSQL.commit()
def test_executemany(self):
"""
Tests the case of passing an integer as a value into a VARCHAR2 column
(executemany makes it more interesting because the driver generally
        verifies the param types; passing a string and an int takes it one
step further)
"""
h = rhnSQL.prepare("""
insert into %s (id, val) values (:id, :val)
""" % self.table_name)
params = {
'id' : [1, 2],
'val' : ['', 3],
}
        h.executemany(**params)
h = rhnSQL.prepare("select id, val from %s" % self.table_name)
h.execute()
rows = h.fetchall_dict()
self.assertEqual(len(rows), 2)
v_id, v_val = rows[0]['id'], rows[0]['val']
self.assertEqual(v_id, 1)
self.assertEqual(v_val, None)
v_id, v_val = rows[1]['id'], rows[1]['val']
self.assertEqual(v_id, 2)
self.assertEqual(v_val, '3')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -2,491,424,647,257,956,400 | 25.185714 | 78 | 0.556465 | false |
agoravoting/agora-tally | agora_tally/ballot_codec/nvotes_codec.py | 1 | 46867 | # self file is part of agora-tally.
#
# Copyright (C) 2021 Agora Voting SL <[email protected]>
# agora-tally is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License.
#
# agora-tally is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with agora-tally. If not, see <http://www.gnu.org/licenses/>.
import unittest
import copy
from operator import itemgetter
from agora_tally.ballot_codec import mixed_radix
from ..file_helpers import serialize
'''
Encodes/decodes the answer to a question given the question type.
The encoder function always receives the answer as a list of answer ids.
'''
VALID_CODECS = [
"plurality-at-large",
"borda-nauru",
"borda",
"desborda3",
"desborda2",
"desborda",
"borda-custom",
"cumulative"
]
class NVotesCodec(object):
'''
Used for encoding and decoding a question
'''
question = None
def __init__(self, question):
self.question = copy.deepcopy(question)
def get_bases(self):
'''
Returns the bases related to this question.
'''
# sort answers by id
sorted_answers = copy.deepcopy(self.question["answers"])
sorted_answers.sort(key=itemgetter('id'))
valid_answers = [
answer
for answer in sorted_answers
if dict(title='invalidVoteFlag', url='true') not in answer.get('urls', [])
]
tally_type = self.question["tally_type"]
    # Calculate the base for answers. It depends on the
    # `question.tally_type`:
    # - plurality-at-large: base 2 (value can be either 0 or 1)
    # - preferential (*bordas*): question.max + 1
    # - cumulative: question.extra_options.cumulative_number_of_checkboxes + 1
answer_base = 2
if tally_type == "plurality-at-large":
answer_base = 2
elif tally_type == "cumulative":
checkboxes = self.question\
.get("extra_options", {})\
.get("cumulative_number_of_checkboxes", 1)
      answer_base = checkboxes + 1
else:
answer_base = self.question["max"] + 1;
# Set the initial bases and raw ballot, populate bases using the valid
# answers list
bases = [2] + len(valid_answers)*[answer_base]
# populate with byte-sized bases for the \0 end for each write-in
if (
"extra_options" in self.question and
"allow_writeins" in self.question["extra_options"] and
self.question["extra_options"]["allow_writeins"] is True
):
      write_in_answers = [
        answer
        for answer in sorted_answers
        if dict(title='isWriteIn', url='true') in answer.get('urls', [])
      ]
      bases = bases + len(write_in_answers)*[256]
return bases
def encode_to_int(self, raw_ballot):
'''
Converts a raw ballot into an encoded number ready to be encrypted.
A raw ballot is a list of positive integer numbers representing
the ballot, and can be obtained calling to `self.encode_raw_ballot()`.
Encoding is done using mixed radix encoding. The bases are
automatically calculated when instancing this object. The bases
used are either the number of points assigned to each answer or the
position in which that answer was selected for preferential
elections. Please refer to mixed radix documentation to understand
how it works or read https://en.wikipedia.org/wiki/Mixed_radix
# Basics
If in a `plurality-at-large` there are three candidates `A`, `B`,
and `C` with answer ids `0`, `1` and `2`, and the voter wants to
vote to candidates `A` and `C`, then his ballot choices (obtained
using encode_raw_ballot) will be `v = [1, 0, 1]` and the encoded
choices will be encoded this way:
```
encoded_choices = v[0] + v[1]*b[0] + v[2]*b[0]*b[1]
encoded_choices = v[0] + b[0]*(v[1] + b[1]*v[2])
encoded_choices = 1 + 2*(0 + 2 * 1) = 1 + 4*1 = 5
```
    And the bases are `b = [2, 2, 2]`. The reason the bases are 2 here
    is that plurality-at-large is a non-preferential voting system
    and each base represents whether the voter chose the option
    (`v[x] = 1`) or not (`v[x] = 0`), so the base is in this case
    `max(v[x]) + 1`.
# Preferential systems
In a preferential system, the voter can choose a specific ordering.
If we reuse the previous example, the voter might have chosen for
the first choice in his ballot candidate `A`, and for his second
choice candidate `B`. Not choosing a candidate would be encoded as
value `0`, so choosing it as first position would be value `1` and
so on. If the voter can choose up to 3 candidates, then the base
would be `maxChoices+1 = 3+1 = 4`, and thus bases will be
`b = [4, 4, 4]` and choices would be `v = [1, 0, 2]` and the
encoded choices would be calculated as:
```
    encoded_choices = v[0] + v[1]*b[0] + v[2]*b[0]*b[1]
encoded_choices = v[0] + b[0]*(v[1] + b[1]*v[2])
encoded_choices = 1 + 4*(0 + 4*2) = 1 + 16*2 = 33
```
# Invalid Ballot Flag
What was outlined before is the basics, but actually it does not
work exactly like that. The first value (`v[0]`) in the raw ballot
does not really represent the vote for the first candidate answer,
but it's always a flag saying if the ballot was marked as invalid
or not by the voter. Note that this is not the only way to create
an invalid ballot. For example the voter could vote to more options
than allowed, and that would also be an invalid ballot.
    We assume the invalid ballot flag is represented in the question
    as an answer inside `question.answers` and it is flagged by having
    an element in `answer.urls` as
    `{"title":'invalidVoteFlag', "url":'true'}`.
Using the last example of a preferential vote, the bases would not
be `b = [4, 4, 4]` but `b = [2, 4, 4, 4]` (the first base encodes
always the invalid flag, whose max value is 1 so the base is always
2).
The choices would not be `v = [1, 0, 2]` but (if the vote was
not marked as invalid) `v = [0, 1, 0, 2]` and thus the encoded
choices would be calculated as:
```
encoded_choices = v[0] + b[0]*(v[1] + b[1]*(v[2] + b[2]*v[3])
encoded_choices = 0 + 2*(1 + 4*(0 + 4*2)) = 2*1 + 2*4*4*2
encoded_choices = 2*1 + 32*2 = 66
```
# Cumulative voting system
In a cumulative voting system, the voter would have a total number
of integer points to assign to candidates, and the voter can assign
them to the available candidates with a maximum number of options
that can be assigned to each candidate.
For example, the voter might be able to assign up to 2 points to
each candidate and assign a total of 3 points. In practice, the
encoding is done in a very similar format as with preferential
voting system. For each candidate, the value we assign is a number
that represents the points assigned to the candidate, and the base
used is the maximum number of assignable points plus one.
Retaking the previous example used for plurality-at-large and used
    for a preferential voting system, if the voter can assign a
    maximum of 4 points to each candidate, and he wants to assign 2
    points to candidate `A` and 2 points to candidate `C` and he
    didn't mark his ballot
as invalid, then his choices would be `v = [0, 2, 0, 1]`, the bases
would be `b = [2, 5, 5, 5]` and the encoded choices would be
calculated as:
```
encoded_choices = v[0] + b[0]*(v[1] + b[1]*(v[2] + b[2]*v[3])
encoded_choices = 0 + 2*(2 + 5*(0 + 5*1)) = 2*2 + 2*5*5*1
encoded_choices = 2*2 + 50*1 = 54
```
# Write-ins
This encoder supports write-ins. The idea of write-ins is that the
voter can choose candidates that are not in the preconfigured list
of candidates. The maximum number of write-ins allowed is
    calculated automatically by supposing the voter tries to
    distribute his vote entirely just to write-in candidates, which
is usually `question.max`.
The vote for each write-in is encoded using the same procedure as
for normal candidates, in order and as if the write-ins were in
    the list of candidates. It assumes all write-ins (even if not
selected) are in the list of candidates and they are flagged as
such simply by an element in `answer.urls` as
`{"title":'isWriteIn', "url":'true'}`.
For example in a plurality-at-large question example with three
candidates `A`, `B` and `C` where the voter can choose up to 2
candidates, if the voter wants to cast a valid ballot to his 2
write-ins, then the bases, the choices and the encoded choices
would be:
```
    # bases
    b = [2, 2, 2, 2, 2, 2]
    # choices
    v = [0, 0, 0, 0, 1, 1]
encoded_choices = 1*2^4 + 1*2^5 = 48
```
# Write-in names
Of course that's not where a vote with write-ins ends. If the voter
voted to the write-ins, we would also have to encode the free text
string of the name of the write-ins. This is done by converting the
text from UTF-8 to numeric bytes, and encoding each byte using
2^8 = 256 as a base. The separation between the different write-in
names is done using an empty byte (so `v[x] = 0`).
So if in our case the name of the voter's two write-ins is `D` and
`E`, and knowing that character D is encoded as number `68` and E
is `69`, then the bases, the choices and the encoded choices
would be:
```
    # bases
    b = [2, 2, 2, 2, 2, 2, 256, 256, 256, 256]
    # choices
    v = [0, 0, 0, 0, 1, 1, 68, 0, 69, 0]
    encoded_choices = 1*2^4 + 1*2^5 + 68*2^6 + 69*(2^6 * 256^2) = 289411376
```
'''
return mixed_radix.encode(
value_list=raw_ballot["choices"],
base_list=raw_ballot["bases"]
)
def decode_from_int(self, int_ballot):
'''
    Does exactly the reverse of encode_to_int. It should be
    such that the following statement is always true:
    ```
    data = codec.decode_from_int(
      codec.encode_to_int(raw_ballot)
    )
    ```
```
This function is very useful for sanity checks.
'''
bases = self.get_bases()
len_bases = len(bases)
choices = mixed_radix.decode(
base_list=bases,
encoded_value=int_ballot,
last_base=256
)
# minor changes are required for the write-ins
if (
"extra_options" in self.question and
"allow_writeins" in self.question["extra_options"] and
self.question["extra_options"]["allow_writeins"] is True
):
# make the number of bases equal to the number of choices
index = len(bases) + 1
while index <= len(choices):
bases.append(256)
index += 1
# ensure that for each write-in answer there is a \0 char at the
# end
num_write_in_answers = len([
answer
for answer in self.question["answers"]
if dict(title='isWriteIn', url='true') in answer.get('urls', [])
])
num_write_in_strings = 0
write_ins_text_start_index = len_bases - num_write_in_answers
index2 = write_ins_text_start_index
while index2 < len(choices):
if choices[index2] == 0:
num_write_in_strings += 1
index2 += 1
# add the missing zeros
index3 = 0
while index3 < num_write_in_answers - num_write_in_strings:
bases.append(256)
choices.append(0)
index3 += 1
return dict(
choices=choices,
bases=bases
)
def encode_raw_ballot(self):
'''
Returns the ballot choices and the bases to be used for encoding
    as a dict, for example something like:
```
dict(
choices=[0, 0, 0, 0, 1, 1, 68, 0, 69, 0],
bases=[ 2, 2, 2, 2, 2, 2, 256, 256, 256, 256]
)
```
Please read the description of the encode function for details on
the output format of the raw ballot.
'''
# sort answers by id
sorted_answers = copy.deepcopy(self.question["answers"])
sorted_answers.sort(key=itemgetter('id'))
# Separate the answers between:
# - Invalid vote answer (if any)
# - Write-ins (if any)
# - Valid answers (normal answers + write-ins if any)
invalid_answers = [
answer
for answer in sorted_answers
if dict(title='invalidVoteFlag', url='true') in answer.get('urls', [])
]
invalid_vote_answer = (
None
if len(invalid_answers) == 0
else invalid_answers[0]
)
invalid_vote_flag = (
1
if (
invalid_vote_answer is not None and
"selected" in invalid_vote_answer and
invalid_vote_answer["selected"] > -1
)
else 0
)
    write_in_answers = [
      answer
      for answer in sorted_answers
      if dict(title='isWriteIn', url='true') in answer.get('urls', [])
    ]
valid_answers = [
answer
for answer in sorted_answers
if dict(title='invalidVoteFlag', url='true') not in answer.get('urls', [])
]
# Set the initial bases and raw ballot. We will populate the rest next
bases = self.get_bases()
choices = [invalid_vote_flag]
# populate raw_ballot and bases using the valid answers list
tally_type = self.question["tally_type"]
for answer in valid_answers:
if tally_type == 'plurality-at-large':
# We just flag if the candidate was selected or not with 1 for selected
# and 0 otherwise
answer_value = (
0
if (
"selected" not in answer or
answer["selected"] is None or
answer["selected"] == -1
)
else 1
)
choices.append(answer_value)
else:
# we add 1 because the counting starts with 1, as zero means this
# answer was not voted / ranked
answer_value = (
0
if (
"selected" not in answer or
answer["selected"] is None
)
else answer["selected"] + 1
)
choices.append(answer_value)
    # Populate the bases and the raw_ballot values with the write-ins,
    # if there are any. We will go through each write-in (if any),
    # encode the write-in answer.text string as UTF-8 and use for
    # each byte a specific value with base 256, ending each write-in
    # with a \0 byte. Note that even empty write-ins get a
    # terminating \0 byte.
if (
"extra_options" in self.question and
"allow_writeins" in self.question["extra_options"] and
self.question["extra_options"]["allow_writeins"] is True
):
      for answer in write_in_answers:
if "text" not in answer or len(answer["text"]) == 0:
# we don't do a bases.append(256) as this is done in get_bases()
# end it with a zero
choices.append(0)
continue
encoded_text = answer["text"].encode('utf-8')
for text_byte in encoded_text:
bases.append(256)
choices.append(text_byte)
# End it with a zero. we don't do a bases.append(256) as this is done in
# get_bases()
choices.append(0)
return dict(
bases=bases,
choices=choices
)
def decode_raw_ballot(self, raw_ballot):
'''
Does the opposite of `encode_raw_ballot`.
Returns `self.questions` with the data from the raw ballot.
'''
# 1. clone the question and reset the selections
question = copy.deepcopy(self.question)
for answer in question['answers']:
answer['selected'] = -1
# 2. sort & segment answers
# 2.1. sort answers by id
sorted_answers = question["answers"][:]
sorted_answers.sort(key=itemgetter('id'))
# 3. Obtain the invalidVote flag and set it
valid_answers = [
answer
for answer in sorted_answers
if dict(title='invalidVoteFlag', url='true') not in answer.get('urls', [])
]
invalid_answers = [
answer
for answer in sorted_answers
if dict(title='invalidVoteFlag', url='true') in answer.get('urls', [])
]
invalid_vote_answer = (
None
if len(invalid_answers) == 0
else invalid_answers[0]
)
if invalid_vote_answer is not None:
if raw_ballot["choices"][0] > 0:
invalid_vote_answer["selected"] = 0
else:
invalid_vote_answer["selected"] = -1
# 4. Do some verifications on the number of choices:
# Checking that the raw_ballot has as many choices as required
min_num_choices = len(question["answers"])
if len(raw_ballot["choices"]) < min_num_choices:
raise Exception('Invalid Ballot: Not enough choices to decode')
    # 5. Obtain the vote for valid answers and populate the selections,
    # reusing the valid_answers list computed in step 3.
    # 5.1. Populate the valid answers. We assume they are in the same order as
# in raw_ballot["choices"]
for index, answer in enumerate(valid_answers):
      # we add 1 to the index because raw_ballot["choices"][0] is just
      # the invalidVoteFlag
choice_index = index + 1
answer["selected"] = raw_ballot["choices"][choice_index] - 1
# 6. Filter for the write ins, decode the write-in texts into
# UTF-8 and split by the \0 character, finally the text for the
# write-ins.
if (
"extra_options" in question and
"allow_writeins" in question["extra_options"] and
question["extra_options"]["allow_writeins"] is True
):
write_in_answers = [
answer
for answer in sorted_answers
if dict(title='isWriteIn', url='true') in answer.get('urls', [])
]
# if no write ins, return
if len(write_in_answers) == 0:
return question
# 6.1. Slice the choices to get only the bytes related to the write ins
if invalid_vote_answer is None:
write_ins_start_index = len(question["answers"]) + 1
else:
write_ins_start_index = len(question["answers"])
write_in_raw_bytes = raw_ballot["choices"][write_ins_start_index:]
# 6.2. Split the write-in bytes arrays in multiple sub-arrays
# using byte \0 as a separator.
write_ins_raw_bytes_array = [ [] ]
for index, byte_element in enumerate(write_in_raw_bytes):
if byte_element == 0:
# Start the next write-in byte array, but only if this is
# not the last one
if index != len(write_in_raw_bytes) - 1:
write_ins_raw_bytes_array.append([])
else:
last_index = len(write_ins_raw_bytes_array) - 1
write_ins_raw_bytes_array[last_index].append(byte_element)
if len(write_ins_raw_bytes_array) != len(write_in_answers):
        raise Exception(
          "Invalid Ballot: invalid number of write-in bytes," +
          " len(write_ins_raw_bytes_array) = " + str(len(write_ins_raw_bytes_array)) +
          ", len(write_in_answers) = " + str(len(write_in_answers))
        )
# 6.3. Decode each write-in byte array
write_in_decoded = [
bytes(write_in_encoded_utf8).decode('utf-8')
for write_in_encoded_utf8 in write_ins_raw_bytes_array
]
# 6.4. Assign the write-in name for each write in
for index, write_in_answer in enumerate(write_in_answers):
write_in_answer["text"] = write_in_decoded[index]
else:
# if there are no write-ins, we will check that there are no more choices
# set after the choice for the last answer, as they would not mean
# anything and thus it would be an invalid ballot, but one of a different
# type that just marking the ballot invalid or marking more/less options
# than required. It would be gibberish without any meaning, so we raise
# an exception on that use-case.
      if len(valid_answers) + 1 != len(raw_ballot["choices"]):
        raise Exception(
          "Invalid Ballot: invalid number of choices," +
          " len(raw_ballot[\"choices\"]) = " + str(len(raw_ballot["choices"])) +
          ", len(valid_answers) + 1 = " + str(len(valid_answers) + 1)
        )
return question
def sanity_check(self):
'''
Sanity check with a specific manual example, to see that encoding
and decoding works as expected.
Returns True if the test checks out or False otherwise.
'''
try:
data = dict(
question=dict(
tally_type="plurality-at-large",
max=3,
extra_options=dict(allow_writeins=True),
answers=[
dict(id=0),
dict(id=1),
dict(id=2),
dict(
id=3,
urls=[dict(title='invalidVoteFlag', url='true')]
),
dict(
id=4,
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=5,
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=6,
urls=[dict(title='isWriteIn', url='true')]
)
]
),
ballot=dict(
tally_type="plurality-at-large",
max=3,
extra_options=dict(allow_writeins=True),
answers=[
dict(id=0, selected=0 ),
dict(id=1, selected=-1),
dict(id=2, selected=-1),
dict(
id=3,
selected=-1,
urls=[dict(title='invalidVoteFlag', url='true')]
),
dict(
id=4,
text='E',
selected=0,
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=5,
selected=-1,
text='',
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=6,
selected=0,
text='Ä bc',
urls=[dict(title='isWriteIn', url='true')]
)
]
),
raw_ballot=dict(
bases= [2, 2, 2, 2, 2, 2, 2, 256, 256, 256, 256, 256, 256, 256, 256, 256],
choices= [0, 1, 0, 0, 1, 0, 1, 69, 0, 0, 195, 132, 32, 98, 99, 0]
),
int_ballot=916649230342635397842
)
# 1. encode from ballot to raw_ballot and test it
encoder = NVotesCodec(data["ballot"])
raw_ballot = encoder.encode_raw_ballot()
if serialize(raw_ballot) != serialize(data["raw_ballot"]):
raise Exception("Sanity Check fail")
# 2. encode from raw_ballot to BigInt and test it
int_ballot = encoder.encode_to_int(raw_ballot)
if serialize(int_ballot) != serialize(data["int_ballot"]):
raise Exception("Sanity Check fail")
# 3. create a pristine encoder using the question without any selection
# set, and decode from BigInt to raw_ballot and test it
decoder = NVotesCodec(data["question"])
decoded_raw_ballot = decoder.decode_from_int(data["int_ballot"])
if serialize(decoded_raw_ballot) != serialize(data["raw_ballot"]):
raise Exception("Sanity Check fail")
# 4. decode from raw ballot to ballot and test it
decoded_ballot = decoder.decode_raw_ballot(decoded_raw_ballot)
if serialize(decoded_ballot) != serialize(data["ballot"]):
raise Exception("Sanity Check fail")
except Exception as e:
raise e
# return False
return True
def biggest_encodable_normal_ballot(self):
'''
Returns the biggest encodable ballot that doesn't include any
write-in text (or they are empty strings) encoded as a big int
voting to non-write-ins.
Used to know if the ballot would overflow, for example during
election creation, because it contains too many options.
'''
bases = self.get_bases()
    # calculate the biggest number that can be encoded with the
    # minimum number of bases, which should be smaller than the modulus
highest_value_list = [base-1 for base in bases]
highest_encoded_ballot = mixed_radix.encode(
value_list=highest_value_list,
base_list=bases
)
return highest_encoded_ballot
def num_write_in_bytes_left(self, modulus):
'''
Returns the numbers of ASCII characters left to encode a number
not bigger than the BigInt modulus given as input.
'''
# The calculations here do not make sense when there are no write-ins
if (
"extra_options" not in self.question or
"allow_writeins" not in self.question["extra_options"] or
self.question["extra_options"]["allow_writeins"] is False
):
raise Exception("Contest does not have write-ins")
# Sanity check: modulus needs to be bigger than the biggest
# encodable normal ballot
bases = self.get_bases()
highest_int = self.biggest_encodable_normal_ballot()
if modulus - highest_int < 1:
raise Exception("modulus too small")
    # If we decode the modulus minus one, the value will be the highest
    # encodable number plus one, given the set of bases for this
    # question and using 256 as the last_base.
    # However, as it overflows the maximum encodable number, the last
    # byte (last base) is unusable and should be discarded. That is
    # why max_len is obtained as len(decoded_modulus) - 1.
decoded_modulus = mixed_radix.decode(
base_list=bases,
encoded_value=(modulus - 1),
last_base=256
)
encoded_raw_ballot = self.encode_raw_ballot()
max_len = len(decoded_modulus) - 1
# As we know that the modulus is big enough for a ballot with no
# write-ins and because we know all extra bases will be bytes,
# the difference between the number of bases used for encoding the
# ballot and the number of bases used to encode the modulus is the
# number of byte bases left
return max_len - len(encoded_raw_ballot["bases"])
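# Illustrative usage sketch (not part of the original module): it encodes and
# decodes the plurality-at-large example worked out in the encode_to_int()
# docstring. The answer ids and selections below are hypothetical.
def _usage_example():  # pragma: no cover
  question = dict(
    tally_type="plurality-at-large",
    answers=[
      dict(id=0, selected=0),  # candidate A, chosen
      dict(id=1),              # candidate B, not chosen
      dict(id=2, selected=0),  # candidate C, chosen
    ]
  )
  codec = NVotesCodec(question)
  raw_ballot = codec.encode_raw_ballot()
  # bases = [2, 2, 2, 2], choices = [0, 1, 0, 1]
  int_ballot = codec.encode_to_int(raw_ballot)
  # 0 + 2*(1 + 2*(0 + 2*1)) = 10
  assert int_ballot == 10
  assert codec.decode_from_int(int_ballot) == raw_ballot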
class TestNVotesCodec(unittest.TestCase):
def test_bases(self):
# The question contains the minimum data required for the encoder to work
data_list = [
dict(
question=dict(
tally_type="plurality-at-large",
answers=[
dict(id=0),
dict(id=1,selected=0),
dict(id=2),
dict(id=3),
dict(id=4),
dict(id=5, selected=1),
dict(id=6)
]
),
bases=[2, 2, 2, 2, 2, 2, 2, 2]
),
dict(
question=dict(
tally_type="plurality-at-large",
answers=[
dict(id=0),
]
),
bases=[2, 2]
),
dict(
question=dict(
tally_type="borda",
max=1,
answers=[
dict(id=0),
]
),
bases=[2, 2]
),
dict(
question=dict(
tally_type="borda",
max=2,
answers=[
dict(id=0),
dict(id=1),
dict(id=2)
]
),
bases=[2, 3, 3, 3]
),
]
for data in data_list:
codec = NVotesCodec(data["question"])
self.assertEqual(codec.get_bases(), data["bases"])
def test_encode_raw_ballot(self):
# The question contains the minimum data required for the encoder to work
data_list = [
dict(
question=dict(
tally_type="plurality-at-large",
answers=[
dict(id=0),
dict(id=1, selected=0),
dict(id=2),
dict(id=3),
dict(id=4),
dict(id=5, selected=1),
dict(id=6)
]
),
bases= [2, 2, 2, 2, 2, 2, 2, 2],
choices=[0, 0, 1, 0, 0, 0, 1, 0]
),
dict(
question=dict(
tally_type="plurality-at-large",
answers=[
dict(id=0,selected=0),
dict(id=1,selected=0),
dict(id=2),
dict(id=3),
dict(id=4),
dict(id=5, selected=0),
dict(id=6)
]
),
bases= [2, 2, 2, 2, 2, 2, 2, 2],
choices=[0, 1, 1, 0, 0, 0, 1, 0]
),
dict(
question=dict(
tally_type="borda",
max=3,
answers=[
dict(id=0,selected=0),
dict(id=1,selected=2),
dict(id=2),
dict(id=3),
dict(id=4),
dict(id=5, selected=1),
dict(id=6)
]
),
bases= [2, 4, 4, 4, 4, 4, 4, 4],
choices=[0, 1, 3, 0, 0, 0, 2, 0]
),
dict(
question=dict(
tally_type="plurality-at-large",
answers=[
dict(id=0,selected=1),
dict(id=1),
dict(
id=2,
selected=1,
urls=[dict(title='invalidVoteFlag', url='true')]
)
]
),
bases= [2, 2, 2],
choices=[1, 1, 0]
),
dict(
question=dict(
tally_type="borda",
max=2,
extra_options=dict(allow_writeins=True),
answers=[
dict(id=0, selected=0),
dict(id=1),
dict(id=2),
dict(
id=3,
selected=0,
urls=[dict(title='invalidVoteFlag', url='true')]
),
dict(
id=4,
text='D',
selected=1,
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=5,
text='',
urls=[dict(title='isWriteIn', url='true')]
)
]
),
bases= [2, 3, 3, 3, 3, 3, 256, 256, 256],
choices= [1, 1, 0, 0, 2, 0, 68, 0, 0]
),
dict(
question=dict(
tally_type="plurality-at-large",
extra_options=dict(allow_writeins=True),
max=3,
answers=[
dict(id=0, selected=1),
dict(id=1),
dict(id=2),
dict(
id=3,
urls=[dict(title='invalidVoteFlag', url='true')]
),
dict(
id=4,
text='E',
selected=1,
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=5,
text='',
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=6,
selected=1,
text='Ä bc',
urls=[dict(title='isWriteIn', url='true')]
)
]
),
bases= [2, 2, 2, 2, 2, 2, 2, 256, 256, 256, 256, 256, 256, 256, 256, 256],
choices= [0, 1, 0, 0, 1, 0, 1, 69, 0, 0, 195, 132, 32, 98, 99, 0]
),
]
for data in data_list:
codec = NVotesCodec(data["question"])
self.assertTrue(codec.sanity_check())
# check raw ballot getter
raw_ballot = codec.encode_raw_ballot()
self.assertEqual(
raw_ballot,
dict(
bases=data['bases'],
choices=data['choices']
)
)
def test_decode_raw_ballot(self):
# The question contains the minimum data required for the encoder to work
data_list = [
dict(
question=dict(
tally_type="plurality-at-large",
answers=[
dict(id=0),
dict(id=1),
dict(id=2),
dict(id=3),
dict(id=4),
dict(id=5),
dict(id=6)
]
),
decoded_ballot=dict(
tally_type="plurality-at-large",
answers=[
dict(id=0, selected=-1),
dict(id=1, selected=0 ),
dict(id=2, selected=-1),
dict(id=3, selected=-1),
dict(id=4, selected=-1),
dict(id=5, selected=0 ),
dict(id=6, selected=-1)
]
),
bases= [2, 2, 2, 2, 2, 2, 2, 2],
choices=[0, 0, 1, 0, 0, 0, 1, 0]
),
dict(
question=dict(
tally_type="plurality-at-large",
answers=[
dict(id=0),
dict(id=1),
dict(id=2),
dict(id=3),
dict(id=4),
dict(id=5),
dict(id=6)
]
),
decoded_ballot=dict(
tally_type="plurality-at-large",
answers=[
dict(id=0, selected=0 ),
dict(id=1, selected=0 ),
dict(id=2, selected=-1),
dict(id=3, selected=-1),
dict(id=4, selected=-1),
dict(id=5, selected=0 ),
dict(id=6, selected=-1)
]
),
bases= [2, 2, 2, 2, 2, 2, 2, 2],
choices=[0, 1, 1, 0, 0, 0, 1, 0]
),
dict(
question=dict(
tally_type="borda",
answers=[
dict(id=0),
dict(id=1),
dict(id=2),
dict(id=3),
dict(id=4),
dict(id=5),
dict(id=6)
]
),
decoded_ballot=dict(
tally_type="borda",
answers=[
dict(id=0, selected=0 ),
dict(id=1, selected=2 ),
dict(id=2, selected=-1),
dict(id=3, selected=-1),
dict(id=4, selected=-1),
dict(id=5, selected=1 ),
dict(id=6, selected=-1)
]
),
bases= [2, 4, 4, 4, 4, 4, 4, 4],
choices=[0, 1, 3, 0, 0, 0, 2, 0]
),
dict(
question=dict(
tally_type="plurality-at-large",
answers=[
dict(id=0),
dict(id=1),
dict(
id=2,
selected=1,
urls=[dict(title='invalidVoteFlag', url='true')]
)
]
),
decoded_ballot=dict(
tally_type="plurality-at-large",
answers=[
dict(id=0, selected=0 ),
dict(id=1, selected=-1),
dict(
id=2,
selected=0,
urls=[dict(title='invalidVoteFlag', url='true')]
)
]
),
bases= [2, 2, 2],
choices=[1, 1, 0]
),
dict(
question=dict(
tally_type="borda",
max=2,
extra_options=dict(allow_writeins=True),
answers=[
dict(id=0),
dict(id=1),
dict(id=2),
dict(
id=3,
urls=[dict(title='invalidVoteFlag', url='true')]
),
dict(
id=4,
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=5,
urls=[dict(title='isWriteIn', url='true')]
)
]
),
decoded_ballot=dict(
tally_type="borda",
max=2,
extra_options=dict(allow_writeins=True),
answers=[
dict(id=0, selected=0 ),
dict(id=1, selected=-1),
dict(id=2, selected=-1),
dict(
id=3,
selected=0,
urls=[dict(title='invalidVoteFlag', url='true')]
),
dict(
id=4,
text='D',
selected=1,
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=5,
text='',
selected=-1,
urls=[dict(title='isWriteIn', url='true')]
)
]
),
bases= [2, 3, 3, 3, 3, 3, 256, 256, 256],
choices=[1, 1, 0, 0, 2, 0, 68, 0, 0]
),
dict(
question=dict(
tally_type="plurality-at-large",
extra_options=dict(allow_writeins=True),
max=3,
answers=[
dict(id=0),
dict(id=1),
dict(id=2),
dict(
id=3,
urls=[dict(title='invalidVoteFlag', url='true')]
),
dict(
id=4,
text='E',
selected=1,
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=5,
text='',
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=6,
text='Ä bc',
urls=[dict(title='isWriteIn', url='true')]
)
]
),
decoded_ballot=dict(
tally_type="plurality-at-large",
extra_options=dict(allow_writeins=True),
max=3,
answers=[
dict(id=0, selected=0 ),
dict(id=1, selected=-1),
dict(id=2, selected=-1),
dict(
id=3,
urls=[dict(title='invalidVoteFlag', url='true')]
),
dict(
id=4,
text='E',
selected=0,
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=5,
text='',
selected=-1,
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=6,
selected=0,
text='Ä bc',
urls=[dict(title='isWriteIn', url='true')]
)
]
),
bases= [2, 2, 2, 2, 2, 2, 2, 256, 256, 256, 256, 256, 256, 256, 256, 256],
choices=[0, 1, 0, 0, 1, 0, 1, 69, 0, 0, 195, 132, 32, 98, 99, 0]
),
]
for data in data_list:
codec = NVotesCodec(data["question"])
self.assertTrue(codec.sanity_check())
# check raw ballot getter
decoded_ballot = codec.decode_raw_ballot(dict(
bases=data['bases'],
choices=data['choices']
))
self.assertEqual(
decoded_ballot,
data['decoded_ballot']
)
  def test_encode_decode_cycle(self):
# The question contains the minimum data required for the encoder to work
data_list = [
dict(
question=dict(
tally_type="plurality-at-large",
extra_options=dict(allow_writeins=True),
max=3,
answers=[
dict(id=0),
dict(id=1),
dict(id=2),
dict(
id=3,
urls=[dict(title='invalidVoteFlag', url='true')]
),
dict(
id=4,
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=5,
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=6,
urls=[dict(title='isWriteIn', url='true')]
)
]
),
ballot=dict(
tally_type="plurality-at-large",
extra_options=dict(allow_writeins=True),
max=3,
answers=[
dict(id=0, selected=0 ),
dict(id=1, selected=-1),
dict(id=2, selected=-1),
dict(
id=3,
selected=-1,
urls=[dict(title='invalidVoteFlag', url='true')]
),
dict(
id=4,
text='E',
selected=0,
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=5,
text='',
selected=-1,
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=6,
selected=0,
text='Ä bc',
urls=[dict(title='isWriteIn', url='true')]
)
]
),
raw_ballot=dict(
bases= [2, 2, 2, 2, 2, 2, 2, 256, 256, 256, 256, 256, 256, 256, 256, 256],
choices=[0, 1, 0, 0, 1, 0, 1, 69, 0, 0, 195, 132, 32, 98, 99, 0]
),
int_ballot=916649230342635397842
),
]
for data in data_list:
# 1. encode from ballot to rawBallot and test it
encoder = NVotesCodec(data["ballot"])
self.assertTrue(encoder.sanity_check())
raw_ballot = encoder.encode_raw_ballot()
self.assertEqual(raw_ballot, data["raw_ballot"])
# 2. encode from raw_ballot to BigInt and test it
int_ballot = encoder.encode_to_int(raw_ballot)
self.assertEqual(int_ballot, data["int_ballot"])
# 3. create a pristine encoder using the question without any selection
# set, and decode from BigInt to raw_ballot and test it
decoder = NVotesCodec(data["question"])
self.assertTrue(decoder.sanity_check())
decoded_raw_ballot = decoder.decode_from_int(data["int_ballot"])
self.assertEqual(decoded_raw_ballot, data["raw_ballot"])
# 4. decode from raw ballot to ballot and test it
decoded_ballot = decoder.decode_raw_ballot(decoded_raw_ballot)
self.assertEqual(decoded_ballot, data["ballot"])
def test_biggest_encodable_ballot(self):
data_list = [
dict(
question=dict(
tally_type="plurality-at-large",
answers=[
dict(id=0),
dict(id=1)
]
),
expected_value=7
),
dict(
question=dict(
tally_type="borda",
max=3,
answers=[
dict(id=0),
dict(id=1),
dict(id=2)
]
),
expected_value=(1 + 3*2 + 3*2*4 + 3*2*4*4) # 127
),
dict(
question=dict(
tally_type="plurality-at-large",
max=3,
extra_options=dict(allow_writeins=True),
answers=[
dict(id=0),
dict(id=1),
dict(id=2),
dict(
id=3,
urls=[dict(title='invalidVoteFlag', url='true')]
),
dict(
id=4,
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=5,
urls=[dict(title='isWriteIn', url='true')]
),
dict(
id=6,
urls=[dict(title='isWriteIn', url='true')]
)
]
),
# highest_value_list = [1, 1, 1, 1, 1, 1, 1, 255, 255, 255]
# bases = [2, 2, 2, 2, 2, 2, 2, 256, 256, 256]
# expected_value = (1 + 1*2 + 2**2 + 2**3 + 2**4 + 2**5 + 2**6 + 255*(2**7) + 255*(2**7)*256 + 255*(2**7)*(256**2)) = 2147483647
expected_value=2147483647
)
]
for data in data_list:
codec = NVotesCodec(data["question"])
self.assertTrue(codec.sanity_check())
# check the number of bytes left
self.assertEqual(
codec.biggest_encodable_normal_ballot(),
data['expected_value']
)
def test_num_write_in_bytes_left(self):
data_list = [
dict(
question=dict(
tally_type='plurality-at-large',
answers=[
dict(id=0),
dict(id=1)
]
),
modulus=111,
bytes_left='throws'
),
dict(
question=dict(
tally_type='plurality-at-large',
max=1,
extra_options=dict(allow_writeins=True),
answers=[
dict(id=0),
dict(id=1),
dict(
id=2,
urls=[dict(title='isWriteIn', url='true')]
)
]
),
# bases = [2, 2, 2, 2, 256]
# biggest normal ballot = [1, 1, 1, 1, 255]
# minimum encoded modulus for one byte free:
# modulus = dict(
# bases=[2, 2, 2, 2, 256, 256, 256]
# value=[0, 0, 0, 0, 0, 0, 1 ]
modulus=(1*2*2*2*2*256*256), # 1048576
bytes_left=0
),
dict(
question=dict(
tally_type='plurality-at-large',
max=1,
extra_options=dict(allow_writeins=True),
answers=[
dict(id=0),
dict(id=1),
dict(
id=2,
urls=[dict(title='isWriteIn', url='true')]
)
]
),
# bases = [2, 2, 2, 2, 256]
# biggest normal ballot = [1, 1, 1, 1, 255]
# minimum encoded modulus for one byte free:
# modulus = dict(
# bases=[2, 2, 2, 2, 256, 256, 256]
# value=[0, 0, 0, 0, 0, 0, 1 ]
modulus=(1*2*2*2*2*256*256+1), # 1048577
bytes_left=1
),
dict(
question=dict(
tally_type='plurality-at-large',
max=1,
extra_options=dict(allow_writeins=True),
answers=[
dict(id=0),
dict(id=1),
dict(
id=2,
urls=[dict(title='isWriteIn', url='true')]
)
]
),
# bases = [2, 2, 2, 2, 256]
# biggest normal ballot = [1, 1, 1, 1, 255]
# minimum encoded modulus for 2 bytes free:
# modulus = dict(
# bases=[2, 2, 2, 2, 256, 256, 256, 256]
# value=[0, 0, 0, 0, 0, 0, 0, 1 ]
modulus=(1*2*2*2*2*256*256*256), # 268435456
bytes_left=1
),
dict(
question=dict(
tally_type='plurality-at-large',
max=1,
extra_options=dict(allow_writeins=True),
answers=[
dict(id=0),
dict(id=1),
dict(
id=2,
urls=[dict(title='isWriteIn', url='true')]
)
]
),
# bases = [2, 2, 2, 2, 256]
# biggest normal ballot = [1, 1, 1, 1, 255]
# minimum encoded modulus for 2 bytes free:
# modulus = {
# bases=[2, 2, 2, 2, 256, 256, 256, 256]
# value=[0, 0, 0, 0, 0, 0, 0, 1 ]
modulus=(1*2*2*2*2*256*256*256+1), # 268435457
bytes_left=2
),
]
for data in data_list:
codec = NVotesCodec(data["question"])
self.assertTrue(codec.sanity_check())
# check the number of bytes left
if data["bytes_left"] == 'throws':
with self.assertRaises(Exception):
codec.num_write_in_bytes_left(data["modulus"])
else:
self.assertEqual(
codec.num_write_in_bytes_left(data["modulus"]),
data["bytes_left"]
)
| agpl-3.0 | -8,964,742,366,602,631,000 | 30.878912 | 141 | 0.524092 | false |
octaflop/artofpython | art/turtle/hilbert.py | 1 | 1024 | # -*- coding: utf-8 -*-
import turtle as t
iteration = 8
length = 5
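# A Hilbert curve of order n is drawn by two mutually recursive procedures
# (a "left"-turning and a "right"-turning variant), each tracing rotated
# copies of the order n-1 curve joined by straight segments; see below.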
def left_hilbert(order, step):
    # Draw a left-turning Hilbert curve of the given order; `step` is the
    # length of each straight segment.
    if order == 0:
        return
    t.right(90)
    right_hilbert(order - 1, step)
    t.forward(step)
    t.left(90)
    left_hilbert(order - 1, step)
    t.forward(step)
    left_hilbert(order - 1, step)
    t.left(90)
    t.forward(step)
    right_hilbert(order - 1, step)
    t.right(90)
def right_hilbert(order, step):
    # Mirror image of left_hilbert: the same recursion with all turns
    # reversed.
    if order == 0:
        return
    t.left(90)
    left_hilbert(order - 1, step)
    t.forward(step)
    t.right(90)
    right_hilbert(order - 1, step)
    t.forward(step)
    right_hilbert(order - 1, step)
    t.right(90)
    t.forward(step)
    left_hilbert(order - 1, step)
    t.left(90)
if __name__ == '__main__':
# setup
# t.hideturtle()
t.speed(0)
# t.up()
# t.setpos([-800, 0])
# t.setup(width=800, height=800)
t.title("hilbert")
# draw
t.down()
left_hilbert(iteration, length)
# bye!
t.done()
t.bye()
| mit | 3,001,966,034,552,017,000 | 18.320755 | 36 | 0.56543 | false |
shivkantranade/geointegration | geointegration/settings.py | 1 | 2145 | """
Django settings for skr_webapp project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5sc0+rt47lwz_&us6=_rx)4i=tep$4*&61nyu24-$9l4vx69%w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'engage',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'geointegration.urls'
WSGI_APPLICATION = 'geointegration.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'US/Pacific'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
) | unlicense | -6,588,610,857,729,066,000 | 23.386364 | 71 | 0.723077 | false |
cjayb/mne-python | mne/io/tests/test_raw.py | 2 | 15935 | # -*- coding: utf-8 -*-
"""Generic tests that all raw classes should run."""
# Authors: MNE Developers
# Stefan Appelhoff <[email protected]>
#
# License: BSD (3-clause)
from os import path as op
import math
import re
import pytest
import numpy as np
from numpy.testing import (assert_allclose, assert_array_almost_equal,
assert_array_equal)
from mne import concatenate_raws, create_info, Annotations
from mne.datasets import testing
from mne.externals.h5io import read_hdf5, write_hdf5
from mne.io import read_raw_fif, RawArray, BaseRaw, Info, _writing_info_hdf5
from mne.utils import (_TempDir, catch_logging, _raw_annot, _stamp_to_dt,
object_diff, check_version)
from mne.io.meas_info import _get_valid_units
from mne.io._digitization import DigPoint
def assert_named_constants(info):
"""Assert that info['chs'] has named constants."""
# for now we just check one
__tracebackhide__ = True
r = repr(info['chs'][0])
for check in ('.*FIFFV_COORD_.*', '.*FIFFV_COIL_.*', '.*FIFF_UNIT_.*',
'.*FIFF_UNITM_.*',):
assert re.match(check, r, re.DOTALL) is not None, (check, r)
def test_orig_units():
"""Test the error handling for original units."""
# Should work fine
info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg')
BaseRaw(info, last_samps=[1], orig_units={'Cz': 'nV'})
# Should complain that channel Cz does not have a corresponding original
# unit.
with pytest.raises(ValueError, match='has no associated original unit.'):
info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg')
BaseRaw(info, last_samps=[1], orig_units={'not_Cz': 'nV'})
# Test that a non-dict orig_units argument raises a ValueError
with pytest.raises(ValueError, match='orig_units must be of type dict'):
info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg')
BaseRaw(info, last_samps=[1], orig_units=True)
def _test_raw_reader(reader, test_preloading=True, test_kwargs=True,
boundary_decimal=2, **kwargs):
"""Test reading, writing and slicing of raw classes.
Parameters
----------
reader : function
Function to test.
    test_preloading : bool
        Whether the reader supports ``preload=False``. If True, both the
        preloaded and non-preloaded cases, plus memory mapping to file,
        are tested.
    test_kwargs : bool
        Test _init_kwargs support.
boundary_decimal : int
Number of decimals up to which the boundary should match.
**kwargs :
Arguments for the reader. Note: Do not use preload as kwarg.
Use ``test_preloading`` instead.
Returns
-------
raw : instance of Raw
A preloaded Raw object.
"""
tempdir = _TempDir()
rng = np.random.RandomState(0)
montage = None
if "montage" in kwargs:
montage = kwargs['montage']
del kwargs['montage']
if test_preloading:
raw = reader(preload=True, **kwargs)
rep = repr(raw)
assert rep.count('<') == 1
assert rep.count('>') == 1
if montage is not None:
raw.set_montage(montage)
# don't assume the first is preloaded
buffer_fname = op.join(tempdir, 'buffer')
picks = rng.permutation(np.arange(len(raw.ch_names) - 1))[:10]
picks = np.append(picks, len(raw.ch_names) - 1) # test trigger channel
bnd = min(int(round(raw.buffer_size_sec *
raw.info['sfreq'])), raw.n_times)
slices = [slice(0, bnd), slice(bnd - 1, bnd), slice(3, bnd),
slice(3, 300), slice(None), slice(1, bnd)]
if raw.n_times >= 2 * bnd: # at least two complete blocks
slices += [slice(bnd, 2 * bnd), slice(bnd, bnd + 1),
slice(0, bnd + 100)]
other_raws = [reader(preload=buffer_fname, **kwargs),
reader(preload=False, **kwargs)]
for sl_time in slices:
data1, times1 = raw[picks, sl_time]
for other_raw in other_raws:
data2, times2 = other_raw[picks, sl_time]
assert_allclose(data1, data2)
assert_allclose(times1, times2)
else:
raw = reader(**kwargs)
assert_named_constants(raw.info)
full_data = raw._data
assert raw.__class__.__name__ in repr(raw) # to test repr
assert raw.info.__class__.__name__ in repr(raw.info)
assert isinstance(raw.info['dig'], (type(None), list))
data_max = full_data.max()
data_min = full_data.min()
# these limits could be relaxed if we actually find data with
# huge values (in SI units)
assert data_max < 1e5
assert data_min > -1e5
if isinstance(raw.info['dig'], list):
for di, d in enumerate(raw.info['dig']):
assert isinstance(d, DigPoint), (di, d)
# gh-5604
meas_date = raw.info['meas_date']
assert meas_date is None or meas_date >= _stamp_to_dt((0, 0))
# test resetting raw
if test_kwargs:
raw2 = reader(**raw._init_kwargs)
assert set(raw.info.keys()) == set(raw2.info.keys())
assert_array_equal(raw.times, raw2.times)
# Test saving and reading
out_fname = op.join(tempdir, 'test_raw.fif')
raw = concatenate_raws([raw])
raw.save(out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1)
raw3 = read_raw_fif(out_fname)
assert_named_constants(raw3.info)
assert set(raw.info.keys()) == set(raw3.info.keys())
assert_allclose(raw3[0:20][0], full_data[0:20], rtol=1e-6,
atol=1e-20) # atol is very small but > 0
assert_array_almost_equal(raw.times, raw3.times)
assert not math.isnan(raw3.info['highpass'])
assert not math.isnan(raw3.info['lowpass'])
assert not math.isnan(raw.info['highpass'])
assert not math.isnan(raw.info['lowpass'])
assert raw3.info['kit_system_id'] == raw.info['kit_system_id']
# Make sure concatenation works
first_samp = raw.first_samp
last_samp = raw.last_samp
concat_raw = concatenate_raws([raw.copy(), raw])
assert concat_raw.n_times == 2 * raw.n_times
assert concat_raw.first_samp == first_samp
assert concat_raw.last_samp - last_samp + first_samp == last_samp + 1
idx = np.where(concat_raw.annotations.description == 'BAD boundary')[0]
expected_bad_boundary_onset = raw._last_time
assert_array_almost_equal(concat_raw.annotations.onset[idx],
expected_bad_boundary_onset,
decimal=boundary_decimal)
if raw.info['meas_id'] is not None:
for key in ['secs', 'usecs', 'version']:
assert raw.info['meas_id'][key] == raw3.info['meas_id'][key]
assert_array_equal(raw.info['meas_id']['machid'],
raw3.info['meas_id']['machid'])
assert isinstance(raw.annotations, Annotations)
# Make a "soft" test on units: They have to be valid SI units as in
# mne.io.meas_info.valid_units, but we accept any lower/upper case for now.
valid_units = _get_valid_units()
valid_units_lower = [unit.lower() for unit in valid_units]
if raw._orig_units is not None:
assert isinstance(raw._orig_units, dict)
for ch_name, unit in raw._orig_units.items():
assert unit.lower() in valid_units_lower, ch_name
# Test picking with and without preload
if test_preloading:
preload_kwargs = (dict(preload=True), dict(preload=False))
else:
preload_kwargs = (dict(),)
n_ch = len(raw.ch_names)
picks = rng.permutation(n_ch)
for preload_kwarg in preload_kwargs:
these_kwargs = kwargs.copy()
these_kwargs.update(preload_kwarg)
# don't use the same filename or it could create problems
if isinstance(these_kwargs.get('preload', None), str) and \
op.isfile(these_kwargs['preload']):
these_kwargs['preload'] += '-1'
whole_raw = reader(**these_kwargs)
print(whole_raw) # __repr__
assert n_ch >= 2
picks_1 = picks[:n_ch // 2]
picks_2 = picks[n_ch // 2:]
raw_1 = whole_raw.copy().pick(picks_1)
raw_2 = whole_raw.copy().pick(picks_2)
data, times = whole_raw[:]
data_1, times_1 = raw_1[:]
data_2, times_2 = raw_2[:]
assert_array_equal(times, times_1)
assert_array_equal(data[picks_1], data_1)
        assert_array_equal(times, times_2)
assert_array_equal(data[picks_2], data_2)
# Make sure that writing info to h5 format
# (all fields should be compatible)
if check_version('h5py'):
fname_h5 = op.join(tempdir, 'info.h5')
with _writing_info_hdf5(raw.info):
write_hdf5(fname_h5, raw.info)
new_info = Info(read_hdf5(fname_h5))
assert object_diff(new_info, raw.info) == ''
return raw
def _test_concat(reader, *args):
"""Test concatenation of raw classes that allow not preloading."""
data = None
for preload in (True, False):
raw1 = reader(*args, preload=preload)
raw2 = reader(*args, preload=preload)
raw1.append(raw2)
raw1.load_data()
if data is None:
data = raw1[:, :][0]
assert_allclose(data, raw1[:, :][0])
for first_preload in (True, False):
raw = reader(*args, preload=first_preload)
data = raw[:, :][0]
for preloads in ((True, True), (True, False), (False, False)):
for last_preload in (True, False):
t_crops = raw.times[np.argmin(np.abs(raw.times - 0.5)) +
[0, 1]]
raw1 = raw.copy().crop(0, t_crops[0])
if preloads[0]:
raw1.load_data()
raw2 = raw.copy().crop(t_crops[1], None)
if preloads[1]:
raw2.load_data()
raw1.append(raw2)
if last_preload:
raw1.load_data()
assert_allclose(data, raw1[:, :][0])
@testing.requires_testing_data
def test_time_as_index():
"""Test indexing of raw times."""
raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test_raw.fif')
raw = read_raw_fif(raw_fname)
# Test original (non-rounding) indexing behavior
orig_inds = raw.time_as_index(raw.times)
    assert len(set(orig_inds)) != len(orig_inds)
# Test new (rounding) indexing behavior
new_inds = raw.time_as_index(raw.times, use_rounding=True)
assert_array_equal(new_inds, np.arange(len(raw.times)))
@pytest.mark.parametrize('offset, origin', [
pytest.param(0, None, id='times in s. relative to first_samp (default)'),
pytest.param(0, 2.0, id='times in s. relative to first_samp'),
pytest.param(1, 1.0, id='times in s. relative to meas_date'),
pytest.param(2, 0.0, id='absolute times in s. relative to 0')])
def test_time_as_index_ref(offset, origin):
"""Test indexing of raw times."""
info = create_info(ch_names=10, sfreq=10.)
raw = RawArray(data=np.empty((10, 10)), info=info, first_samp=10)
raw.set_meas_date(1)
relative_times = raw.times
inds = raw.time_as_index(relative_times + offset,
use_rounding=True,
origin=origin)
assert_array_equal(inds, np.arange(raw.n_times))
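# Worked intuition for the parametrization above (illustrative, derived from
# the test setup: sfreq=10 and first_samp=10, so sample 0 sits 1 s after
# meas_date=1 s and 2 s after the absolute zero):
#   origin=None or 2.0 -> times counted from first_samp -> offset 0
#   origin=1.0         -> times counted from meas_date  -> offset 1
#   origin=0.0         -> absolute times                -> offset 2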
def test_meas_date_orig_time():
"""Test the relation between meas_time in orig_time."""
# meas_time is set and orig_time is set:
# clips the annotations based on raw.data and resets the annotation based
    # on raw.info['meas_date']
raw = _raw_annot(1, 1.5)
assert raw.annotations.orig_time == _stamp_to_dt((1, 0))
assert raw.annotations.onset[0] == 1
# meas_time is set and orig_time is None:
    # Consider annot.orig_time to be raw.first_samp, clip and reset
    # annotations to have raw.annotations.orig_time == raw.info['meas_date']
raw = _raw_annot(1, None)
assert raw.annotations.orig_time == _stamp_to_dt((1, 0))
assert raw.annotations.onset[0] == 1.5
# meas_time is None and orig_time is set:
    # Raise an error: it makes no sense to have an annotations object whose
    # acquisition time is known and attach it to a raw object that does not
    # know when it was acquired.
with pytest.raises(RuntimeError, match='Ambiguous operation'):
_raw_annot(None, 1.5)
# meas_time is None and orig_time is None:
# Consider annot.orig_time to be raw.first_sample and clip
raw = _raw_annot(None, None)
assert raw.annotations.orig_time is None
assert raw.annotations.onset[0] == 1.5
assert raw.annotations.duration[0] == 0.2
def test_get_data_reject():
"""Test if reject_by_annotation is working correctly."""
fs = 256
ch_names = ["C3", "Cz", "C4"]
info = create_info(ch_names, sfreq=fs)
raw = RawArray(np.zeros((len(ch_names), 10 * fs)), info)
raw.set_annotations(Annotations(onset=[2, 4], duration=[3, 2],
description="bad"))
with catch_logging() as log:
data = raw.get_data(reject_by_annotation="omit", verbose=True)
msg = ('Omitting 1024 of 2560 (40.00%) samples, retaining 1536' +
' (60.00%) samples.')
assert log.getvalue().strip() == msg
assert data.shape == (len(ch_names), 1536)
with catch_logging() as log:
data = raw.get_data(reject_by_annotation="nan", verbose=True)
msg = ('Setting 1024 of 2560 (40.00%) samples to NaN, retaining 1536' +
' (60.00%) samples.')
assert log.getvalue().strip() == msg
assert data.shape == (len(ch_names), 2560) # shape doesn't change
assert np.isnan(data).sum() == 3072 # but NaNs are introduced instead
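# Arithmetic behind the numbers above (derived from the test setup): the two
# annotations cover [2, 5) s and [4, 6) s, whose union is 4 s = 4 * 256 = 1024
# of the 2560 samples (40.00%); in "nan" mode those 1024 samples become NaN on
# each of the 3 channels, hence 3 * 1024 = 3072 NaNs.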
def test_5839():
"""Test concatenating raw objects with annotations."""
# Global Time 0 1 2 3 4
# .
# raw_A |---------XXXXXXXXXX
# annot |--------------AA
# latency . 0 0 1 1 2 2 3
# . 5 0 5 0 5 0
#
# raw_B . |---------YYYYYYYYYY
# annot . |--------------AA
# latency . 0 1
# . 5 0
# .
# output |---------XXXXXXXXXXYYYYYYYYYY
# annot |--------------AA---|----AA
# latency . 0 0 1 1 2 2 3
# . 5 0 5 0 5 0
#
EXPECTED_ONSET = [1.5, 2., 2., 2.5]
EXPECTED_DURATION = [0.2, 0., 0., 0.2]
EXPECTED_DESCRIPTION = ['dummy', 'BAD boundary', 'EDGE boundary', 'dummy']
def raw_factory(meas_date):
raw = RawArray(data=np.empty((10, 10)),
info=create_info(ch_names=10, sfreq=10.),
first_samp=10)
raw.set_meas_date(meas_date)
raw.set_annotations(annotations=Annotations(onset=[.5],
duration=[.2],
description='dummy',
orig_time=None))
return raw
raw_A, raw_B = [raw_factory((x, 0)) for x in [0, 2]]
raw_A.append(raw_B)
assert_array_equal(raw_A.annotations.onset, EXPECTED_ONSET)
assert_array_equal(raw_A.annotations.duration, EXPECTED_DURATION)
assert_array_equal(raw_A.annotations.description, EXPECTED_DESCRIPTION)
assert raw_A.annotations.orig_time == _stamp_to_dt((0, 0))
def test_repr():
"""Test repr of Raw."""
sfreq = 256
info = create_info(3, sfreq)
r = repr(RawArray(np.zeros((3, 10 * sfreq)), info))
assert re.search('<RawArray | 3 x 2560 (10.0 s), ~.* kB, data loaded>',
r) is not None, r
| bsd-3-clause | 1,979,013,704,983,558,700 | 38.937343 | 79 | 0.575275 | false |
rolisz/receipt_budget | receipts/receipts/models.py | 1 | 3725 | from collections import namedtuple
import datetime
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Sum
from django.contrib.auth.models import User
from django.db.models.signals import pre_save
from django.dispatch import receiver
from geopy.geocoders import GoogleV3
from geopy.geocoders.googlev3 import GeocoderQueryError
class UserExpenseManager(models.Manager):
def for_user(self, user):
return super(UserExpenseManager, self).get_query_set().filter(user=user)
class Shop(models.Model):
name = models.CharField(max_length=50)
address = models.TextField(blank=True)
cui = models.CharField(max_length=30, blank=True, verbose_name="C.U.I.")
lat = models.FloatField(null=True, blank=True)
lon = models.FloatField(null=True, blank=True)
def __unicode__(self):
if self.address != 'unknown':
return self.name + ((" at " + self.address) if self.address else "")
else:
return self.name
class Expense(models.Model):
objects = UserExpenseManager()
date = models.DateField('expense date')
shop = models.ForeignKey(Shop)
image = models.ImageField(upload_to='receipts/', null=True, blank=True)
user = models.ForeignKey(User)
def __unicode__(self):
return str(self.date) + " - " + str(self.total) + " at " +\
str(self.shop.name)
def _get_total(self):
return self.expenseitem_set.all().aggregate(Sum('price'))['price__sum']
@classmethod
def from_receipt(cls, image, user):
from receipts.receipt import Receipt
rec = Receipt(image)
rec.analyze_text()
props = rec.props
print(props)
shop = Shop.objects.get_or_create(name=props['shop'], address=props['address'], cui=props['cui'])[0]
try:
exp = shop.expense_set.create(date=props['data'], user=user, image=image)
except ValidationError:
exp = shop.expense_set.create(date=datetime.date.today(), user=user, image=image)
for it, price in props['items']:
exp.expenseitem_set.create(name=it, price=price)
return exp
total = property(_get_total)
class ExpenseItem(models.Model):
name = models.CharField(max_length=50)
price = models.DecimalField(decimal_places=2, max_digits=10)
category = models.CharField(max_length=50, blank=True)
expense = models.ForeignKey(Expense, null=True, default=None)
def __unicode__(self):
return self.name + " for " + str(self.price)
geolocator = GoogleV3()
@receiver(pre_save, sender=Shop)
def my_handler(sender, instance, **kwargs):
"""
When editing a shop, do a geocoding request if address changed
"""
print(instance)
try:
obj = Shop.objects.get(pk=instance.pk)
except Shop.DoesNotExist:
try:
address, (latitude, longitude) = geolocator.geocode(instance.address, exactly_one=False)[0]
instance.lat = latitude
instance.lon = longitude
        except GeocoderQueryError:
pass
return
if obj.address != instance.address:
if instance.address not in ["", "unknown"]:
try:
address, (latitude, longitude) = geolocator.geocode(instance.address, exactly_one=False)[0]
instance.lat = latitude
instance.lon = longitude
            except GeocoderQueryError:
                pass
    elif obj.lat != instance.lat or obj.lon != instance.lon:
try:
            address, (latitude, longitude) = geolocator.reverse((instance.lat, instance.lon), exactly_one=False)[0]
instance.address = address
        except GeocoderQueryError:
pass
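# Hedged usage sketch (not part of the original module): how the models and the
# pre_save geocoding hook above are expected to interact. The user "alice" and
# the shop data below are illustrative values only; this function is never
# called at import time.
def _usage_example():
    user = User.objects.get(username="alice")      # assumed pre-existing user
    shop = Shop(name="Corner Cafe", address="1 Example St")
    shop.save()                                    # pre_save handler geocodes the new address
    expense = shop.expense_set.create(date=datetime.date.today(), user=user)
    expense.expenseitem_set.create(name="coffee", price="2.50")
    expense.expenseitem_set.create(name="cake", price="3.00")
    return expense.total                           # Sum aggregate over the two items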
| bsd-3-clause | -7,431,672,799,133,433,000 | 34.47619 | 113 | 0.644832 | false |
GoogleCloudPlatform/keras-idiomatic-programmer | zoo/mobilenet/mobilenet_v2_c.py | 1 | 8542 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MobileNet v2 + composable (2019)
# Trainable params: 3,504,872
# Paper: https://arxiv.org/pdf/1801.04381.pdf
# 224x224 input: 3,504,872 parameters
import tensorflow as tf
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import ZeroPadding2D, Conv2D, BatchNormalization, ReLU
from tensorflow.keras.layers import DepthwiseConv2D, Add, GlobalAveragePooling2D, Dense
from tensorflow.keras.layers import Activation
from tensorflow.keras.regularizers import l2
import sys
sys.path.append('../')
from models_c import Composable
class MobileNetV2(Composable):
""" Construct a Mobile Convolution Neural Network V2 """
# Meta-parameter: number of filters and blocks per group
groups = [ { 'n_filters' : 16, 'n_blocks' : 1, 'strides': (1, 1) },
{ 'n_filters' : 24, 'n_blocks' : 2, 'strides': (2, 2) },
{ 'n_filters' : 32, 'n_blocks' : 3, 'strides': (2, 2) },
{ 'n_filters' : 64, 'n_blocks' : 4, 'strides': (2, 2) },
{ 'n_filters' : 96, 'n_blocks' : 3, 'strides': (1, 1) },
{ 'n_filters' : 160, 'n_blocks' : 3, 'strides': (2, 2) },
{ 'n_filters' : 320, 'n_blocks' : 1, 'strides': (1, 1) },
{ 'n_filters' : 1280, 'n_blocks' : 1 } ]
# Initial Hyperparameters
hyperparameters = { 'initializer': 'glorot_uniform',
'regularizer': l2(0.001),
'relu_clip' : 6.0,
'bn_epsilon' : None,
'use_bias' : False
}
def __init__(self, groups=None, alpha=1, expansion=6,
input_shape=(224, 224, 3), n_classes=1000, include_top=True,
**hyperparameters):
""" Construct a Mobile Convolution Neural Network V2
groups : number of filters and blocks per group
alpha : width multiplier
expansion : multiplier to expand the number of filters
input_shape : the input shape
n_classes : number of output classes
include_top : whether to include classifier
regularizer : kernel regularizer
initializer : kernel initializer
relu_clip : max value for ReLU
bn_epsilon : epsilon for batch norm
use_bias : whether to use a bias
"""
# Configure base (super) class
Composable.__init__(self, input_shape, include_top, self.hyperparameters, **hyperparameters)
if groups is None:
groups = list(self.groups)
inputs = Input(shape=input_shape)
# The Stem Group
x = self.stem(inputs, alpha=alpha)
# The Learner
outputs = self.learner(x, groups=groups, alpha=alpha, expansion=expansion)
# The Classifier
# Add hidden dropout layer
if include_top:
outputs = self.classifier(outputs, n_classes, dropout=0.0)
# Instantiate the Model
self._model = Model(inputs, outputs)
def stem(self, inputs, **metaparameters):
""" Construct the Stem Group
inputs : input tensor
alpha : width multiplier
"""
alpha = metaparameters['alpha']
# Calculate the number of filters for the stem convolution
# Must be divisible by 8
n_filters = max(8, (int(32 * alpha) + 4) // 8 * 8)
# Convolutional block
x = ZeroPadding2D(padding=((0, 1), (0, 1)))(inputs)
x = self.Conv2D(x, n_filters, (3, 3), strides=(2, 2), padding='valid', **metaparameters)
x = self.BatchNormalization(x)
x = self.ReLU(x)
return x
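    # Worked example of the filter rounding above (illustrative values):
    #   alpha = 0.35 -> int(32 * 0.35) = 11 -> (11 + 4) // 8 * 8 = 8  -> max(8, 8)  = 8
    #   alpha = 1.0  -> int(32 * 1.0)  = 32 -> (32 + 4) // 8 * 8 = 32 -> max(8, 32) = 32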
def learner(self, x, **metaparameters):
""" Construct the Learner
x : input to the learner
alpha : width multiplier
            expansion: multiplier to expand number of filters
"""
groups = metaparameters['groups']
alpha = metaparameters['alpha']
expansion = metaparameters['expansion']
        groups.pop()  # drop the final 1280-filter entry; it is added below as a 1x1 conv
# First Inverted Residual Convolution Group
group = groups.pop(0)
x = self.group(x, **group, alpha=alpha, expansion=1)
# Add remaining Inverted Residual Convolution Groups
for group in groups:
x = self.group(x, **group, alpha=alpha, expansion=expansion)
# Last block is a 1x1 linear convolutional layer,
# expanding the number of filters to 1280.
x = self.Conv2D(x, 1280, (1, 1), **metaparameters)
x = self.BatchNormalization(x)
x = self.ReLU(x)
return x
def group(self, x, **metaparameters):
""" Construct an Inverted Residual Group
x : input to the group
strides : whether first inverted residual block is strided.
n_blocks : number of blocks in the group
"""
n_blocks = metaparameters['n_blocks']
strides = metaparameters['strides']
del metaparameters['strides']
# In first block, the inverted residual block maybe strided - feature map size reduction
x = self.inverted_block(x, strides=strides, **metaparameters)
# Remaining blocks
for _ in range(n_blocks - 1):
x = self.inverted_block(x, strides=(1, 1), **metaparameters)
return x
def inverted_block(self, x, strides=(1, 1), **metaparameters):
""" Construct an Inverted Residual Block
x : input to the block
strides : strides
n_filters : number of filters
alpha : width multiplier
expansion : multiplier for expanding number of filters
"""
n_filters = metaparameters['n_filters']
        if 'alpha' in metaparameters:
            alpha = metaparameters['alpha']
        else:
            alpha = self.alpha
if 'expansion' in metaparameters:
expansion = metaparameters['expansion']
else:
expansion = self.expansion
del metaparameters['n_filters']
# Remember input
shortcut = x
# Apply the width filter to the number of feature maps for the pointwise convolution
filters = int(n_filters * alpha)
n_channels = int(x.shape[3])
# Dimensionality Expansion (non-first block)
if expansion > 1:
# 1x1 linear convolution
x = self.Conv2D(x, expansion * n_channels, (1, 1), padding='same', **metaparameters)
x = self.BatchNormalization(x)
x = self.ReLU(x)
# Strided convolution to match number of filters
if strides == (2, 2):
x = ZeroPadding2D(padding=((0, 1), (0, 1)))(x)
padding = 'valid'
else:
padding = 'same'
# Depthwise Convolution
x = self.DepthwiseConv2D(x, (3, 3), strides, padding=padding, **metaparameters)
x = self.BatchNormalization(x)
x = self.ReLU(x)
# Linear Pointwise Convolution
x = self.Conv2D(x, filters, (1, 1), strides=(1, 1), padding='same', **metaparameters)
x = self.BatchNormalization(x)
# Number of input filters matches the number of output filters
if n_channels == filters and strides == (1, 1):
x = Add()([shortcut, x])
return x
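# Illustrative shape walk-through of inverted_block (assumed input 28x28x24,
# alpha=1, expansion=6, strides=(1, 1); values are examples only):
#   1x1 expand    -> 28x28x144 (6 * 24 channels)
#   3x3 depthwise -> 28x28x144
#   1x1 project   -> 28x28x24
#   residual add  -> applied, since channel counts match and stride is (1, 1)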
# Example
# mobilenet = MobileNetV2()
def example():
''' Example for constructing/training a MobileNet V2 model on CIFAR-10
'''
# Example of constructing a mini-MobileNet
    groups = [ { 'n_filters': 16, 'n_blocks': 1, 'strides' : (2, 2) },
               { 'n_filters': 32, 'n_blocks': 2, 'strides' : (1, 1) },
               { 'n_filters': 64, 'n_blocks': 3, 'strides' : (1, 1) } ]
mobilenet = MobileNetV2(groups, input_shape=(32, 32, 3), n_classes=10)
mobilenet.model.summary()
mobilenet.cifar10()
# example()
| apache-2.0 | -1,422,260,383,125,294,800 | 37.304933 | 100 | 0.581948 | false |
comparemetrics/GoogleAppsAccountManager | src/GoogleAppsAccountManager/frontend/nickname2.py | 1 | 6343 | # -*- coding: utf-8 -*-
#
# GoogleAppsAccountManager: frontend/nickname2
# Copyright (C) 2012-2013 KAMEI Yutaka
#
# License: GNU General Public License version 2 or later
# Date: 2013-01-16, since 2013-01-16
#
# NICKNAME2 HELP
NICKNAME_HELP2 = """Usage: gapps-tool nickname2 <subcommand>
Available subcommands:
create - Create nickname_email
delete - Delete nickname_email
list - List all nickname_emails
listnicknamesof - List all nickname_emails of user_email
"""
# Subcommand list
__subcommand__ = { "create" : "_create"
, "delete" : "_delete"
, "list" : "_list"
, "listnicknamesof" : "_listnicknamesof"
, "create_f" : "_create_f"
, "delete_f" : "_delete_f"
}
import sys
from GoogleAppsAccountManager.multipledomainnickname import client as NICKNAME2
from GoogleAppsAccountManager.frontend import _func, _messages
def run(options, parser, my_name):
from GoogleAppsAccountManager.frontend import _runSubcommand
_runSubcommand(options, parser, my_name, __subcommand__, NICKNAME_HELP2)
def _create(options, parser):
# Set parser options
parser.add_argument( "user_email"
, action = "store"
, help = "User name."
)
parser.add_argument( "nickname_email"
, action = "store"
, help = "Nickname."
)
# Get options
namespace = parser.parse_args(options)
# Get auth token
auth_token = _func.getAuthTokenByLogin(namespace.admin_name, namespace.domain)
# Operation
return _func.operate( NICKNAME2.createAlias
, namespace.nickname_email.lower()
, parser.prog
, namespace.result_file
, namespace.domain
, auth_token
, namespace.user_email.lower() # user_email
, namespace.nickname_email.lower() # nickname_email
)
def _delete(options, parser):
# Set parser options
parser.add_argument( "nickname_email"
, action = "store"
, help = "Nickname."
)
# Get options
namespace = parser.parse_args(options)
# Get auth token
auth_token = _func.getAuthTokenByLogin(namespace.admin_name, namespace.domain)
# Operation
return _func.operate( NICKNAME2.deleteAlias
, namespace.nickname_email.lower()
, parser.prog
, namespace.result_file
, namespace.domain
, auth_token
, namespace.nickname_email.lower() # nickname_email
)
def _list(options, parser):
# Get options
namespace = parser.parse_args(options)
# Get auth token
auth_token = _func.getAuthTokenByLogin(namespace.admin_name, namespace.domain)
# Operation
NICKNAME2.outputAllAliases(namespace.domain, auth_token)
return True
def _listnicknamesof(options, parser):
# Set parser options
parser.add_argument( "user_email"
, action = "store"
, help = "User name."
)
# Get options
namespace = parser.parse_args(options)
# Get auth token
auth_token = _func.getAuthTokenByLogin(namespace.admin_name, namespace.domain)
# Operation
NICKNAME2.outputAllAliasesOfUser(namespace.domain, auth_token, namespace.user_email.lower())
return True
############################# CSV operation #############################
def _create_f(options, parser):
import csv
# Get options
namespace = parser.parse_args(options)
# Get auth token
auth_token = _func.getAuthTokenByLogin(namespace.admin_name, namespace.domain)
# Get records from csv file
with open(namespace.csv_file) as f:
reader = csv.DictReader(f)
# Check header
        header = next(f).replace("\n", "").split(",")
must_keys = ["user_email", "nickname_email"]
if not _func.checkValidHeader(header, *must_keys):
return False
f.seek(0, 0)
# Read csv
for record in reader:
must_values = ["user_email", "nickname_email"]
if not _func.checkRecordHasValue(record, *must_values):
continue
# Operation
_func.operate( NICKNAME2.createAlias
, record["nickname_email"].lower()
, parser.prog
, namespace.result_file
, namespace.domain
, auth_token
, record["user_email"].lower() # user_email
, record["nickname_email"] # nickname_email
)
return True
def _delete_f(options, parser):
import csv
# Get options
namespace = parser.parse_args(options)
# Get auth token
auth_token = _func.getAuthTokenByLogin(namespace.admin_name, namespace.domain)
# Get records from csv file
with open(namespace.csv_file) as f:
reader = csv.DictReader(f)
# Check header
        header = next(f).replace("\n", "").split(",")
must_keys = ["nickname_email"]
if not _func.checkValidHeader(header, *must_keys):
return False
f.seek(0, 0)
# Read csv
for record in reader:
must_values = ["nickname_email"]
            if not _func.checkRecordHasValue(record, *must_values):
continue
# Operation
_func.operate( NICKNAME2.deleteAlias
, record["nickname_email"].lower()
, parser.prog
, namespace.result_file
, namespace.domain
, auth_token
, record["nickname_email"].lower() # nickname_email
)
return True
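# Illustrative CSV layout accepted by _create_f, derived from its must_keys
# above (addresses are example values only):
#   user_email,nickname_email
#   taro@example.com,taro.alias@example.com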
| gpl-2.0 | 8,476,544,285,538,669,000 | 31.362245 | 96 | 0.523412 | false |
adfernandes/mbed | targets/TARGET_STM/tools/STM32_gen_PeripheralPins.py | 5 | 73232 | #!/usr/bin/env python
"""
* SPDX-License-Identifier: BSD-3-Clause
******************************************************************************
*
* Copyright (c) 2016-2020 STMicroelectronics.
* All rights reserved.
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
"""
import argparse
import datetime
import fnmatch
import json
import os
import re
import sys
import textwrap
from xml.dom.minidom import parse, Node
from argparse import RawTextHelpFormatter
import subprocess
GENPINMAP_VERSION = "1.20.1"
ADD_DEVICE_IF = 0
ADD_GPIO_PINMAP = 0
DEBUG_PRINT = 0
FLAT_DIRECTORY = 0
mcu_file=""
mcu_list = [] #'name'
gpio_list = [] #'PIN','name','BOOT/OSC'
adclist = [] #'PIN','name','ADCSignal'
daclist = [] #'PIN','name','DACSignal'
i2cscl_list = [] #'PIN','name','I2CSCLSignal'
i2csda_list = [] #'PIN','name','I2CSDASignal'
pwm_list = [] #'PIN','name','PWM'
uarttx_list = [] #'PIN','name','UARTtx'
uartrx_list = [] #'PIN','name','UARTrx'
uartcts_list = [] #'PIN','name','UARTcts'
uartrts_list = [] #'PIN','name','UARTrts'
spimosi_list = [] #'PIN','name','SPIMOSI'
spimiso_list = [] #'PIN','name','SPIMISO'
spissel_list = [] #'PIN','name','SPISSEL'
spisclk_list = [] #'PIN','name','SPISCLK'
cantd_list = [] #'PIN','name','CANTD'
canrd_list = [] #'PIN','name','CANRD'
eth_list = [] #'PIN','name','ETH'
quadspidata0_list = [] #'PIN','name','QUADSPIDATA0'
quadspidata1_list = [] #'PIN','name','QUADSPIDATA1'
quadspidata2_list = [] #'PIN','name','QUADSPIDATA2'
quadspidata3_list = [] #'PIN','name','QUADSPIDATA3'
quadspisclk_list = [] #'PIN','name','QUADSPISCLK'
quadspissel_list = [] #'PIN','name','QUADSPISSEL'
octospidata0_list = [] #'PIN','name','OCTOSPIDATA0'
octospidata1_list = [] #'PIN','name','OCTOSPIDATA1'
octospidata2_list = [] #'PIN','name','OCTOSPIDATA2'
octospidata3_list = [] #'PIN','name','OCTOSPIDATA3'
octospidata4_list = [] #'PIN','name','OCTOSPIDATA4'
octospidata5_list = [] #'PIN','name','OCTOSPIDATA5'
octospidata6_list = [] #'PIN','name','OCTOSPIDATA6'
octospidata7_list = [] #'PIN','name','OCTOSPIDATA7'
octospidqs_list = [] #'PIN','name','OCTOSPIDQS'
octospisclk_list = [] #'PIN','name','OCTOSPISCLK'
octospissel_list = [] #'PIN','name','OCTOSPISSEL'
usb_list = [] # 'PIN','name','USB'
usb_otgfs_list = [] # 'PIN','name','USB'
usb_otghs_list = [] # 'PIN','name','USB'
osc_list = [] #'PIN','name','OSC'
sys_list = [] #'PIN','name','SYS'
STDIO_list = ["Pxx", "Pxx"] # TX , RX
LED_list = []
BUTTON_list = []
DUAL_PAD = False
MCU_USERNAME= ""
TIM_MST = ""
ALTERNATE_DEFINITION = 0
TARGET_NAME = ""
TIM_DUALCORE_LIST = { # Timer used for us ticker is hardcoded in this script
"H745":"TIM2",
"H747":"TIM2",
"H750":"TIM2",
"H755":"TIM2"
}
VCP_UART_LIST = { # Used interface is HW option
"Nucleo_NUCLEO-L552ZE-Q":"LPUART1",
"Discovery_STM32L4R9I":"USART2",
"Discovery_STM32L496G":"USART2"
}
def print_debug(console_line):
if DEBUG_PRINT == 1:
print("DEBUG: %s" % console_line)
def find_gpio_file():
res = "ERROR"
itemlist = xml_mcu.getElementsByTagName("IP")
for s in itemlist:
a = s.attributes["Name"].value
if "GPIO" in a:
res = s.attributes["Version"].value
return res
def find_tim_mst():
global TIM_MST
# Let's list first the available timers
tim_list = []
itemlist = xml_mcu.getElementsByTagName("IP")
for s in itemlist:
a = s.attributes["Name"].value
if "TIM" in a:
tim_list.append(s.attributes["InstanceName"].value)
# Then choose 1 timer for us ticker
TIM_MST = ""
if TARGET_FAMILY == "STM32F0":
search_order = ["TIM5", "TIM2", "TIM1"]
elif TARGET_FAMILY == "STM32F1":
search_order = ["TIM5", "TIM4", "TIM2"]
elif TARGET_FAMILY == "STM32F3":
search_order = ["TIM5", "TIM2"]
elif TARGET_FAMILY == "STM32G0":
search_order = ["TIM5", "TIM2", "TIM3"]
elif TARGET_FAMILY == "STM32G4":
search_order = ["TIM5", "TIM2"]
elif TARGET_FAMILY == "STM32L0":
search_order = ["TIM5", "TIM21"]
elif TARGET_FAMILY == "STM32L1":
search_order = ["TIM5", "TIM2"]
elif TARGET_FAMILY == "STM32L4":
search_order = ["TIM5", "TIM2"]
elif TARGET_FAMILY == "STM32WB":
search_order = ["TIM16", "TIM2"]
elif TARGET_FAMILY == "STM32WL":
search_order = ["TIM2"]
else:
search_order = ["TIM5"]
for EachTimer in search_order:
if EachTimer in tim_list:
TIM_MST = EachTimer
break
if TIM_MST == "":
print("!!! error TIM_MST not found")
else:
print_debug("TIM_MST=%s" % TIM_MST)
def get_gpio_af_num(pintofind, iptofind):
pintofind = pintofind.split("-")[0].split(" ")[0] # to avoid for ex "PC14-OSC32_IN", "PB4 (NJTRST)"
if "STM32F10" in mcu_file:
return get_gpio_af_num_stm32f1(pintofind, iptofind)
i = 0
mygpioaf = ""
for n in xml_gpio.documentElement.childNodes:
i += 1
j = 0
if n.nodeType == Node.ELEMENT_NODE:
for firstlevel in n.attributes.items():
# if 'PB7' in firstlevel:
if pintofind == firstlevel[1].split("-")[0].split(" ")[0]: # to avoid for ex "PC14-OSC32_IN", "PB4 (NJTRST)"
# n = pin node found
for each_child_node in n.childNodes:
j += 1
k = 0
if each_child_node.nodeType == Node.ELEMENT_NODE:
for secondlevel in each_child_node.attributes.items():
k += 1
# if 'I2C1_SDA' in secondlevel:
if iptofind in secondlevel[1].replace("_CTS_NSS", "_CTS"): # to avoid "USART2_CTS_NSS"
# m = IP node found
for p in each_child_node.childNodes:
if p.nodeType == Node.ELEMENT_NODE:
# p node of 'Specific parameter'
for myc in p.childNodes:
if myc.nodeType == Node.ELEMENT_NODE:
# myc = node of ALTERNATE
for mygpioaflist in myc.childNodes:
if mygpioaflist.data not in mygpioaf:
if mygpioaf != "":
mygpioaf += " "
mygpioaf += mygpioaflist.data
if mygpioaf == "":
mygpioaf = "GPIO_AF_NONE"
return mygpioaf
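# Illustrative shape of the GPIO XML walked above (simplified; tag names are
# assumptions based on the traversal, not copied from an actual CubeMX file):
#   <GPIO_Pin Name="PB7">
#     <PinSignal Name="I2C1_SDA">
#       <SpecificParameter Name="GPIO_AF">
#         <PossibleValue>GPIO_AF4_I2C1</PossibleValue>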
def get_gpio_af_num_stm32f1(pintofind, iptofind):
print_debug('pin to find ' + pintofind + ' ip to find ' + iptofind)
i = 0
mygpioaf = ""
for gpio_child_node in xml_gpio.documentElement.childNodes:
i += 1
j = 0
if gpio_child_node.nodeType == Node.ELEMENT_NODE:
for firstlevel in gpio_child_node.attributes.items():
if pintofind == firstlevel[1]:
# gpio_child_node = pin node found
for each_child_node in gpio_child_node.childNodes:
j += 1
k = 0
if each_child_node.nodeType == Node.ELEMENT_NODE:
for secondlevel in each_child_node.attributes.items():
k += 1
# if 'I2C1_SDA' in secondlevel:
if iptofind in secondlevel:
# m = IP node found
for p in each_child_node.childNodes:
# p node 'RemapBlock'
if (
p.nodeType == Node.ELEMENT_NODE
and p.hasChildNodes() is False
):
if mygpioaf != "":
mygpioaf += " "
mygpioaf += "AFIO_NONE"
else:
for s in p.childNodes:
if s.nodeType == Node.ELEMENT_NODE:
# s node 'Specific parameter'
for myc in s.childNodes:
if (
myc.nodeType
== Node.ELEMENT_NODE
):
# myc = AF value
for (
mygpioaflist
) in myc.childNodes:
if mygpioaf != "":
mygpioaf += " "
mygpioaf += mygpioaflist.data.replace(
"__HAL_", ""
).replace(
"_REMAP", ""
)
if mygpioaf == "":
mygpioaf = "AFIO_NONE"
return mygpioaf.replace("AFIO_NONE", "0")\
.replace("AFIO_SPI1_ENABLE", "1")\
.replace("AFIO_I2C1_ENABLE", "2")\
.replace("AFIO_USART1_ENABLE", "3")\
.replace("AFIO_USART3_PARTIAL", "5")\
.replace("AFIO_TIM1_PARTIAL", "6")\
.replace("AFIO_TIM3_PARTIAL", "7")\
.replace("AFIO_TIM2_ENABLE", "8")\
.replace("AFIO_TIM2_PARTIAL_1", "8")\
.replace("AFIO_TIM2_PARTIAL_2", "8")\
.replace("AFIO_TIM3_ENABLE", "9")\
.replace("AFIO_CAN1_2", "10")
def store_pin(pin, name, functionality):
# store pin I/O
gpio_list.append([pin, name, functionality])
# function to store ADC list
def store_adc(pin, name, signal):
adclist.append([pin, name, signal])
# function to store DAC list
def store_dac(pin, name, signal):
daclist.append([pin, name, signal])
# function to store I2C list
def store_i2c(pin, name, signal):
# is it SDA or SCL ?
if "_SCL" in signal:
i2cscl_list.append([pin, name, signal])
if "_SDA" in signal:
i2csda_list.append([pin, name, signal])
# function to store timers
def store_pwm(pin, name, signal):
if "_CH" in signal:
pwm_list.append([pin, name, signal])
# function to store Uart pins
def store_uart(pin, name, signal):
if "_TX" in signal:
uarttx_list.append([pin, name, signal])
if "_RX" in signal:
uartrx_list.append([pin, name, signal])
if "_CTS" in signal:
uartcts_list.append([pin, name, signal])
if "_RTS" in signal:
uartrts_list.append([pin, name, signal])
# function to store SPI pins
def store_spi(pin, name, signal):
if "_MISO" in signal:
spimiso_list.append([pin, name, signal])
if "_MOSI" in signal:
spimosi_list.append([pin, name, signal])
if "_SCK" in signal:
spisclk_list.append([pin, name, signal])
if "_NSS" in signal:
spissel_list.append([pin, name, signal])
# function to store CAN pins
def store_can(pin, name, signal):
if "_RX" in signal:
canrd_list.append([pin, name, signal])
if "_TX" in signal:
cantd_list.append([pin, name, signal])
# function to store ETH list
def store_eth(pin, name, signal):
eth_list.append([pin, name, signal])
# function to store QSPI pins
def store_qspi(pin, name, signal):
if "_IO0" in signal:
quadspidata0_list.append([pin, name, signal])
if "_IO1" in signal:
quadspidata1_list.append([pin, name, signal])
if "_IO2" in signal:
quadspidata2_list.append([pin, name, signal])
if "_IO3" in signal:
quadspidata3_list.append([pin, name, signal])
if "_CLK" in signal:
quadspisclk_list.append([pin, name, signal])
if "_NCS" in signal:
quadspissel_list.append([pin, name, signal])
# function to store OSPI pins
def store_ospi(pin, name, signal):
if "_IO0" in signal:
octospidata0_list.append([pin, name, signal])
if "_IO1" in signal:
octospidata1_list.append([pin, name, signal])
if "_IO2" in signal:
octospidata2_list.append([pin, name, signal])
if "_IO3" in signal:
octospidata3_list.append([pin, name, signal])
if "_IO4" in signal:
octospidata4_list.append([pin, name, signal])
if "_IO5" in signal:
octospidata5_list.append([pin, name, signal])
if "_IO6" in signal:
octospidata6_list.append([pin, name, signal])
if "_IO7" in signal:
octospidata7_list.append([pin, name, signal])
if "_CLK" in signal:
octospisclk_list.append([pin, name, signal])
if "_NCS" in signal:
octospissel_list.append([pin, name, signal])
if "_DQS" in signal:
octospidqs_list.append([pin, name, signal])
# function to store USB pins
def store_usb(pin, name, signal):
if "OTG" not in signal:
usb_list.append([pin, name, signal])
elif signal.startswith("USB_OTG_FS"):
usb_otgfs_list.append([pin, name, signal])
elif signal.startswith("USB_OTG_HS"):
usb_otghs_list.append([pin, name, signal])
# function to store OSC pins
def store_osc(pin, name, signal):
osc_list.append([pin, name, signal])
# function to store SYS pins
def store_sys(pin, name, signal):
sys_list.append([pin, name, signal])
def print_header():
global ALTERNATE_DEFINITION
date_year = datetime.datetime.now().year
line_to_write = ("""/* mbed Microcontroller Library
* SPDX-License-Identifier: BSD-3-Clause
******************************************************************************
*
* Copyright (c) 2016-%i STMicroelectronics.
* All rights reserved.
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*
* Automatically generated from STM32CubeMX/db/mcu/%s
*/
#include "PeripheralPins.h"
#include "mbed_toolchain.h"
//==============================================================================
// Notes
//
// - The pins mentioned Px_y_ALTz are alternative possibilities which use other
// HW peripheral instances. You can use them the same way as any other "normal"
// pin (i.e. PwmOut pwm(PA_7_ALT0);). These pins are not displayed on the board
// pinout image on mbed.org.
//
// - The pins which are connected to other components present on the board have
// the comment "Connected to xxx". The pin function may not work properly in this
// case. These pins may not be displayed on the board pinout image on mbed.org.
// Please read the board reference manual and schematic for more information.
//
// - Warning: pins connected to the default STDIO_UART_TX and STDIO_UART_RX pins are commented
// See https://os.mbed.com/teams/ST/wiki/STDIO for more information.
//
//==============================================================================
""" % (date_year, os.path.basename(input_file_name)))
out_c_file.write(line_to_write)
line_to_write = ("""/* mbed Microcontroller Library
* SPDX-License-Identifier: BSD-3-Clause
******************************************************************************
*
* Copyright (c) 2016-%i STMicroelectronics.
* All rights reserved.
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*
* Automatically generated from STM32CubeMX/db/mcu/%s
*/
/* MBED TARGET LIST: %s */
#ifndef MBED_PINNAMES_H
#define MBED_PINNAMES_H
#include "cmsis.h"
#include "PinNamesTypes.h"
#ifdef __cplusplus
extern "C" {
#endif
""" % (date_year, os.path.basename(input_file_name), TARGET_NAME))
out_h_file.write(line_to_write)
if DUAL_PAD:
line_to_write = ("""
#define DUAL_PAD 0xF00
""")
out_h_file.write(line_to_write)
if ADD_GPIO_PINMAP:
line_to_write = ("""
/* If this macro is defined, then PinMap_GPIO is present in PeripheralPins.c */
#define GPIO_PINMAP_READY 1
""")
out_h_file.write(line_to_write)
line_to_write = ("""
typedef enum {
""")
out_h_file.write(line_to_write)
def print_footer():
line_to_write = ("""
// Not connected
NC = (int)0xFFFFFFFF
} PinName;
// Standardized LED and button names
""")
out_h_file.write(line_to_write)
name_counter = 1
if not LED_list:
LED_list.append("Pxx")
StandardLED = {}
for EachLED in LED_list:
PinLabel[EachLED] = "TODO"
StandardLED[PinLabel[EachLED]] = EachLED
for EachLED in sorted(StandardLED):
led_label = " // %s" % EachLED
out_h_file.write("#define LED%i %-5s %s\n" % (name_counter, re.sub(r'(P.)', r'\1_', StandardLED[EachLED]), led_label))
name_counter += 1
name_counter = 1
if not BUTTON_list:
BUTTON_list.append("Pxx")
for EachBUTTON in BUTTON_list:
button_label = ""
if EachBUTTON in PinLabel:
button_label = " // %s" % PinLabel[EachBUTTON]
out_h_file.write("#define BUTTON%i %-5s %s\n" % (name_counter, re.sub(r'(P.)', r'\1_', EachBUTTON).split('/')[0].split('-')[0], button_label))
name_counter += 1
line_to_write = ("""
#ifdef __cplusplus
}
#endif
#endif
""")
out_h_file.write(line_to_write)
def print_all_lists():
if ADD_GPIO_PINMAP:
if print_list_header("GPIO", "GPIO", gpio_list, "GPIO"):
print_gpio()
if print_list_header("ADC", "ADC", adclist, "ANALOGIN"):
print_adc()
if print_list_header("DAC", "DAC", daclist, "ANALOGOUT"):
print_dac()
if print_list_header("I2C", "I2C_SDA", i2csda_list, "I2C"):
print_i2c(i2csda_list)
if print_list_header("", "I2C_SCL", i2cscl_list, "I2C"):
print_i2c(i2cscl_list)
if print_list_header("PWM", "PWM", pwm_list, "PWMOUT"):
print_pwm()
if print_list_header("SERIAL", "UART_TX", uarttx_list, "SERIAL"):
print_uart(uarttx_list)
if print_list_header("", "UART_RX", uartrx_list, "SERIAL"):
print_uart(uartrx_list)
if print_list_header("", "UART_RTS", uartrts_list, "SERIAL"):
print_uart(uartrts_list)
if print_list_header("", "UART_CTS", uartcts_list, "SERIAL"):
print_uart(uartcts_list)
if print_list_header("SPI", "SPI_MOSI", spimosi_list, "SPI"):
print_spi(spimosi_list)
if print_list_header("", "SPI_MISO", spimiso_list, "SPI"):
print_spi(spimiso_list)
if print_list_header("", "SPI_SCLK", spisclk_list, "SPI"):
print_spi(spisclk_list)
if print_list_header("", "SPI_SSEL", spissel_list, "SPI"):
print_spi(spissel_list)
if print_list_header("CAN", "CAN_RD", canrd_list, "CAN"):
print_can(canrd_list)
if print_list_header("", "CAN_TD", cantd_list, "CAN"):
print_can(cantd_list)
if print_list_header("QUADSPI", "QSPI_DATA0", quadspidata0_list, "QSPI"):
print_qspi(quadspidata0_list)
if print_list_header("", "QSPI_DATA1", quadspidata1_list, "QSPI"):
print_qspi(quadspidata1_list)
if print_list_header("", "QSPI_DATA2", quadspidata2_list, "QSPI"):
print_qspi(quadspidata2_list)
if print_list_header("", "QSPI_DATA3", quadspidata3_list, "QSPI"):
print_qspi(quadspidata3_list)
if print_list_header("", "QSPI_SCLK", quadspisclk_list, "QSPI"):
print_qspi(quadspisclk_list)
if print_list_header("", "QSPI_SSEL", quadspissel_list, "QSPI"):
print_qspi(quadspissel_list)
if print_list_header("OCTOSPI", "OSPI_DATA0", octospidata0_list, "OCTO"):
print_ospi(octospidata0_list)
if print_list_header("", "OSPI_DATA1", octospidata1_list, "OCTO"):
print_ospi(octospidata1_list)
if print_list_header("", "OSPI_DATA2", octospidata2_list, "OCTO"):
print_ospi(octospidata2_list)
if print_list_header("", "OSPI_DATA3", octospidata3_list, "OCTO"):
print_ospi(octospidata3_list)
if print_list_header("", "OSPI_DATA4", octospidata2_list, "OCTO"):
print_ospi(octospidata4_list)
if print_list_header("", "OSPI_DATA5", octospidata3_list, "OCTO"):
print_ospi(octospidata5_list)
if print_list_header("", "OSPI_DATA6", octospidata2_list, "OCTO"):
print_ospi(octospidata6_list)
if print_list_header("", "OSPI_DATA7", octospidata3_list, "OCTO"):
print_ospi(octospidata7_list)
if print_list_header("", "OSPI_DQS", octospidqs_list, "OCTO"):
print_ospi(octospidqs_list)
if print_list_header("", "OSPI_SCLK", octospisclk_list, "OCTO"):
print_ospi(octospisclk_list)
if print_list_header("", "OSPI_SSEL", octospissel_list, "OCTO"):
print_ospi(octospissel_list)
if print_list_header("USBDEVICE", "USB_FS", usb_list, "USBDEVICE"):
print_usb(usb_list)
if print_list_header("USBDEVICE", "USB_FS", usb_otgfs_list, "USBDEVICE"):
print_usb(usb_otgfs_list)
if print_list_header("USBDEVICE", "USB_HS", usb_otghs_list, "USBDEVICE"):
print_usb(usb_otghs_list)
print_pin_list(gpio_list)
print_h_file(usb_list, "USB")
print_h_file(usb_otgfs_list, "USB FS")
print_h_file(usb_otghs_list, "USB HS")
print_h_file(eth_list, "ETHERNET")
print_h_file(osc_list, "OSCILLATOR")
print_h_file(sys_list, "DEBUG")
def print_list_header(comment, name, l, switch):
line_to_write = ""
if len(l)>0:
if comment:
line_to_write += "\n//*** %s ***\n" % comment
line_to_write += "\n"
if name == "PWM":
line_to_write += "// %s cannot be used because already used by the us_ticker\n" % TIM_MST
line_to_write += "// (update us_ticker_data.h file if another timer is chosen)\n"
default_timer_core2 = ""
for each_target in TIM_DUALCORE_LIST:
if each_target in mcu_file:
default_timer_core2 = TIM_DUALCORE_LIST[each_target]
if default_timer_core2 != "":
line_to_write += "// %s cannot be used because already used by the us_ticker (DUAL_CORE)\n" % default_timer_core2
if ADD_DEVICE_IF:
line_to_write += "#if DEVICE_%s\n" % switch
line_to_write += "MBED_WEAK const PinMap PinMap_%s[] = {\n" % name
out_c_file.write(line_to_write)
return len(l)
def print_gpio():
for parsed_pin in gpio_list:
commented_line = " "
if parsed_pin[1] in PinLabel:
if "STDIO_UART" in PinLabel[parsed_pin[1]]:
commented_line = "//"
if "RCC_OSC" in PinLabel[parsed_pin[1]]:
commented_line = "//"
if parsed_pin[1] in PinPuPd:
commented_line = "//"
if "OSC" in parsed_pin[2]:
commented_line = "//"
line_to_write = "%-11s" % (commented_line + " {" + parsed_pin[0] + ',')
line_to_write += ' 0, GPIO_NOPULL},'
if parsed_pin[1] in PinLabel:
line_to_write += ' // Connected to ' + PinLabel[parsed_pin[1]]
if parsed_pin[1] in PinPuPd:
line_to_write += ' // ' + PinPuPd[parsed_pin[1]]
if parsed_pin[2] != "":
line_to_write += ' // ' + parsed_pin[2]
line_to_write += '\n'
out_c_file.write(line_to_write)
out_c_file.write( """ {NC, NC, 0}
};
""")
if ADD_DEVICE_IF:
out_c_file.write( "#endif\n" )
def print_adc():
global ALTERNATE_DEFINITION
# Check GPIO version (alternate or not)
s_pin_data = "STM_PIN_DATA_EXT(STM_MODE_ANALOG"
# For STM32L47xxx/48xxx, it is necessary to configure
# the GPIOx_ASCR register
if re.match("STM32L4[78]+", mcu_file):
s_pin_data += "_ADC_CONTROL"
prev_p = ''
alt_index = 0
for parsed_pin in adclist:
if "IN" in parsed_pin[2]:
commented_line = " "
if parsed_pin[1] in PinLabel:
if "STDIO_UART" in PinLabel[parsed_pin[1]]:
commented_line = "//"
if "RCC_OSC" in PinLabel[parsed_pin[1]]:
commented_line = "//"
if commented_line != "//":
if parsed_pin[0] == prev_p:
if "STM32F1" in mcu_file:
continue
else:
prev_p = parsed_pin[0]
parsed_pin[0] += '_ALT%d' % alt_index
store_pin(parsed_pin[0], parsed_pin[0], "")
alt_index += 1
if alt_index > ALTERNATE_DEFINITION:
ALTERNATE_DEFINITION += 1
else:
prev_p = parsed_pin[0]
alt_index = 0
line_to_write = "%-17s" % (commented_line + " {" + parsed_pin[0] + ',')
a = parsed_pin[2].split('_')
inst = a[0].replace("ADC", "")
if len(inst) == 0:
inst = '1' #single ADC for this product
line_to_write += "%-7s" % ('ADC_' + inst + ',')
            chan = re.sub(r"^IN[NP]?|\D*$", "", a[1])
bank = "_ADC_CHANNEL_BANK_B" if a[1].endswith("b") else ""
line_to_write += s_pin_data + bank + ", GPIO_NOPULL, 0, " + chan
line_to_write += ', 0)}, // ' + parsed_pin[2]
if parsed_pin[1] in PinLabel:
line_to_write += ' // Connected to ' + PinLabel[parsed_pin[1]]
line_to_write += '\n'
out_c_file.write(line_to_write)
out_c_file.write( """ {NC, NC, 0}
};
// !!! SECTION TO BE CHECKED WITH DEVICE REFERENCE MANUAL
MBED_WEAK const PinMap PinMap_ADC_Internal[] = {
// {ADC_TEMP, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 16, 0)},
// {ADC_VREF, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 17, 0)},
// {ADC_VBAT, ADC_1, STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, 18, 0)},
{NC, NC, 0}
};
""")
if ADD_DEVICE_IF:
out_c_file.write( "#endif\n" )
def print_dac():
for parsed_pin in daclist:
commented_line = " "
if parsed_pin[1] in PinLabel:
if "STDIO_UART" in PinLabel[parsed_pin[1]]:
commented_line = "//"
if "RCC_OSC" in PinLabel[parsed_pin[1]]:
commented_line = "//"
s1 = "%-17s" % (commented_line + " {" + parsed_pin[0] + ',')
#parsed_pin[2] : DAC_OUT1 / DAC1_OUT1
a = parsed_pin[2].split('_')
inst = a[0].replace("DAC", "")
b = a[1].replace("OUT", "")
if len(inst) == 0:
inst = '1' # single DAC for this product
s1 += "%-7s" % ('DAC_' + inst + ',')
s1 += 'STM_PIN_DATA_EXT(STM_MODE_ANALOG, GPIO_NOPULL, 0, ' + b + ', 0)}, // ' + parsed_pin[2]
if parsed_pin[1] in PinLabel:
s1 += ' // Connected to ' + PinLabel[parsed_pin[1]]
s1 += '\n'
out_c_file.write(s1)
out_c_file.write( """ {NC, NC, 0}
};
""")
if ADD_DEVICE_IF:
out_c_file.write( "#endif\n" )
def print_i2c(l):
global ALTERNATE_DEFINITION
prev_p = ''
alt_index = 0
for parsed_pin in l:
result = get_gpio_af_num(parsed_pin[1], parsed_pin[2])
commented_line = " "
if parsed_pin[1] in PinLabel:
if "STDIO_UART" in PinLabel[parsed_pin[1]]:
commented_line = "//"
if "RCC_OSC" in PinLabel[parsed_pin[1]]:
commented_line = "//"
if commented_line != "//":
if parsed_pin[0] == prev_p:
prev_p = parsed_pin[0]
parsed_pin[0] += '_ALT%d' % alt_index
store_pin(parsed_pin[0], parsed_pin[0], "")
alt_index += 1
if alt_index > ALTERNATE_DEFINITION:
ALTERNATE_DEFINITION += 1
else:
prev_p = parsed_pin[0]
alt_index = 0
s1 = "%-17s" % (commented_line + " {" + parsed_pin[0] + ',')
# parsed_pin[2] : I2C1_SDA / FMPI2C1_SDA
if "FMP" in parsed_pin[2]:
inst = parsed_pin[2].split('_')[0].replace("FMPI2C", "")
s1 += "%-10s" % ('FMPI2C_' + inst + ',')
else:
inst = parsed_pin[2].split('_')[0].replace("I2C", "")
s1 += "%-7s" % ('I2C_' + inst + ',')
s1 += 'STM_PIN_DATA(STM_MODE_AF_OD, GPIO_NOPULL, '
r = result.split(' ')
for af in r:
s2 = s1 + af + ')},'
if parsed_pin[1] in PinLabel:
s2 += ' // Connected to ' + PinLabel[parsed_pin[1]]
s2 += '\n'
out_c_file.write(s2)
out_c_file.write( """ {NC, NC, 0}
};
""")
if ADD_DEVICE_IF:
out_c_file.write( "#endif\n" )
def print_pwm():
global ALTERNATE_DEFINITION
prev_p = ''
alt_index = 0
tim_dualcore = "NOT_KNOWN"
for EachTarget in TIM_DUALCORE_LIST:
if EachTarget in mcu_file:
tim_dualcore = TIM_DUALCORE_LIST[EachTarget]
for parsed_pin in pwm_list:
result = get_gpio_af_num(parsed_pin[1], parsed_pin[2])
commented_line = " "
if parsed_pin[1] in PinLabel:
if "STDIO_UART" in PinLabel[parsed_pin[1]]:
commented_line = "//"
if "RCC_OSC" in PinLabel[parsed_pin[1]]:
commented_line = "//"
if "%s_" % TIM_MST in parsed_pin[2]:
commented_line = "//"
if "%s_" % tim_dualcore in parsed_pin[2]:
commented_line = "//"
if commented_line != "//":
if parsed_pin[0] == prev_p:
prev_p = parsed_pin[0]
parsed_pin[0] += '_ALT%d' % alt_index
store_pin(parsed_pin[0], parsed_pin[0], "")
alt_index += 1
if alt_index > ALTERNATE_DEFINITION:
ALTERNATE_DEFINITION = alt_index
else:
prev_p = parsed_pin[0]
alt_index = 0
s1 = "%-17s" % (commented_line + " {" + parsed_pin[0] + ',')
# parsed_pin[2] : TIM2_CH1 / TIM15_CH1N
a = parsed_pin[2].split('_')
inst = a[0].replace("TIM", "PWM_")
# if len(inst) == 3:
# inst += '1'
s1 += "%-8s" % (inst + ',')
chan = a[1].replace("CH", "")
if chan.endswith('N'):
neg = ', 1'
chan = chan.strip('N')
else:
neg = ', 0'
s1 += 'STM_PIN_DATA_EXT(STM_MODE_AF_PP, GPIO_NOPULL, '
r = result.split(' ')
prev_s1 = ""
for af in r:
if s1 == prev_s1:
continue
else:
prev_s1 = s1
s2 = s1 + af + ', ' + chan + neg + ')}, // ' + parsed_pin[2]
if parsed_pin[1] in PinLabel:
s2 += ' // Connected to ' + PinLabel[parsed_pin[1]]
s2 += '\n'
out_c_file.write(s2)
out_c_file.write( """ {NC, NC, 0}
};
""")
if ADD_DEVICE_IF:
out_c_file.write( "#endif\n" )
def print_uart(l):
global ALTERNATE_DEFINITION
prev_p = ''
alt_index = 0
for parsed_pin in l:
result = get_gpio_af_num(parsed_pin[1], parsed_pin[2])
commented_line = " "
if parsed_pin[1] in PinLabel:
if "RCC_OSC" in PinLabel[parsed_pin[1]]:
commented_line = "//"
if commented_line != "//":
if parsed_pin[0] == prev_p:
prev_p = parsed_pin[0]
parsed_pin[0] += '_ALT%d' % alt_index
store_pin(parsed_pin[0], parsed_pin[0], "")
alt_index += 1
if alt_index > ALTERNATE_DEFINITION:
ALTERNATE_DEFINITION += 1
else:
prev_p = parsed_pin[0]
alt_index = 0
s1 = "%-17s" % (commented_line + " {" + parsed_pin[0] + ',')
# parsed_pin[2] : USART2_RX
b=parsed_pin[2].split('_')[0]
b = b.replace("UART", "UART_")
b = b.replace("USART", "UART_")
s1 += "%-10s" % (b[:len(b)-1] + b[len(b)-1:] + ',')
if 'STM32F10' in mcu_file and l == uartrx_list:
s1 += 'STM_PIN_DATA(STM_MODE_INPUT, GPIO_PULLUP, '
else:
s1 += 'STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, '
r = result.split(' ')
for af in r:
s2 = s1 + af + ')},'
if parsed_pin[1] in PinLabel:
s2 += ' // Connected to ' + PinLabel[parsed_pin[1]]
s2 += '\n'
out_c_file.write(s2)
out_c_file.write( """ {NC, NC, 0}
};
""")
if ADD_DEVICE_IF:
out_c_file.write( "#endif\n" )
def print_spi(l):
global ALTERNATE_DEFINITION
prev_p = ''
alt_index = 0
for parsed_pin in l:
result = get_gpio_af_num(parsed_pin[1], parsed_pin[2])
commented_line = " "
if parsed_pin[1] in PinLabel:
if "STDIO_UART" in PinLabel[parsed_pin[1]]:
commented_line = "//"
if "RCC_OSC" in PinLabel[parsed_pin[1]]:
commented_line = "//"
if commented_line != "//":
if parsed_pin[0] == prev_p:
prev_p = parsed_pin[0]
parsed_pin[0] += '_ALT%d' % alt_index
store_pin(parsed_pin[0], parsed_pin[0], "")
alt_index += 1
if alt_index > ALTERNATE_DEFINITION:
ALTERNATE_DEFINITION += 1
else:
prev_p = parsed_pin[0]
alt_index = 0
s1 = "%-17s" % (commented_line + " {" + parsed_pin[0] + ',')
# parsed_pin[2] : SPI1_MISO
instance=parsed_pin[2].split('_')[0].replace("SPI", "")
s1 += "%-7s" % ('SPI_' + instance + ',')
s1 += 'STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, '
r = result.split(' ')
for af in r:
s2 = s1 + af + ')},'
if parsed_pin[1] in PinLabel:
s2 += ' // Connected to ' + PinLabel[parsed_pin[1]]
s2 += '\n'
out_c_file.write(s2)
out_c_file.write( """ {NC, NC, 0}
};
""")
if ADD_DEVICE_IF:
out_c_file.write( "#endif\n" )
def print_can(l):
for parsed_pin in l:
result = get_gpio_af_num(parsed_pin[1], parsed_pin[2])
commented_line = " "
if parsed_pin[1] in PinLabel:
if "STDIO_UART" in PinLabel[parsed_pin[1]]:
commented_line = "//"
if "RCC_OSC" in PinLabel[parsed_pin[1]]:
commented_line = "//"
s1 = "%-17s" % (commented_line + " {" + parsed_pin[0] + ',')
# parsed_pin[2] : CAN_RX / CAN1_RX
parsed_pin[2] = parsed_pin[2].replace("FD", "")
instance = parsed_pin[2].split('_')[0].replace("CAN", "")
if len(instance) == 0:
instance = '1'
s1 += "%-7s" % ('CAN_' + instance + ',')
if 'STM32F10' in mcu_file and l == canrd_list:
s1 += 'STM_PIN_DATA(STM_MODE_INPUT, GPIO_NOPULL, '
else:
s1 += 'STM_PIN_DATA(STM_MODE_AF_PP, GPIO_NOPULL, '
r = result.split(' ')
for af in r:
s2 = s1 + af + ')},'
if parsed_pin[1] in PinLabel:
s2 += ' // Connected to ' + PinLabel[parsed_pin[1]]
s2 += '\n'
out_c_file.write(s2)
out_c_file.write( """ {NC, NC, 0}
};
""")
if ADD_DEVICE_IF:
out_c_file.write( "#endif\n" )
def print_qspi(l):
for parsed_pin in l:
result = get_gpio_af_num(parsed_pin[1], parsed_pin[2])
if "BK2" in parsed_pin[2]: # QSPI Bank 2 is not supported
continue
commented_line = " "
if parsed_pin[1] in PinLabel:
if "STDIO_UART" in PinLabel[parsed_pin[1]]:
commented_line = "//"
if "RCC_OSC" in PinLabel[parsed_pin[1]]:
commented_line = "//"
s1 = "%-16s" % (commented_line + " {" + parsed_pin[0] + ',')
# parsed_pin[2] : QUADSPI_BK1_IO3 / QUADSPI_CLK / QUADSPI_NCS
if "OCTOSPIM_P2" in parsed_pin[2]:
s1 += "%-8s" % 'QSPI_2,'
else:
s1 += "%-8s" % 'QSPI_1,'
result = result.replace("GPIO_AF10_OTG_FS", "GPIO_AF10_QSPI")
s1 += 'STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, ' + result +')},'
s1 += ' // ' + parsed_pin[2]
if parsed_pin[1] in PinLabel:
s1 += ' // Connected to ' + PinLabel[parsed_pin[1]]
s1 += '\n'
out_c_file.write(s1)
out_c_file.write( """ {NC, NC, 0}
};
""")
if ADD_DEVICE_IF:
out_c_file.write( "#endif\n" )
def print_ospi(l):
for parsed_pin in l:
result = get_gpio_af_num(parsed_pin[1], parsed_pin[2])
commented_line = " "
if parsed_pin[1] in PinLabel:
if "STDIO_UART" in PinLabel[parsed_pin[1]]:
commented_line = "//"
if "RCC_OSC" in PinLabel[parsed_pin[1]]:
commented_line = "//"
s1 = "%-16s" % (commented_line + " {" + parsed_pin[0] + ',')
        # parsed_pin[2] : OCTOSPIM_P1_IO3 / OCTOSPIM_P2_CLK / OCTOSPIM_P1_NCS
if "OCTOSPIM_P2" in parsed_pin[2]:
s1 += "%-8s" % 'OSPI_2,'
else:
s1 += "%-8s" % 'OSPI_1,'
# result = result.replace("GPIO_AF10_OTG_FS", "GPIO_AF10_QSPI")
s1 += 'STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, ' + result +')},'
s1 += ' // ' + parsed_pin[2]
if parsed_pin[1] in PinLabel:
s1 += ' // Connected to ' + PinLabel[parsed_pin[1]]
s1 += '\n'
out_c_file.write(s1)
out_c_file.write( """ {NC, NC, 0}
};
""")
if ADD_DEVICE_IF:
out_c_file.write( "#endif\n" )
def print_usb(lst):
use_hs_in_fs = False
nb_loop = 1
inst = "USB_FS"
if lst is usb_otgfs_list:
inst = "USB_FS"
elif lst is usb_otghs_list:
inst = "USB_HS"
nb_loop = 2
for nb in range(nb_loop):
for parsed_pin in lst:
result = get_gpio_af_num(parsed_pin[1], parsed_pin[2])
commented_line = " "
if parsed_pin[1] in PinLabel:
if "STDIO_UART" in PinLabel[parsed_pin[1]]:
commented_line = "//"
if "RCC_OSC" in PinLabel[parsed_pin[1]]:
commented_line = "//"
if "_SOF" in parsed_pin[2] or "_NOE" in parsed_pin[2]:
commented_line = "//"
if lst is usb_otghs_list:
if nb == 0:
if "ULPI" in parsed_pin[2]:
continue
elif not use_hs_in_fs:
out_c_file.write("#if (MBED_CONF_TARGET_USB_SPEED == USE_USB_HS_IN_FS)\n")
use_hs_in_fs = True
else:
if "ULPI" not in parsed_pin[2]:
continue
elif use_hs_in_fs:
out_c_file.write("#else /* MBED_CONF_TARGET_USB_SPEED */\n")
use_hs_in_fs = False
s1 = "%-16s" % (commented_line + " {" + parsed_pin[0] + ',')
# 2nd element is the USB_XXXX signal
if not parsed_pin[2].startswith("USB_D") and "VBUS" not in parsed_pin[2]:
if "ID" not in parsed_pin[2]:
s1 += inst + ", STM_PIN_DATA(STM_MODE_AF_PP, GPIO_PULLUP, "
else:
# ID pin: AF_PP + PULLUP
s1 += inst + ", STM_PIN_DATA(STM_MODE_AF_OD, GPIO_PULLUP, "
else:
# USB_DM/DP and VBUS: INPUT + NOPULL
s1 += inst + ", STM_PIN_DATA(STM_MODE_INPUT, GPIO_NOPULL, "
if result == "NOTFOUND":
s1 += "0)},"
else:
r = result.split(" ")
for af in r:
s1 += af + ")},"
s1 += " // " + parsed_pin[2]
if parsed_pin[1] in PinLabel:
s1 += ' // Connected to ' + PinLabel[parsed_pin[1]]
s1 += "\n"
out_c_file.write(s1)
if lst:
if lst is usb_otghs_list:
out_c_file.write("#endif /* MBED_CONF_TARGET_USB_SPEED */\n")
out_c_file.write(""" {NC, NC, 0}
};
""")
if ADD_DEVICE_IF:
out_c_file.write( "#endif\n" )
def print_pin_list(pin_list):
if ALTERNATE_DEFINITION > 0:
line_to_write = (""" ALT0 = 0x100,""")
if ALTERNATE_DEFINITION > 1:
line_to_write += """
ALT1 = 0x200,"""
if ALTERNATE_DEFINITION > 2:
line_to_write += """
ALT2 = 0x300,"""
if ALTERNATE_DEFINITION > 3:
line_to_write += """
ALT3 = 0x400,"""
if ALTERNATE_DEFINITION > 4:
line_to_write += """
ALT4 = 0x500,"""
line_to_write += """
} ALTx;
typedef enum {
"""
out_h_file.write(line_to_write)
pin_list.sort(key=natural_sortkey)
previous_pin = ""
for parsed_pin in pin_list:
print_debug("pin %s => %s" % (parsed_pin, parsed_pin[0]))
if parsed_pin[0] == previous_pin:
continue
previous_pin = parsed_pin[0]
if "_ALT" in parsed_pin[0]:
s1 = " %-10s = %-5s | %s, // same pin used for alternate HW\n" % (parsed_pin[0], parsed_pin[0].split('_A')[0], parsed_pin[0].split('_')[2])
elif len(parsed_pin[0]) > 4 and "C" == parsed_pin[0][4]:
s1 = " %-10s = %-5s | DUAL_PAD, // dual pad\n" % (parsed_pin[0], parsed_pin[0].split('_A')[0].replace("PC", "PP").replace("C", "").replace("PP", "PC"))
else:
pin_value = 0
if "PA" in parsed_pin[0]:
pin_value = 0
elif "PB" in parsed_pin[0]:
pin_value = 0x10
elif "PC" in parsed_pin[0]:
pin_value = 0x20
elif "PD" in parsed_pin[0]:
pin_value = 0x30
elif "PE" in parsed_pin[0]:
pin_value = 0x40
elif "PF" in parsed_pin[0]:
pin_value = 0x50
elif "PG" in parsed_pin[0]:
pin_value = 0x60
elif "PH" in parsed_pin[0]:
pin_value = 0x70
elif "PI" in parsed_pin[0]:
pin_value = 0x80
elif "PJ" in parsed_pin[0]:
pin_value = 0x90
elif "PK" in parsed_pin[0]:
pin_value = 0xA0
elif "PZ" in parsed_pin[0]:
pin_value = 0x0 # to update
else:
print("error in print_pin_list with pin %s" % parsed_pin[0])
pin_value += int(parsed_pin[0].split('_')[1])
s1 = " %-10s = 0x%02X,\n" % (parsed_pin[0], pin_value)
out_h_file.write(s1)
out_h_file.write("""\n /**** ADC internal channels ****/
ADC_TEMP = 0xF0, // Internal pin virtual value
ADC_VREF = 0xF1, // Internal pin virtual value
ADC_VBAT = 0xF2, // Internal pin virtual value
#ifdef TARGET_FF_ARDUINO_UNO
// Arduino Uno (Rev3) pins
ARDUINO_UNO_A0 = Px_x,
ARDUINO_UNO_A1 = Px_x,
ARDUINO_UNO_A2 = Px_x,
ARDUINO_UNO_A3 = Px_x,
ARDUINO_UNO_A4 = Px_x,
ARDUINO_UNO_A5 = Px_x,
ARDUINO_UNO_D0 = Px_x,
ARDUINO_UNO_D1 = Px_x,
ARDUINO_UNO_D2 = Px_x,
ARDUINO_UNO_D3 = Px_x,
ARDUINO_UNO_D4 = Px_x,
ARDUINO_UNO_D5 = Px_x,
ARDUINO_UNO_D6 = Px_x,
ARDUINO_UNO_D7 = Px_x,
ARDUINO_UNO_D8 = Px_x,
ARDUINO_UNO_D9 = Px_x,
ARDUINO_UNO_D10 = Px_x,
ARDUINO_UNO_D11 = Px_x,
ARDUINO_UNO_D12 = Px_x,
ARDUINO_UNO_D13 = Px_x,
ARDUINO_UNO_D14 = Px_x,
ARDUINO_UNO_D15 = Px_x,
#endif
""")
s = ("""
// STDIO for console print
#ifdef MBED_CONF_TARGET_STDIO_UART_TX
CONSOLE_TX = MBED_CONF_TARGET_STDIO_UART_TX,
#else
CONSOLE_TX = %s,
#endif
#ifdef MBED_CONF_TARGET_STDIO_UART_RX
CONSOLE_RX = MBED_CONF_TARGET_STDIO_UART_RX,
#else
CONSOLE_RX = %s,
#endif
""" % (re.sub(r'(P.)', r'\1_', STDIO_list[0]), re.sub(r'(P.)', r'\1_', STDIO_list[1])))
out_h_file.write(s)
def print_h_file(pin_list, comment):
global ALTERNATE_DEFINITION
pin_list.sort(key=natural_sortkey2)
if len(pin_list) > 0:
line_to_write = ("\n /**** %s pins ****/\n" % comment)
out_h_file.write(line_to_write)
prev_s = ''
alt_index = 0
for parsed_pin in pin_list:
if parsed_pin[2] == prev_s:
prev_s = parsed_pin[2]
parsed_pin[2] += '_ALT%d' % alt_index
store_pin(parsed_pin[0], parsed_pin[0], "")
alt_index += 1
if alt_index > ALTERNATE_DEFINITION:
ALTERNATE_DEFINITION += 1
else:
prev_s = parsed_pin[2]
alt_index = 0
line_to_write = " %s = %s,\n" % (parsed_pin[2].replace("-", "_"), parsed_pin[0])
out_h_file.write(line_to_write)
tokenize = re.compile(r"(\d+)|(\D+)").findall
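# Natural-sort helpers: tokenize splits a name into digit/non-digit runs so
# that, for example, "PA_2" orders before "PA_10" instead of lexicographically.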
def natural_sortkey(list_2_elem):
return tuple(int(num) if num else alpha for num, alpha in tokenize(list_2_elem[0]))
def natural_sortkey2(list_2_elem):
return tuple(int(num) if num else alpha for num, alpha in tokenize(list_2_elem[2]))
def natural_sortkey_uart(list_2_elem):
return tuple(int(num) if num else alpha for num, alpha in tokenize(list_2_elem[2].replace("USART", "UART").replace("LPUART", "ZUART")))
def natural_sortkey_i2c(list_2_elem):
return tuple(int(num) if num else alpha for num, alpha in tokenize(list_2_elem[2].replace("FMPI2C", "ZFMPI2C")))
def sort_my_lists():
gpio_list.sort(key=natural_sortkey)
adclist.sort(key=natural_sortkey)
daclist.sort(key=natural_sortkey)
i2cscl_list.sort(key=natural_sortkey_i2c) # first sort on name column
i2csda_list.sort(key=natural_sortkey_i2c) # first sort on name column
i2cscl_list.sort(key=natural_sortkey)
i2csda_list.sort(key=natural_sortkey)
pwm_list.sort(key=natural_sortkey2) # first sort on name column
pwm_list.sort(key=natural_sortkey)
uarttx_list.sort(key=natural_sortkey_uart) # first sort on name column
uartrx_list.sort(key=natural_sortkey_uart) # first sort on name column
uartcts_list.sort(key=natural_sortkey_uart) # first sort on name column
uartrts_list.sort(key=natural_sortkey_uart) # first sort on name column
uarttx_list.sort(key=natural_sortkey)
uartrx_list.sort(key=natural_sortkey)
uartcts_list.sort(key=natural_sortkey)
uartrts_list.sort(key=natural_sortkey)
spimosi_list.sort(key=natural_sortkey)
spimiso_list.sort(key=natural_sortkey)
spissel_list.sort(key=natural_sortkey)
spisclk_list.sort(key=natural_sortkey)
cantd_list.sort(key=natural_sortkey)
canrd_list.sort(key=natural_sortkey)
quadspidata0_list.sort(key=natural_sortkey)
quadspidata1_list.sort(key=natural_sortkey)
quadspidata2_list.sort(key=natural_sortkey)
quadspidata3_list.sort(key=natural_sortkey)
quadspisclk_list.sort(key=natural_sortkey)
quadspissel_list.sort(key=natural_sortkey)
octospidata0_list.sort(key=natural_sortkey)
octospidata1_list.sort(key=natural_sortkey)
octospidata2_list.sort(key=natural_sortkey)
octospidata3_list.sort(key=natural_sortkey)
octospidata4_list.sort(key=natural_sortkey)
octospidata5_list.sort(key=natural_sortkey)
octospidata6_list.sort(key=natural_sortkey)
octospidata7_list.sort(key=natural_sortkey)
octospidqs_list.sort(key=natural_sortkey)
octospisclk_list.sort(key=natural_sortkey)
octospissel_list.sort(key=natural_sortkey)
usb_list.sort(key=natural_sortkey)
usb_otgfs_list.sort(key=natural_sortkey)
usb_otghs_list.sort(key=natural_sortkey)
def clean_all_lists():
del gpio_list[:]
del adclist[:]
del daclist[:]
del i2cscl_list[:]
del i2csda_list[:]
del pwm_list[:]
del uarttx_list[:]
del uartrx_list[:]
del uartcts_list[:]
del uartrts_list[:]
del spimosi_list[:]
del spimiso_list[:]
del spissel_list[:]
del spisclk_list[:]
del cantd_list[:]
del canrd_list[:]
del eth_list[:]
del quadspidata0_list[:]
del quadspidata1_list[:]
del quadspidata2_list[:]
del quadspidata3_list[:]
del quadspisclk_list[:]
del quadspissel_list[:]
del octospidata0_list[:]
del octospidata1_list[:]
del octospidata2_list[:]
del octospidata3_list[:]
del octospidata4_list[:]
del octospidata5_list[:]
del octospidata6_list[:]
del octospidata7_list[:]
del octospidqs_list[:]
del octospisclk_list[:]
del octospissel_list[:]
del usb_list[:]
del usb_otgfs_list[:]
del usb_otghs_list[:]
del osc_list[:]
del sys_list[:]
def parse_pins():
global DUAL_PAD
pinregex = r"^(P[A-Z][0-9][0-5]?[_]?[C]?)"
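    # Matches pin names such as "PA3" or "PC10"; a trailing "C" (e.g. "PA0_C")
    # marks the analog dual-pad variant handled via DUAL_PAD below.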
itemlist = xml_mcu.getElementsByTagName("Pin")
for s in itemlist:
if "Variant" in s.attributes:
continue
pinregex_match = re.match(pinregex, s.attributes["Name"].value)
if pinregex_match:
pin = (
pinregex_match.group(0)[:2] + "_" + pinregex_match.group(0)[2:].replace("_", "")
            ) # pin formatted P<port>_<number>: PF_0
name = s.attributes["Name"].value.strip() # full name: "PF0 / OSC_IN"
if "_C" in name:
DUAL_PAD = True
if s.attributes["Type"].value == "I/O":
if "-" in s.attributes["Name"].value:
store_pin(pin, name, s.attributes["Name"].value)
else:
store_pin(pin, name, "")
if DUAL_PAD:
if "_C" in name:
store_pin(pin.replace("2C", "2").replace("3C", "3"), name, "")
else:
continue
siglist = s.getElementsByTagName("Signal")
for a in siglist:
sig = a.attributes["Name"].value.strip()
if "ADC" in sig:
store_adc(pin, name, sig)
if all(["DAC" in sig, "_OUT" in sig]):
store_dac(pin, name, sig)
if "I2C" in sig:
store_i2c(pin, name, sig)
if re.match("^TIM", sig) is not None: # ignore HRTIM
store_pwm(pin, name, sig)
if re.match("^(LPU|US|U)ART", sig) is not None:
store_uart(pin, name, sig)
if "SPI" in sig:
store_spi(pin, name, sig)
if "CAN" in sig:
store_can(pin, name, sig)
if "ETH" in sig:
store_eth(pin, name, sig)
if "QUADSPI" in sig or "OCTOSPI" in sig:
store_qspi(pin, name, sig)
if "OCTOSPI" in sig:
store_ospi(pin, name, sig)
if "USB" in sig:
store_usb(pin, name, sig)
if "RCC_OSC" in sig:
store_osc(pin, name, sig)
if "SYS_" in sig or "PWR_" in sig or "DEBUG_" in sig:
store_sys(pin, name, sig)
PinData = {}
PinLabel = {}
PinPuPd = {}
def parse_board_file(file_name):
global MCU_USERNAME
print(" * Board file: '%s'" % file_name)
board_file = open(file_name, "r")
ioc_pin_pattern = re.compile(r'(.*)\.([\w]*)=(.*)')
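    # .ioc board files are flat key=value lines such as "PA5.GPIO_Label=LD2";
    # the groups capture the pin, the property name and the property value.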
for line in board_file.readlines():
ioc_pin_match = re.match(ioc_pin_pattern, line)
if ioc_pin_match:
if ioc_pin_match.groups()[0] in PinData:
PinData[ioc_pin_match.groups()[0]][ioc_pin_match.groups()[1]] = ioc_pin_match.groups()[2]
else:
PinData[ioc_pin_match.groups()[0]] = {}
PinData[ioc_pin_match.groups()[0]][ioc_pin_match.groups()[1]] = ioc_pin_match.groups()[2]
ioc_mcu_match = re.match(r'Mcu\.Name=(.*)', line)
if ioc_mcu_match:
mcu_list.append("%s.xml" % ioc_mcu_match.groups()[0])
ioc_mcu_match = re.match(r'Mcu\.UserName=(.*)', line)
if ioc_mcu_match:
MCU_USERNAME = ioc_mcu_match.groups()[0]
board_file.close()
for EachPin in PinData:
PinLabel[EachPin] = ""
if "Signal" in PinData[EachPin]:
PinLabel[EachPin] = PinData[EachPin]["Signal"]
if "GPIO_Label" in PinData[EachPin]:
PinLabel[EachPin] = PinData[EachPin]["GPIO_Label"]
if "GPIO_PuPdOD" in PinData[EachPin]:
if PinData[EachPin]["GPIO_PuPdOD"] == "GPIO_PULLUP":
PinPuPd[EachPin] = "PULLUP"
elif PinData[EachPin]["GPIO_PuPdOD"] == "GPIO_NOPULL":
pass
else:
print("!!! error SCRIPT ISSUE with %s for %s" % (PinData[EachPin]["GPIO_PuPdOD"], EachPin))
if any(led in PinLabel[EachPin].upper() for led in
["LED", "LD1", "LD2", "LD3", "LD4", "LD5", "LD6", "LD7", "LD8", "LD9"]):
LED_list.append(EachPin)
elif any(button in PinLabel[EachPin].upper() for button in ["BUTTON", "B_USER", "BTN"]):
BUTTON_list.append(EachPin)
uart_hw_option = "NO_NEED"
for each_target in VCP_UART_LIST:
if each_target in file_name:
uart_hw_option = VCP_UART_LIST[each_target]
try:
if "STLK_RX" in PinLabel[EachPin] or "STLK_TX" in PinLabel[EachPin]:
# Patch waiting for CubeMX correction
if "RX" in PinData[EachPin]["Signal"]:
PinLabel[EachPin] = "STDIO_UART_RX"
STDIO_list[1] = EachPin
else:
PinLabel[EachPin] = "STDIO_UART_TX"
STDIO_list[0] = EachPin
elif "USART_RX" in PinLabel[EachPin]:
PinLabel[EachPin] = "STDIO_UART_RX"
STDIO_list[1] = EachPin
elif "USART_TX" in PinLabel[EachPin]:
PinLabel[EachPin] = "STDIO_UART_TX"
STDIO_list[0] = EachPin
elif "VCP_RX" in PinLabel[EachPin]:
PinLabel[EachPin] = "STDIO_UART_RX"
STDIO_list[1] = EachPin
elif "VCP_TX" in PinLabel[EachPin]:
PinLabel[EachPin] = "STDIO_UART_TX"
STDIO_list[0] = EachPin
elif "ST_LINK_UART1_RX" in PinLabel[EachPin]:
PinLabel[EachPin] = "STDIO_UART_RX"
STDIO_list[1] = EachPin
elif "ST_LINK_UART1_TX" in PinLabel[EachPin]:
PinLabel[EachPin] = "STDIO_UART_TX"
STDIO_list[0] = EachPin
elif "ST-LINK-UART1_RX" in PinLabel[EachPin]:
PinLabel[EachPin] = "STDIO_UART_RX"
STDIO_list[1] = EachPin
elif "ST-LINK-UART1_TX" in PinLabel[EachPin]:
PinLabel[EachPin] = "STDIO_UART_TX"
STDIO_list[0] = EachPin
elif "STLINK_RX" in PinLabel[EachPin] or "STLINK_TX" in PinLabel[EachPin]:
# Patch waiting for CubeMX correction
if "RX" in PinData[EachPin]["Signal"]:
PinLabel[EachPin] = "STDIO_UART_RX"
STDIO_list[1] = EachPin
else:
PinLabel[EachPin] = "STDIO_UART_TX"
STDIO_list[0] = EachPin
elif "%s_RX" % uart_hw_option in PinLabel[EachPin]:
PinLabel[EachPin] = "STDIO_UART_RX"
STDIO_list[1] = EachPin
elif "%s_TX" % uart_hw_option in PinLabel[EachPin]:
STDIO_list[0] = EachPin
PinLabel[EachPin] = "STDIO_UART_TX"
elif "_RESERVED" in PinLabel[EachPin]:
PinLabel[EachPin] = "RESERVED_RADIO"
except:
pass
# main
print ("\nScript version %s" % GENPINMAP_VERSION)
cur_dir = os.getcwd()
PeripheralPins_c_filename = "PeripheralPins.c"
PinNames_h_filename = "PinNames.h"
parser = argparse.ArgumentParser(
description=textwrap.dedent('''\
    Script will generate %s from the MCU xml description files available in the STM32_open_pin_data GitHub repo\n
    More information in targets/TARGET_STM/README.md''' % (PeripheralPins_c_filename)),
epilog=textwrap.dedent('''\
    Once generated, you have to review the file and comment out pins that cannot be used (board-specific HW, internal ADC channels, PWM outputs using the us ticker timer, ...)
'''),
formatter_class=RawTextHelpFormatter)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-l", "--list", help="list available mcu xml files description in STM32CubeMX", action="store_true")
group.add_argument("-b", "--boards", help="list available boards description in STM32CubeMX", action="store_true")
group.add_argument("-m", "--mcu", metavar='xml', help=textwrap.dedent('''\
    specify the MCU xml description file from STM32CubeMX to use (use double quotes).
Parameter can be a filter like L496 if you want to parse all L496 chips (-m STM32 to parse all).
'''))
group.add_argument("-t", "--target", metavar='HW', help=textwrap.dedent('''\
    specify the board description file from STM32CubeMX to use (use double quotes).
Parameter can be a filter like L496 (only the first file found will be parsed).
'''))
group.add_argument("-c", "--custom", help=textwrap.dedent('''\
specify a custom board .ioc file description to use (use double quotes).
'''))
parser.add_argument("-g", "--gpio", help="Add GPIO PinMap table", action="store_true")
parser.add_argument("-n", "--nopull", help="Avoid STM32_open_pin_data git pull", action="store_true")
parser.add_argument("-f", "--flat", help="All targets stored in targets_custom/TARGET_STM/", action="store_true")
args = parser.parse_args()
print ("\nChecking STM32_open_pin_data repo...")
if not os.path.exists("STM32_open_pin_data"):
print("*** git clone https://github.com/STMicroelectronics/STM32_open_pin_data.git ***")
try:
CONSOLE = subprocess.check_output(["git", "clone", r"https://github.com/STMicroelectronics/STM32_open_pin_data.git"], stderr=subprocess.STDOUT)
print("*** git clone done\n")
# print(CONSOLE)
except:
print("!!! Repo clone error !!!")
else:
if args.nopull:
print(" ... skipped\n")
else:
try:
os.chdir("STM32_open_pin_data")
CONSOLE = subprocess.check_output(["git", "pull"], stderr=subprocess.STDOUT).decode('ascii')
print("\t%s" % CONSOLE)
os.chdir("..")
except:
print("!!! git pull issue !!!")
exit(3)
cubemxdirMCU = os.path.join("STM32_open_pin_data", "mcu")
cubemxdirIP = os.path.join("STM32_open_pin_data", "mcu", "IP")
cubemxdirBOARDS = os.path.join("STM32_open_pin_data", "boards")
os.chdir("STM32_open_pin_data")
# print("*** git tag ***")
CONSOLE = subprocess.check_output(["git", "tag"], stderr=subprocess.STDOUT).decode('ascii')
# print(CONSOLE)
VERSION_LIST=CONSOLE.splitlines()
# print("\t%s" % VERSION_LIST)
cubemx_db_version = VERSION_LIST[-1]
os.chdir("..")
print ("STM32_open_pin_data DB version %s\n" % cubemx_db_version)
if args.gpio:
ADD_GPIO_PINMAP = 1
if args.flat:
FLAT_DIRECTORY = 1
if args.list:
file_count = 0
for f in fnmatch.filter(os.listdir(cubemxdirMCU), "STM32*.xml"):
print(f)
file_count += 1
print()
print("%i available xml files description" % file_count)
sys.exit(0)
if args.boards:
NucleoFileCount = 0
DiscoFileCount = 0
for f in fnmatch.filter(os.listdir(cubemxdirBOARDS), '*AllConfig.ioc'):
print(f)
if "Nucleo" in f:
NucleoFileCount += 1
elif "Discovery" in f:
DiscoFileCount += 1
print()
print("%2i available Nucleo files description" % NucleoFileCount)
print("%2i available Disco files description" % DiscoFileCount)
sys.exit(0)
if args.mcu:
#check input file exists
if os.path.isfile(os.path.join(cubemxdirMCU, args.mcu)):
mcu_list.append(args.mcu)
else:
mcu_list = fnmatch.filter(os.listdir(cubemxdirMCU), '*%s*' % args.mcu)
if len(mcu_list) == 0:
print (" ! ! ! " + args.mcu + " file not found")
print (" ! ! ! Check in " + cubemxdirMCU + " the correct name of this file")
print (" ! ! ! You may use double quotes for this file if it contains special characters")
sys.exit(1)
if args.target:
board_file_name = os.path.join(cubemxdirBOARDS, args.target)
if not(os.path.isfile(board_file_name)):
board_list = fnmatch.filter(os.listdir(cubemxdirBOARDS), '*%s*AllConfig.ioc' % args.target)
if len(board_list) == 0:
print (" ! ! ! No file contains " + args.target)
print (" ! ! ! Check in " + cubemxdirBOARDS + " the correct filter to apply")
sys.exit(1)
elif len(board_list) > 1:
print (" ! ! ! Multiple files contains " + args.target)
for board_elem in board_list: print (board_elem)
print (" ! ! ! Only the first one will be parsed\n")
board_file_name = os.path.join(cubemxdirBOARDS,board_list[0])
if not (os.path.isfile(board_file_name)):
print (" ! ! ! " + args.target + " file not found")
print (" ! ! ! Check in " + cubemxdirBOARDS + " the correct name of this file")
print (" ! ! ! You may use double quotes for this file if it contains special characters")
sys.exit(1)
# Add some hardcoded check
if "J01_" in board_file_name:
print("J01_Discovery_STM32F4-DISCO-AudioPack_STM32F407V_Board not parsed")
sys.exit(0)
elif "G00_" in board_file_name:
print("G00_Nucleo_NUCLEO-WB52VGY_STM32WB52VGY_Board not parsed")
sys.exit(0)
elif "C40_" in board_file_name:
print("C40_Discovery_STM32F4DISCOVERY_STM32F407VG_Board replaced by C47_Discovery_STM32F407G-DISC1_STM32F407VG_Board")
sys.exit(0)
elif "P-NUCLEO-WB55" in board_file_name:
print("Same board as NUCLEO-WB55 (J02)")
sys.exit(0)
elif "MultiToSingleCore_Board" in board_file_name:
print("Same board as PL0_Nucleo_NUCLEO-WL55JC1_STM32WL55JCI_Board_AllConfig.ioc")
sys.exit(0)
elif "WL55JC2" in board_file_name:
print("Same board as PL0_Nucleo_NUCLEO-WL55JC1_STM32WL55JCI_Board_AllConfig.ioc")
sys.exit(0)
elif "B-L475E-IOT01A2" in board_file_name:
print("Same board as B-L475E-IOT01A1 (42)")
sys.exit(0)
elif "USBDongle" in board_file_name:
print("USB dongle not parsed")
sys.exit(0)
elif "TrustZoneEnabled" in board_file_name:
print("TrustZoneEnabled boards not parsed")
sys.exit(0)
parse_board_file(board_file_name)
if "Nucleo" in board_file_name:
TARGET_NAME += "NUCLEO_"
elif "Discovery" in board_file_name:
TARGET_NAME += "DISCO_"
elif "Evaluation" in board_file_name:
TARGET_NAME += "EVAL_"
m = re.search(r'STM32([MFLGWH][\w]*)_Board', board_file_name)
if m:
TARGET_NAME += "%s" % m.group(1)
# specific case
if "-P" in board_file_name:
TARGET_NAME += "_P"
elif "-Q" in board_file_name:
TARGET_NAME += "_Q"
target_rename = { # manual renaming for some boards
"DISCO_L072C": "DISCO_L072CZ_LRWAN1",
"DISCO_L475V": "DISCO_L475VG_IOT01A",
"DISCO_L4S5V": "B_L4S5I_IOT01A",
"DISCO_G071RBT": "DISCO_G071RB",
"DISCO_L4R9A": "DISCO_L4R9I",
"NUCLEO_WB55R": "NUCLEO_WB55RG",
"NUCLEO_WL55JCI": "NUCLEO_WL55JC",
"NUCLEO_H743ZIT": "NUCLEO_H743ZI2",
"NUCLEO_H7A3ZIT_Q": "NUCLEO_H7A3ZI_Q",
"DISCO_F0DISCOVERY_STM32F051R8": "DISCO_F051R8",
"DISCO_F3DISCOVERY_STM32F303VC": "DISCO_F303VC",
"DISCO_F469NIH": "DISCO_F469NI",
"DISCO_F412ZGT": "DISCO_F412ZG",
"DISCO_F746NGH": "DISCO_F746NG",
"DISCO_F769NIH": "DISCO_F769NI",
"DISCO_H747XIH": "DISCO_H747I"
}
if TARGET_NAME in target_rename:
TARGET_NAME = target_rename[TARGET_NAME]
if "DISC1" in board_file_name:
TARGET_NAME += "_DISC1"
else:
sys.exit(1)
# Parse the user's custom board .ioc file
if args.custom:
parse_board_file(args.custom)
for mcu_file in mcu_list:
TargetNameList = []
# print("--- mcu_file %s ---" %(mcu_file))
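    # Some xml files describe several flash-size variants in one name, e.g.
    # "STM32L432K(B-C)Ux.xml"; expand the parenthesised group so that each
    # variant ("STM32L432KBUx", "STM32L432KCUx") gets its own target name.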
m2 = re.match("(.*)\(([\w])\-([\w])\)(.*)", mcu_file)
m3 = re.match("(.*)\(([\w])\-([\w])\-([\w])\)(.*)", mcu_file)
m4 = re.match("(.*)\(([\w])\-([\w])\-([\w])\-([\w])\)(.*)", mcu_file)
m5 = re.match("(.*)\(([\w])\-([\w])\-([\w])\-([\w])\-([\w])\)(.*)", mcu_file)
if m2:
new_mcu_file = m2.group(1) + m2.group(2) + m2.group(4)
TargetNameList.append(os.path.splitext(new_mcu_file)[0])
new_mcu_file = m2.group(1) + m2.group(3) + m2.group(4)
TargetNameList.append(os.path.splitext(new_mcu_file)[0])
elif m3:
new_mcu_file = m3.group(1) + m3.group(2) + m3.group(5)
TargetNameList.append(os.path.splitext(new_mcu_file)[0])
new_mcu_file = m3.group(1) + m3.group(3) + m3.group(5)
TargetNameList.append(os.path.splitext(new_mcu_file)[0])
new_mcu_file = m3.group(1) + m3.group(4) + m3.group(5)
TargetNameList.append(os.path.splitext(new_mcu_file)[0])
elif m4:
new_mcu_file = m4.group(1) + m4.group(2) + m4.group(6)
TargetNameList.append(os.path.splitext(new_mcu_file)[0])
new_mcu_file = m4.group(1) + m4.group(3) + m4.group(6)
TargetNameList.append(os.path.splitext(new_mcu_file)[0])
new_mcu_file = m4.group(1) + m4.group(4) + m4.group(6)
TargetNameList.append(os.path.splitext(new_mcu_file)[0])
new_mcu_file = m4.group(1) + m4.group(5) + m4.group(6)
TargetNameList.append(os.path.splitext(new_mcu_file)[0])
elif m5:
new_mcu_file = m5.group(1) + m5.group(2) + m5.group(7)
TargetNameList.append(os.path.splitext(new_mcu_file)[0])
new_mcu_file = m5.group(1) + m5.group(3) + m5.group(7)
TargetNameList.append(os.path.splitext(new_mcu_file)[0])
new_mcu_file = m5.group(1) + m5.group(4) + m5.group(7)
TargetNameList.append(os.path.splitext(new_mcu_file)[0])
new_mcu_file = m5.group(1) + m5.group(5) + m5.group(7)
TargetNameList.append(os.path.splitext(new_mcu_file)[0])
new_mcu_file = m5.group(1) + m5.group(6) + m5.group(7)
TargetNameList.append(os.path.splitext(new_mcu_file)[0])
elif "(" in mcu_file:
print("!!! error SCRIPT ISSUE with file %s" % mcu_file)
sys.exit(4)
else:
TargetNameList.append(os.path.splitext(mcu_file)[0])
for EachTargetName in TargetNameList:
# print("EachTargetName %s" % EachTargetName)
m = re.match("(STM32[\w]{2})", EachTargetName)
if m:
TARGET_FAMILY = m.group(0)
else:
print("!!! no TARGET_FAMILY")
sys.exit(2)
SearchSubFamily = EachTargetName[:9] + 'x' + EachTargetName[10:]
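        # Replace the pin-count letter with "x" to derive the sub-family,
        # e.g. "STM32F407VGTx" -> "STM32F407xGTx" -> TARGET_STM32F407xG.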
m = re.match("(STM32[\w]{6})", SearchSubFamily)
if m:
TARGET_SUBFAMILY = m.group(0)
else:
print("!!! no TARGET_SUBFAMILY")
sys.exit(2)
if args.mcu:
m = re.match("(STM32[\w]{7})", EachTargetName)
if m:
out_path = os.path.join(cur_dir, 'targets_custom', 'TARGET_STM', 'TARGET_%s' %TARGET_FAMILY, 'TARGET_%s' %TARGET_SUBFAMILY, 'TARGET_%s' % m.group(0))
if EachTargetName.endswith('A'):
out_path += "_A"
elif EachTargetName.endswith('P'):
out_path += "_P"
elif EachTargetName.endswith('Q'):
out_path += "_Q"
elif EachTargetName.endswith('N'):
out_path += "_N"
elif EachTargetName.endswith('S'):
out_path += "_S"
elif EachTargetName.endswith('X'):
out_path += "_X"
else:
print("!!! Warning output directory not found")
sys.exit(8)
else:
if EachTargetName == MCU_USERNAME:
if FLAT_DIRECTORY == 0:
out_path = os.path.join(cur_dir, 'targets_custom', 'TARGET_STM', 'TARGET_%s' % TARGET_FAMILY, 'TARGET_%s' % TARGET_SUBFAMILY, 'TARGET_%s' % TARGET_NAME)
else:
out_path = os.path.join(cur_dir, 'targets_custom', 'TARGET_STM', 'TARGET_%s' % TARGET_NAME)
else:
continue
print(" * Output directory: %s" % out_path)
if not (os.path.isdir(out_path)):
os.makedirs(out_path)
else:
print("!!! %s already exist" % out_path)
input_file_name = os.path.join(cubemxdirMCU, mcu_file)
print(" * Generating %s and %s with '%s'" % (PeripheralPins_c_filename, PinNames_h_filename, input_file_name))
output_cfilename = os.path.join(out_path, PeripheralPins_c_filename)
output_hfilename = os.path.join(out_path, PinNames_h_filename)
if os.path.isfile(output_cfilename):
print_debug(" * Requested %s file already exists and will be overwritten" % PeripheralPins_c_filename)
os.remove(output_cfilename)
out_c_file = open(output_cfilename, 'w')
out_h_file = open(output_hfilename, 'w')
#open input file
try:
xml_mcu = parse(input_file_name)
except:
# Patch waiting for CubeMX correction
if "STM32F042K6Tx" in input_file_name:
input_file_name = os.path.join(cubemxdirMCU, "STM32F042K(4-6)Tx.xml")
xml_mcu = parse(input_file_name)
elif "STM32F429Z" in input_file_name:
input_file_name = os.path.join(cubemxdirMCU, "STM32F429ZITx.xml")
xml_mcu = parse(input_file_name)
elif "STM32F746Z" in input_file_name:
input_file_name = os.path.join(cubemxdirMCU, "STM32F746ZGTx.xml")
xml_mcu = parse(input_file_name)
elif "STM32F767Z" in input_file_name:
input_file_name = os.path.join(cubemxdirMCU, "STM32F767ZGTx.xml")
xml_mcu = parse(input_file_name)
elif "STM32L011K4Tx" in input_file_name:
input_file_name = os.path.join(cubemxdirMCU, "STM32L011K(3-4)Tx.xml")
xml_mcu = parse(input_file_name)
elif "STM32L432KCUx" in input_file_name:
input_file_name = os.path.join(cubemxdirMCU, "STM32L432K(B-C)Ux.xml")
xml_mcu = parse(input_file_name)
elif "STM32F746N" in input_file_name:
input_file_name = os.path.join(cubemxdirMCU, "STM32F746NGHx.xml")
xml_mcu = parse(input_file_name)
else:
print ("\n ! ! ! Error in CubeMX file. File " + input_file_name + " doesn't exist")
print (" ! ! ! Check in " + cubemxdirMCU)
sys.exit(1)
gpiofile = find_gpio_file()
if gpiofile == "ERROR":
print("error: Could not find GPIO file")
sys.exit(1)
xml_gpio = parse(os.path.join(cubemxdirIP, "GPIO-" + gpiofile + "_Modes.xml"))
print (" * GPIO file: " + os.path.join(cubemxdirIP, "GPIO-" + gpiofile + "_Modes.xml"))
find_tim_mst()
parse_pins()
sort_my_lists()
print_header()
print_all_lists()
print_footer()
nb_pin = (len(gpio_list))
nb_connected_pin = len(PinLabel)
print (" * I/O pins found: %i connected: %i\n" % (nb_pin, nb_connected_pin))
clean_all_lists()
out_c_file.close()
out_h_file.close()
| apache-2.0 | 4,804,557,385,279,924,000 | 37.421826 | 172 | 0.527993 | false |
xuhdev/nikola | nikola/plugins/command/auto/__init__.py | 1 | 19075 | # -*- coding: utf-8 -*-
# Copyright © 2012-2017 Chris Warrick, Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Automatic rebuilds for Nikola."""
import mimetypes
import datetime
import re
import os
import sys
import subprocess
import asyncio
try:
import aiohttp
from aiohttp import web
from aiohttp.web_urldispatcher import StaticResource
from yarl import unquote
from aiohttp.web_exceptions import HTTPNotFound, HTTPForbidden
from aiohttp.web_response import Response
from aiohttp.web_fileresponse import FileResponse
except ImportError:
aiohttp = web = unquote = None
StaticResource = HTTPNotFound = HTTPForbidden = Response = FileResponse = object
try:
from watchdog.observers import Observer
except ImportError:
Observer = None
import webbrowser
import pkg_resources
from nikola.plugin_categories import Command
from nikola.utils import dns_sd, req_missing, get_logger, get_theme_path
LRJS_PATH = os.path.join(os.path.dirname(__file__), 'livereload.js')
if sys.platform == 'win32':
asyncio.set_event_loop(asyncio.ProactorEventLoop())
class CommandAuto(Command):
"""Automatic rebuilds for Nikola."""
name = "auto"
logger = None
has_server = True
doc_purpose = "builds and serves a site; automatically detects site changes, rebuilds, and optionally refreshes a browser"
dns_sd = None
delta_last_rebuild = datetime.timedelta(milliseconds=100)
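    # Debounce window: rebuild events that arrive within this delta of the
    # previous rebuild are dropped by run_rebuild_queue().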
cmd_options = [
{
'name': 'port',
'short': 'p',
'long': 'port',
'default': 8000,
'type': int,
'help': 'Port number (default: 8000)',
},
{
'name': 'address',
'short': 'a',
'long': 'address',
'type': str,
'default': '127.0.0.1',
'help': 'Address to bind (default: 127.0.0.1 -- localhost)',
},
{
'name': 'browser',
'short': 'b',
'long': 'browser',
'type': bool,
'help': 'Start a web browser',
'default': False,
},
{
'name': 'ipv6',
'short': '6',
'long': 'ipv6',
'default': False,
'type': bool,
'help': 'Use IPv6',
},
{
'name': 'no-server',
'long': 'no-server',
'default': False,
'type': bool,
'help': 'Disable the server, automate rebuilds only'
},
]
def _execute(self, options, args):
"""Start the watcher."""
self.logger = get_logger('auto')
self.sockets = []
self.rebuild_queue = asyncio.Queue()
self.last_rebuild = datetime.datetime.now()
if aiohttp is None and Observer is None:
req_missing(['aiohttp', 'watchdog'], 'use the "auto" command')
elif aiohttp is None:
req_missing(['aiohttp'], 'use the "auto" command')
elif Observer is None:
req_missing(['watchdog'], 'use the "auto" command')
if sys.argv[0].endswith('__main__.py'):
self.nikola_cmd = [sys.executable, '-m', 'nikola', 'build']
else:
self.nikola_cmd = [sys.argv[0], 'build']
if self.site.configuration_filename != 'conf.py':
self.nikola_cmd.append('--conf=' + self.site.configuration_filename)
# Run an initial build so we are up-to-date (synchronously)
self.logger.info("Rebuilding the site...")
subprocess.call(self.nikola_cmd)
port = options and options.get('port')
self.snippet = '''<script>document.write('<script src="http://'
+ (location.host || 'localhost').split(':')[0]
+ ':{0}/livereload.js?snipver=1"></'
+ 'script>')</script>
</head>'''.format(port)
# Deduplicate entries by using a set -- otherwise, multiple rebuilds are triggered
watched = set([
'templates/'
] + [get_theme_path(name) for name in self.site.THEMES])
for item in self.site.config['post_pages']:
watched.add(os.path.dirname(item[0]))
for item in self.site.config['FILES_FOLDERS']:
watched.add(item)
for item in self.site.config['GALLERY_FOLDERS']:
watched.add(item)
for item in self.site.config['LISTINGS_FOLDERS']:
watched.add(item)
for item in self.site._plugin_places:
watched.add(item)
# Nikola itself (useful for developers)
watched.add(pkg_resources.resource_filename('nikola', ''))
out_folder = self.site.config['OUTPUT_FOLDER']
if options and options.get('browser'):
browser = True
else:
browser = False
if options['ipv6']:
dhost = '::'
else:
dhost = '0.0.0.0'
host = options['address'].strip('[').strip(']') or dhost
# Set up asyncio server
webapp = web.Application()
webapp.router.add_get('/livereload.js', self.serve_livereload_js)
webapp.router.add_get('/robots.txt', self.serve_robots_txt)
webapp.router.add_route('*', '/livereload', self.websocket_handler)
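        # Everything else is served from the output folder; the custom
        # resource injects the livereload snippet into HTML responses and
        # resolves directory requests to their index.html.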
resource = IndexHtmlStaticResource(True, self.snippet, '', out_folder)
webapp.router.register_resource(resource)
# Prepare asyncio event loop
# Required for subprocessing to work
loop = asyncio.get_event_loop()
# Set debug setting
loop.set_debug(self.site.debug)
# Server can be disabled (Issue #1883)
self.has_server = not options['no-server']
if self.has_server:
handler = webapp.make_handler()
srv = loop.run_until_complete(loop.create_server(handler, host, port))
self.wd_observer = Observer()
# Watch output folders and trigger reloads
if self.has_server:
self.wd_observer.schedule(NikolaEventHandler(self.reload_page, loop), 'output/', recursive=True)
# Watch input folders and trigger rebuilds
for p in watched:
if os.path.exists(p):
self.wd_observer.schedule(NikolaEventHandler(self.run_nikola_build, loop), p, recursive=True)
# Watch config file (a bit of a hack, but we need a directory)
_conf_fn = os.path.abspath(self.site.configuration_filename or 'conf.py')
_conf_dn = os.path.dirname(_conf_fn)
self.wd_observer.schedule(ConfigEventHandler(_conf_fn, self.run_nikola_build, loop), _conf_dn, recursive=False)
self.wd_observer.start()
if not self.has_server:
self.logger.info("Watching for changes...")
# Run the event loop forever (no server mode).
try:
# Run rebuild queue
loop.run_until_complete(self.run_rebuild_queue())
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
self.wd_observer.stop()
self.wd_observer.join()
loop.close()
return
host, port = srv.sockets[0].getsockname()
self.logger.info("Serving HTTP on {0} port {1}...".format(host, port))
if browser:
if options['ipv6'] or '::' in host:
server_url = "http://[{0}]:{1}/".format(host, port)
else:
server_url = "http://{0}:{1}/".format(host, port)
self.logger.info("Opening {0} in the default web browser...".format(server_url))
            webbrowser.open(server_url)
# Run the event loop forever and handle shutdowns.
try:
# Run rebuild queue
loop.run_until_complete(self.run_rebuild_queue())
self.dns_sd = dns_sd(port, (options['ipv6'] or '::' in host))
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
self.logger.info("Server is shutting down.")
if self.dns_sd:
self.dns_sd.Reset()
srv.close()
            self.rebuild_queue.put_nowait((None, None))  # shutdown sentinel for run_rebuild_queue
loop.run_until_complete(srv.wait_closed())
loop.run_until_complete(webapp.shutdown())
loop.run_until_complete(handler.shutdown(5.0))
loop.run_until_complete(webapp.cleanup())
self.wd_observer.stop()
self.wd_observer.join()
loop.close()
@asyncio.coroutine
def run_nikola_build(self, event):
"""Rebuild the site."""
# Move events have a dest_path, some editors like gedit use a
# move on larger save operations for write protection
event_path = event.dest_path if hasattr(event, 'dest_path') else event.src_path
fname = os.path.basename(event_path)
if (fname.endswith('~') or
fname.startswith('.') or
'__pycache__' in event_path or
event_path.endswith(('.pyc', '.pyo', '.pyd', '_bak')) or
event.is_directory): # Skip on folders, these are usually duplicates
return
self.logger.debug('Queuing rebuild from {0}'.format(event_path))
yield from self.rebuild_queue.put((datetime.datetime.now(), event_path))
@asyncio.coroutine
def run_rebuild_queue(self):
"""Run rebuilds from a queue (Nikola can only build in a single instance)."""
while True:
date, event_path = yield from self.rebuild_queue.get()
if date is None:
# Shutdown queue
return
if date < (self.last_rebuild + self.delta_last_rebuild):
self.logger.debug("Skipping rebuild from {0} (within delta)".format(event_path))
continue
self.last_rebuild = datetime.datetime.now()
self.logger.info('REBUILDING SITE (from {0})'.format(event_path))
p = yield from asyncio.create_subprocess_exec(*self.nikola_cmd, stderr=subprocess.PIPE)
exit_code = yield from p.wait()
error = yield from p.stderr.read()
errord = error.decode('utf-8')
if exit_code != 0:
self.logger.error(errord)
yield from self.send_to_websockets({'command': 'alert', 'message': errord})
else:
self.logger.info("Rebuild successful\n" + errord)
@asyncio.coroutine
def reload_page(self, event):
"""Reload the page."""
# Move events have a dest_path, some editors like gedit use a
# move on larger save operations for write protection
event_path = event.dest_path if hasattr(event, 'dest_path') else event.src_path
p = os.path.relpath(event_path, os.path.abspath(self.site.config['OUTPUT_FOLDER'])).replace(os.sep, '/')
self.logger.info('REFRESHING: {0}'.format(p))
yield from self.send_to_websockets({'command': 'reload', 'path': p, 'liveCSS': True})
@asyncio.coroutine
def serve_livereload_js(self, request):
"""Handle requests to /livereload.js and serve the JS file."""
return FileResponse(LRJS_PATH)
@asyncio.coroutine
def serve_robots_txt(self, request):
"""Handle requests to /robots.txt."""
return Response(body=b'User-Agent: *\nDisallow: /\n', content_type='text/plain', charset='utf-8')
@asyncio.coroutine
def websocket_handler(self, request):
"""Handle requests to /livereload and initiate WebSocket communication."""
ws = web.WebSocketResponse()
yield from ws.prepare(request)
self.sockets.append(ws)
while True:
msg = yield from ws.receive()
self.logger.debug("Received message: {0}".format(msg))
if msg.type == aiohttp.WSMsgType.TEXT:
message = msg.json()
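                # LiveReload handshake: the client opens with "hello" and the
                # server replies with the protocol versions it supports.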
if message['command'] == 'hello':
response = {
'command': 'hello',
'protocols': [
'http://livereload.com/protocols/official-7',
],
'serverName': 'Nikola Auto (livereload)',
}
yield from ws.send_json(response)
elif message['command'] != 'info':
self.logger.warn("Unknown command in message: {0}".format(message))
elif msg.type == aiohttp.WSMsgType.CLOSED:
break
elif msg.type == aiohttp.WSMsgType.CLOSE:
self.logger.debug("Closing WebSocket")
yield from ws.close()
break
elif msg.type == aiohttp.WSMsgType.ERROR:
self.logger.error('WebSocket connection closed with exception {0}'.format(ws.exception()))
break
else:
self.logger.warn("Received unknown message: {0}".format(msg))
self.sockets.remove(ws)
self.logger.debug("WebSocket connection closed: {0}".format(ws))
return ws
@asyncio.coroutine
def send_to_websockets(self, message):
"""Send a message to all open WebSockets."""
to_delete = []
for ws in self.sockets:
if ws.closed:
to_delete.append(ws)
continue
try:
yield from ws.send_json(message)
except RuntimeError as e:
if 'closed' in e.args[0]:
self.logger.warn("WebSocket {0} closed uncleanly".format(ws))
to_delete.append(ws)
else:
raise
for ws in to_delete:
self.sockets.remove(ws)
class IndexHtmlStaticResource(StaticResource):
"""A StaticResource implementation that serves /index.html in directory roots."""
modify_html = True
snippet = "</head>"
def __init__(self, modify_html=True, snippet="</head>", *args, **kwargs):
"""Initialize a resource."""
self.modify_html = modify_html
self.snippet = snippet
super().__init__(*args, **kwargs)
@asyncio.coroutine
def _handle(self, request):
"""Handle incoming requests (pass to handle_file)."""
filename = unquote(request.match_info['filename'])
ret = yield from self.handle_file(request, filename)
return ret
@asyncio.coroutine
def handle_file(self, request, filename, from_index=None):
"""Handle file requests."""
try:
filepath = self._directory.joinpath(filename).resolve()
if not self._follow_symlinks:
filepath.relative_to(self._directory)
except (ValueError, FileNotFoundError) as error:
# relatively safe
raise HTTPNotFound() from error
except Exception as error:
# perm error or other kind!
request.app.logger.exception(error)
raise HTTPNotFound() from error
        # on opening a dir, load its contents if allowed
if filepath.is_dir():
if filename.endswith('/') or not filename:
ret = yield from self.handle_file(request, filename + 'index.html', from_index=filename)
else:
ret = yield from self.handle_file(request, filename + '/index.html', from_index=filename)
elif filepath.is_file():
ct, encoding = mimetypes.guess_type(str(filepath))
encoding = encoding or 'utf-8'
if ct == 'text/html' and self.modify_html:
if sys.version_info[0] == 3 and sys.version_info[1] <= 5:
# Python 3.4 and 3.5 do not accept pathlib.Path objects in calls to open()
filepath = str(filepath)
with open(filepath, 'r', encoding=encoding) as fh:
text = fh.read()
text = self.transform_html(text)
ret = Response(text=text, content_type=ct, charset=encoding)
else:
ret = FileResponse(filepath, chunk_size=self._chunk_size)
elif from_index:
filepath = self._directory.joinpath(from_index).resolve()
try:
return Response(text=self._directory_as_html(filepath),
content_type="text/html")
except PermissionError:
raise HTTPForbidden
else:
raise HTTPNotFound
return ret
def transform_html(self, text):
"""Apply some transforms to HTML content."""
# Inject livereload.js
text = text.replace('</head>', self.snippet, 1)
# Disable <base> tag
        text = re.sub(r'<base\s([^>]*)>', r'<!--base \g<1>-->', text, flags=re.IGNORECASE)
return text
# Based on code from the 'hachiko' library by John Biesnecker — thanks!
# https://github.com/biesnecker/hachiko
class NikolaEventHandler:
"""A Nikola-specific event handler for Watchdog. Based on code from hachiko."""
def __init__(self, function, loop):
"""Initialize the handler."""
self.function = function
self.loop = loop
@asyncio.coroutine
def on_any_event(self, event):
"""Handle all file events."""
yield from self.function(event)
def dispatch(self, event):
"""Dispatch events to handler."""
        self.loop.call_soon_threadsafe(asyncio.ensure_future, self.on_any_event(event))
class ConfigEventHandler(NikolaEventHandler):
"""A Nikola-specific handler for Watchdog that handles the config file (as a workaround)."""
def __init__(self, configuration_filename, function, loop):
"""Initialize the handler."""
self.configuration_filename = configuration_filename
self.function = function
self.loop = loop
@asyncio.coroutine
def on_any_event(self, event):
"""Handle file events if they concern the configuration file."""
if event._src_path == self.configuration_filename:
yield from self.function(event)
| mit | 1,237,268,884,878,527,200 | 37.220441 | 126 | 0.582529 | false |
astrobin/astrobin | astrobin/tests/test_image.py | 1 | 156608 | # -*- coding: UTF-8
import re
import sys
import time
from datetime import date, timedelta
from django.conf import settings
from django.contrib.auth.models import Group, User
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.test import TestCase, override_settings
from mock import patch
from astrobin.enums import SubjectType
from astrobin.enums.full_size_display_limitation import FullSizeDisplayLimitation
from astrobin.enums.license import License
from astrobin.enums.mouse_hover_image import MouseHoverImage
from astrobin.models import (
Image,
ImageRevision,
Telescope,
Mount,
Camera,
FocalReducer,
Software,
Filter,
Accessory,
DeepSky_Acquisition,
SolarSystem_Acquisition,
Location)
from astrobin.tests.generators import Generators
from astrobin_apps_groups.models import Group as AstroBinGroup
from astrobin_apps_platesolving.models import Solution
from astrobin_apps_platesolving.solver import Solver
from astrobin_apps_platesolving.tests.platesolving_generators import PlateSolvingGenerators
from nested_comments.models import NestedComment
from toggleproperties.models import ToggleProperty
class ImageTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(
'test', '[email protected]', 'password')
self.user2 = User.objects.create_user(
'test2', '[email protected]', 'password')
# Test gear
self.imaging_telescopes = [
Telescope.objects.create(
make="Test make", name="Test imaging telescope")]
self.guiding_telescopes = [
Telescope.objects.create(
make="Test make", name="Test guiding telescope")]
self.mounts = [
Mount.objects.create(
make="Test make", name="Test mount")]
self.imaging_cameras = [
Camera.objects.create(
make="Test make", name="Test imaging camera")]
self.guiding_cameras = [
Camera.objects.create(
make="Test make", name="Test guiding camera")]
self.focal_reducers = [
FocalReducer.objects.create(
make="Test make", name="Test focal reducer")]
self.software = [
Software.objects.create(
make="Test make", name="Test software")]
self.filters = [
Filter.objects.create(
make="Test make", name="Test filter")]
self.accessories = [
Accessory.objects.create(
make="Test make", name="Test accessory")]
profile = self.user.userprofile
profile.telescopes = self.imaging_telescopes + self.guiding_telescopes
profile.mounts = self.mounts
profile.cameras = self.imaging_cameras + self.guiding_cameras
profile.focal_reducers = self.focal_reducers
profile.software = self.software
profile.filters = self.filters
profile.accessories = self.accessories
###########################################################################
# HELPERS #
###########################################################################
def _do_upload(self, filename, wip=False):
# type: (basestring, bool, bool) -> None
data = {'image_file': open(filename, 'rb')}
if wip:
data['wip'] = True
return self.client.post(
reverse('image_upload_process'),
data,
follow=True)
def _do_upload_revision(self, image, filename, description=None, skip_notifications=False, mark_as_final=True):
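        # Posts a new revision for `image` via the classic uploader endpoint;
        # the optional flags map onto the corresponding form fields.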
data = {
'image_id': image.get_id(),
'image_file': open(filename, 'rb'),
'description': description,
}
if skip_notifications:
data['skip_notifications'] = True
if mark_as_final:
data['mark_as_final'] = u'on'
return self.client.post(
reverse('image_revision_upload_process'),
data,
follow=True)
def _get_last_image(self):
return Image.objects_including_wip.all().order_by('-id')[0]
def _get_last_image_revision(self):
return ImageRevision.objects.all().order_by('-id')[0]
def _assert_message(self, response, tags, content):
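        # Asserts that the Django messages framework produced a message with
        # the given tags whose text contains `content`.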
messages = response.context[0]['messages']
        if len(messages) == 0:
            self.fail("Expected a message in the response but found none")
found = False
for message in messages:
if message.tags == tags and content in message.message:
found = True
self.assertTrue(found)
###########################################################################
# View tests #
###########################################################################
def test_image_upload_process_view(self):
self.client.login(username='test', password='password')
# Test file with invalid extension
response = self._do_upload('astrobin/fixtures/invalid_file')
self.assertRedirects(
response,
reverse('image_upload') + '?forceClassicUploader',
status_code=302,
target_status_code=200)
self._assert_message(response, "error unread", "Invalid image")
# Test file with invalid content
response = self._do_upload('astrobin/fixtures/invalid_file.jpg')
self.assertRedirects(
response,
reverse('image_upload') + '?forceClassicUploader',
status_code=302,
target_status_code=200)
self._assert_message(response, "error unread", "Invalid image")
# Test failure due to full use of Free membership
self.user.userprofile.premium_counter = settings.PREMIUM_MAX_IMAGES_FREE
self.user.userprofile.save(keep_deleted=True)
response = self._do_upload('astrobin/fixtures/test.jpg')
self.assertRedirects(
response,
reverse('image_upload') + '?forceClassicUploader',
status_code=302,
target_status_code=200)
self._assert_message(response, "error unread", "Please upgrade")
self.user.userprofile.premium_counter = 0
self.user.userprofile.save(keep_deleted=True)
# Test failure due to read-only mode
with self.settings(READONLY_MODE=True):
response = self._do_upload('astrobin/fixtures/test.jpg')
self.assertRedirects(
response,
reverse('image_upload') + '?forceClassicUploader',
status_code=302,
target_status_code=200)
self._assert_message(response, "error unread", "read-only mode")
# Test missing image file
response = self.client.post(
reverse('image_upload_process'),
follow=True)
self.assertRedirects(
response,
reverse('image_upload') + '?forceClassicUploader',
status_code=302,
target_status_code=200)
self._assert_message(response, "error unread", "Invalid image")
# Test indexed PNG
response = self._do_upload('astrobin/fixtures/test_indexed.png')
image = self._get_last_image()
self.assertRedirects(
response,
reverse('image_edit_thumbnails', kwargs={'id': image.get_id()}),
status_code=302,
target_status_code=200)
self._assert_message(response, "warning unread", "Indexed PNG")
image.delete()
# Test WIP
response = self._do_upload('astrobin/fixtures/test.jpg', wip=True)
image = self._get_last_image()
self.assertEqual(image.is_wip, True)
self.assertIsNone(image.published)
image.delete()
# Test successful upload workflow
response = self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self.assertRedirects(
response,
reverse('image_edit_thumbnails', kwargs={'id': image.get_id()}),
status_code=302,
target_status_code=200)
self.assertEqual(image.title, u"")
self.assertTrue((image.published - image.uploaded).total_seconds() < 1)
# Test thumbnails
response = self.client.post(
reverse('image_edit_thumbnails', kwargs={'id': image.get_id()}),
{
'image_id': image.get_id(),
'square_cropping': '100, 0, 100, 0',
'submit_watermark': True,
},
follow=True)
image = Image.objects.get(pk=image.pk)
self.assertRedirects(
response,
reverse('image_edit_watermark', kwargs={'id': image.get_id()}) + "?upload",
status_code=302,
target_status_code=200)
# Test watermark
response = self.client.post(
reverse('image_edit_save_watermark'),
{
'image_id': image.get_id(),
'watermark': True,
'watermark_text': "Watermark test",
'watermark_position': 0,
'watermark_size': 'S',
'watermark_opacity': 100
},
follow=True)
self.assertRedirects(
response,
reverse('image_edit_basic', kwargs={'id': image.get_id()}) + "?upload",
status_code=302,
target_status_code=200)
image = Image.objects.get(pk=image.pk)
self.assertEqual(image.watermark, True)
self.assertEqual(image.watermark_text, "Watermark test")
self.assertEqual(image.watermark_position, 0)
self.assertEqual(image.watermark_size, 'S')
self.assertEqual(image.watermark_opacity, 100)
# Test basic settings
location, created = Location.objects.get_or_create(
name="Test location")
self.user.userprofile.location_set.add(location)
# Test missing data_source
response = self.client.post(
reverse('image_edit_basic', args=(image.get_id(),)),
{
'submit_gear': True,
'title': "Test title",
'link': "http://www.example.com",
'link_to_fits': "http://www.example.com/fits",
'acquisition_type': 'REGULAR',
'subject_type': SubjectType.OTHER,
'locations': [location.pk],
'description': "Image description",
'allow_comments': True
},
follow=True)
self._assert_message(response, "error unread", "There was one or more errors processing the form")
# Test missing remote_source
response = self.client.post(
reverse('image_edit_basic', args=(image.get_id(),)),
{
'submit_gear': True,
'title': "Test title",
'link': "http://www.example.com",
'link_to_fits': "http://www.example.com/fits",
'acquisition_type': 'REGULAR',
'data_source': 'AMATEUR_HOSTING',
'subject_type': SubjectType.OTHER,
'locations': [location.pk],
'description': "Image description",
'allow_comments': True
},
follow=True)
self._assert_message(response, "error unread", "There was one or more errors processing the form")
response = self.client.post(
reverse('image_edit_basic', args=(image.get_id(),)),
{
'submit_gear': True,
'title': "Test title",
'link': "http://www.example.com",
'link_to_fits': "http://www.example.com/fits",
'acquisition_type': 'REGULAR',
'data_source': 'OTHER',
'subject_type': SubjectType.OTHER,
'locations': [location.pk],
'description': "Image description",
'allow_comments': True
},
follow=True)
image = Image.objects.get(pk=image.pk)
self.assertRedirects(
response,
reverse('image_edit_gear', kwargs={'id': image.get_id()}) + "?upload",
status_code=302,
target_status_code=200)
self.assertEqual(image.title, "Test title")
self.assertEqual(image.link, "http://www.example.com")
self.assertEqual(image.link_to_fits, "http://www.example.com/fits")
self.assertEqual(image.subject_type, SubjectType.OTHER)
self.assertEqual(image.solar_system_main_subject, None)
self.assertEqual(image.locations.count(), 1)
self.assertEqual(image.locations.all().first().pk, location.pk)
self.assertEqual(image.description, "Image description")
self.assertEqual(image.allow_comments, True)
self.user.userprofile.location_set.clear()
response = self.client.post(
reverse('image_edit_gear', args=(image.get_id(),)),
{
'image_id': image.pk,
'submit_acquisition': True,
'imaging_telescopes': ','.join(["%d" % x.pk for x in self.imaging_telescopes]),
'guiding_telescopes': ','.join(["%d" % x.pk for x in self.guiding_telescopes]),
'mounts': ','.join(["%d" % x.pk for x in self.mounts]),
'imaging_cameras': ','.join(["%d" % x.pk for x in self.imaging_cameras]),
'guiding_cameras': ','.join(["%d" % x.pk for x in self.guiding_cameras]),
'focal_reducers': ','.join(["%d" % x.pk for x in self.focal_reducers]),
'software': ','.join(["%d" % x.pk for x in self.software]),
'filters': ','.join(["%d" % x.pk for x in self.filters]),
'accessories': ','.join(["%d" % x.pk for x in self.accessories])
},
follow=True)
image = Image.objects.get(pk=image.pk)
self.assertRedirects(
response,
reverse('image_edit_acquisition', kwargs={'id': image.get_id()}) + "?upload",
status_code=302,
target_status_code=200)
# Test simple deep sky acquisition
today = time.strftime('%Y-%m-%d')
response = self.client.post(
reverse('image_edit_save_acquisition'),
{
'image_id': image.get_id(),
'edit_type': 'deep_sky',
'advanced': 'false',
'date': today,
'number': 10,
'duration': 1200
},
follow=True)
self.assertRedirects(
response,
reverse('image_detail', kwargs={'id': image.get_id()}),
status_code=302,
target_status_code=200)
image = Image.objects.get(pk=image.pk)
acquisition = image.acquisition_set.all()[0].deepsky_acquisition
self.assertEqual(acquisition.date.strftime('%Y-%m-%d'), today)
self.assertEqual(acquisition.number, 10)
self.assertEqual(acquisition.duration, 1200)
image.delete()
@patch("astrobin.signals.push_notification")
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_upload_process_view_skip_notifications(self, push_notification):
self.client.login(username='test', password='password')
ToggleProperty.objects.create(
property_type='follow',
user=self.user2,
content_object=self.user
)
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self.assertTrue(push_notification.called)
push_notification.reset_mock()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg', skip_notifications=True)
self.assertFalse(push_notification.called)
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_upload_process_view_dont_mark_as_final(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg', mark_as_final=False)
revision = self._get_last_image_revision()
self.assertTrue(image.is_final)
self.assertFalse(revision.is_final)
def test_image_upload_process_view_image_too_large_free(self):
self.client.login(username='test', password='password')
with self.settings(PREMIUM_MAX_IMAGE_SIZE_FREE_2020=10 * 1024):
response = self._do_upload('astrobin/fixtures/test.jpg')
self.assertContains(response, "this image is too large")
self.assertContains(response, "maximum allowed image size is 10.0")
def test_image_upload_process_view_image_too_large_lite(self):
self.client.login(username='test', password='password')
us = Generators.premium_subscription(self.user, "AstroBin Lite")
with self.settings(PREMIUM_MAX_IMAGE_SIZE_LITE_2020=1):
response = self._do_upload('astrobin/fixtures/test.jpg')
self.assertNotContains(response, "this image is too large")
us.delete()
def test_image_upload_process_view_image_too_large_lite_2020(self):
self.client.login(username='test', password='password')
us = Generators.premium_subscription(self.user, "AstroBin Lite 2020+")
with self.settings(PREMIUM_MAX_IMAGE_SIZE_LITE_2020=10 * 1024):
response = self._do_upload('astrobin/fixtures/test.jpg')
self.assertContains(response, "this image is too large")
self.assertContains(response, "maximum allowed image size is 10.0")
us.delete()
def test_image_upload_process_view_image_too_large_premium(self):
self.client.login(username='test', password='password')
us = Generators.premium_subscription(self.user, "AstroBin Premium")
with self.settings(PREMIUM_MAX_IMAGE_SIZE_PREMIUM_2020=1):
response = self._do_upload('astrobin/fixtures/test.jpg')
self.assertNotContains(response, "this image is too large")
us.delete()
def test_image_upload_process_view_image_too_large_premium_2020(self):
self.client.login(username='test', password='password')
us = Generators.premium_subscription(self.user, "AstroBin Premium 2020+")
with self.settings(PREMIUM_MAX_IMAGE_SIZE_PREMIUM_2020=10 * 1024):
response = self._do_upload('astrobin/fixtures/test.jpg')
self.assertContains(response, "this image is too large")
self.assertContains(response, "maximum allowed image size is 10.0")
us.delete()
def test_image_upload_process_view_inactive_subscription(self):
self.client.login(username='test', password='password')
premium = Generators.premium_subscription(self.user, "AstroBin Premium 2020+")
response = self.client.get(reverse('image_upload'))
self.assertNotContains(response, "Your Lite or Premium subscription is not active")
premium.expires = date.today() - timedelta(1)
premium.save()
response = self.client.get(reverse('image_upload'))
self.assertContains(response, "Your Lite or Premium subscription is not active")
premium.expires = date.today() + timedelta(1)
premium.save()
ultimate = Generators.premium_subscription(self.user, "AstroBin Ultimate 2020+")
ultimate.expires = date.today() - timedelta(1)
ultimate.save()
response = self.client.get(reverse('image_upload'))
self.assertNotContains(response, "Your Lite or Premium subscription is not active")
def test_image_upload_process_view_image_too_large_ultimate_2020(self):
self.client.login(username='test', password='password')
us = Generators.premium_subscription(self.user, "AstroBin Ultimate 2020+")
with self.settings(PREMIUM_MAX_IMAGE_SIZE_PREMIUM_2020=1):
response = self._do_upload('astrobin/fixtures/test.jpg')
self.assertNotContains(response, "this image is too large")
us.delete()
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
@override_settings(PREMIUM_MAX_IMAGE_SIZE_FREE_2020=sys.maxsize)
def test_image_upload_revision_process_view_image_too_large_free(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
with self.settings(PREMIUM_MAX_IMAGE_SIZE_FREE_2020=10 * 1024):
response = self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
self.assertContains(response, "this image is too large")
self.assertContains(response, "maximum allowed image size is 10.0")
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
@override_settings(PREMIUM_MAX_IMAGE_SIZE_FREE_2020=sys.maxsize)
def test_image_upload_revision_process_view_image_too_large_lite(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
us = Generators.premium_subscription(self.user, "AstroBin Lite")
with self.settings(PREMIUM_MAX_IMAGE_SIZE_LITE_2020=10 * 1024):
response = self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
self.assertNotContains(response, "this image is too large")
us.delete()
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
@override_settings(PREMIUM_MAX_IMAGE_SIZE_FREE_2020=sys.maxsize)
def test_image_upload_revision_process_view_image_too_large_lite_2020(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
us = Generators.premium_subscription(self.user, "AstroBin Lite 2020+")
with self.settings(PREMIUM_MAX_IMAGE_SIZE_LITE_2020=10 * 1024):
response = self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
self.assertContains(response, "this image is too large")
us.delete()
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
@override_settings(PREMIUM_MAX_IMAGE_SIZE_FREE_2020=sys.maxsize)
def test_image_upload_revision_process_view_image_too_large_premium(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
us = Generators.premium_subscription(self.user, "AstroBin Premium")
with self.settings(PREMIUM_MAX_IMAGE_SIZE_PREMIUM_2020=10 * 1024):
response = self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
self.assertNotContains(response, "this image is too large")
us.delete()
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
@override_settings(PREMIUM_MAX_IMAGE_SIZE_FREE_2020=sys.maxsize)
def test_image_upload_revision_process_view_image_too_large_premium_2020(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
us = Generators.premium_subscription(self.user, "AstroBin Premium 2020+")
with self.settings(PREMIUM_MAX_IMAGE_SIZE_PREMIUM_2020=10 * 1024):
response = self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
self.assertContains(response, "this image is too large")
us.delete()
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
@override_settings(PREMIUM_MAX_IMAGE_SIZE_FREE_2020=sys.maxsize)
def test_image_upload_revision_process_view_image_too_large_ultimate_2020(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
us = Generators.premium_subscription(self.user, "AstroBin Ultimate 2020+")
with self.settings(PREMIUM_MAX_IMAGE_SIZE_PREMIUM_2020=10 * 1024):
response = self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
self.assertNotContains(response, "this image is too large")
us.delete()
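# Revision count limits: Free allows 0 revisions, Lite 2020+ allows 1, Premium 2020+
# honours PREMIUM_MAX_REVISIONS_PREMIUM_2020, and legacy Premium accepts the upload
# without hitting a limit.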
def test_image_upload_revision_process_view_too_many_revisions_free(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
response = self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
self.assertContains(response, "you have reached the maximum amount of allowed image revisions")
self.assertContains(response, "Under your current subscription, the limit is 0 revisions per image")
def test_image_upload_revision_process_view_too_many_revisions_premium(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
us = Generators.premium_subscription(self.user, "AstroBin Premium")
response = self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
self.assertContains(response, "Image uploaded. Thank you!")
us.delete()
def test_image_upload_revision_process_view_too_many_revisions_lite_2020(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
us = Generators.premium_subscription(self.user, "AstroBin Lite 2020+")
response = self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
self.assertContains(response, "Image uploaded. Thank you!")
response = self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
self.assertContains(response, "you have reached the maximum amount of allowed image revisions")
self.assertContains(response, "Under your current subscription, the limit is 1 revision per image")
us.delete()
def test_image_upload_revision_process_view_too_many_revisions_premium_2020(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
us = Generators.premium_subscription(self.user, "AstroBin Premium 2020+")
response = self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
self.assertContains(response, "Image uploaded. Thank you!")
with self.settings(PREMIUM_MAX_REVISIONS_PREMIUM_2020=1):
response = self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
self.assertContains(response, "you have reached the maximum amount of allowed image revisions")
self.assertContains(response, "Under your current subscription, the limit is 1 revision per image")
us.delete()
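# The mouse_hover_image field selects what the detail page shows on hover: a revision,
# the inverted rendition, the original, or (by default, when a successful plate-solving
# Solution exists) the solution overlay.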
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_detail_view_original_revision_overlay(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
Solution.objects.create(
status=Solver.SUCCESS,
content_object=image
)
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg', "Test revision description")
revision = self._get_last_image_revision()
image.mouse_hover_image = "REVISION__%s" % revision.label
image.save()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertContains(response, "hover-overlay-original-revision")
self.assertNotContains(response, "hover-overlay-solution")
image.delete()
self.client.logout()
def test_image_detail_view_original_solution_overlay(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
Solution.objects.create(
status=Solver.SUCCESS,
content_object=image
)
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertContains(response, "hover-overlay-solution")
image.delete()
self.client.logout()
def test_image_detail_view_original_inverted_overlay(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
Solution.objects.create(
status=Solver.SUCCESS,
content_object=image
)
image.mouse_hover_image = MouseHoverImage.INVERTED
image.save()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertContains(response, "hover-overlay-original-inverted")
self.assertNotContains(response, "hover-overlay-solution")
image.delete()
self.client.logout()
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_detail_view_revision_original_overlay(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg', "Test revision description")
revision = self._get_last_image_revision()
Solution.objects.create(
status=Solver.SUCCESS,
content_object=revision
)
revision.mouse_hover_image = "ORIGINAL"
revision.save()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), "r": revision.label}))
self.assertContains(response, "hover-overlay-revision-original")
self.assertNotContains(response, "hover-overlay-solution")
image.delete()
self.client.logout()
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_detail_view_revision_solution_overlay(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg', "Test revision description")
revision = self._get_last_image_revision()
Solution.objects.create(
status=Solver.SUCCESS,
content_object=revision
)
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), "r": revision.label}))
self.assertContains(response, "hover-overlay-solution")
image.delete()
self.client.logout()
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_detail_view_revision_revision_overlay(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg', "Test revision description")
revision = self._get_last_image_revision()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg', "Test revision description")
revision2 = self._get_last_image_revision()
Solution.objects.create(
status=Solver.SUCCESS,
content_object=revision
)
revision.mouse_hover_image = "REVISION__%s" % revision2.label
revision.save()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), "r": revision.label}))
self.assertContains(response, "hover-overlay-revision-revision")
self.assertNotContains(response, "hover-overlay-solution")
image.delete()
self.client.logout()
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_detail_view_revision_inverted_overlay(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg', "Test revision description")
revision = self._get_last_image_revision()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg', "Test revision description")
revision2 = self._get_last_image_revision()
Solution.objects.create(
status=Solver.SUCCESS,
content_object=revision
)
revision.mouse_hover_image = MouseHoverImage.INVERTED
revision.save()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), "r": revision.label}))
self.assertContains(response, "hover-overlay-revision-inverted")
self.assertNotContains(response, "hover-overlay-solution")
image.delete()
self.client.logout()
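# Broad smoke test of the image detail page: thumbnail data attributes, revision
# redirects and descriptions, acquisition data, like permissions, and moderation
# visibility for spam images.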
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_detail_view(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.subject_type = SubjectType.DEEP_SKY
image.save(keep_deleted=True)
today = time.strftime('%Y-%m-%d')
# Basic view
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertEqual(response.status_code, 200)
self.assertIsNotNone(re.search(r'data-id="%s"\s+data-alias="%s"' % (image.pk, "regular"), response.content))
# Image resolution
self.assertContains(response, "<strong class=\"card-label\">Resolution:</strong> 340x280")
# Revision redirect
self._do_upload_revision(image, 'astrobin/fixtures/test_smaller.jpg')
revision = self._get_last_image_revision()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertRedirects(
response,
reverse('image_detail', kwargs={'id': image.get_id(), 'r': revision.label}),
status_code=302,
target_status_code=200)
# Correct revision displayed
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), 'r': 'B'}))
self.assertIsNotNone(re.search(
r'data-id="%d"\s+data-alias="%s"\s+data-revision="%s"' % (image.pk, "regular", "B"),
response.content))
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "gallery"), response.content))
# Revision resolution differs from original
self.assertContains(response, "<strong class=\"card-label\">Resolution:</strong> 200x165")
# Revision description displayed
desc = "Test revision description"
revision.description = desc
revision.save(keep_deleted=True)
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), 'r': 'B'}))
self.assertContains(response, desc)
# If description is set to empty text, then it's gone
revision.description = ''
revision.save(keep_deleted=True)
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), 'r': 'B'}))
self.assertNotContains(response, desc)
self.assertNotContains(response, '<h3>%s</h3>' % revision.label, html=True)
# Correct revision displayed in gallery
response = self.client.get(reverse('user_page', kwargs={'username': 'test'}))
self.assertIsNotNone(re.search(
r'data-id="%d"\s+data-alias="%s"\s+data-revision="%s"' % (image.pk, "gallery", "final"),
response.content))
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), 'r': '0'}))
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "regular"), response.content))
self.assertIsNotNone(re.search(
r'data-id="%d"\s+data-alias="%s"\s+data-revision="%s"' % (image.pk, "gallery", "B"),
response.content))
# Inverted displayed
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), 'r': '0'}) + "?mod=inverted")
self.assertIsNotNone(
re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "regular_inverted"), response.content))
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), 'r': 'B'}) + "?mod=inverted")
self.assertIsNotNone(re.search(
r'data-id="%d"\s+data-alias="%s"\s+data-revision="%s"' % (image.pk, "regular_inverted", "B"),
response.content))
revision.delete()
# DSA data
test_filter, created = Filter.objects.get_or_create(name="Test filter")
dsa, created = DeepSky_Acquisition.objects.get_or_create(
image=image,
date=today,
number=10,
duration=1200,
filter=test_filter,
binning=1,
iso=3200,
gain=1.00,
sensor_cooling=-20,
darks=10,
flats=10,
flat_darks=10,
bias=0,
bortle=1,
mean_sqm=20.0,
mean_fwhm=1,
temperature=10)
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context[0]['image_type'], 'deep_sky')
dsa.delete()
# SSA data
ssa, created = SolarSystem_Acquisition.objects.get_or_create(
image=image,
date=today,
frames=1000,
fps=60,
focal_length=5000,
cmi=3,
cmii=3,
cmiii=3,
seeing=1,
transparency=1)
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context[0]['image_type'], 'solar_system')
ssa.delete()
# Test whether the Like button is active: image owner can't like
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertEqual(response.context[0]['user_can_like'], False)
# Test whether the Like button is active: index 0 can like
self.client.logout()
self.client.login(username='test2', password='password')
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertEqual(response.context[0]['user_can_like'], True)
# Spam images should be 404
image.moderator_decision = 2
image.save(keep_deleted=True)
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertEqual(response.status_code, 404)
# Moderators, however, can see them
moderators, created = Group.objects.get_or_create(name='image_moderators')
self.user2.groups.add(moderators)
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertEqual(response.status_code, 200)
self.user2.groups.remove(moderators)
# As can superusers
self.user2.is_superuser = True
self.user2.save()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertEqual(response.status_code, 200)
self.user2.is_superuser = False
self.user2.save()
# Anon users get 404 of course
self.client.logout()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertEqual(response.status_code, 404)
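# Requesting a missing or deleted revision label should fall back to the original
# or to the final revision.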
def test_image_detail_view_revision_redirect_to_original_if_no_revisions(self):
image = Generators.image()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), 'r': 'B'}))
self.assertRedirects(response, "/%s/0/" % image.hash)
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_detail_view_revision_redirect_to_final_revision_if_missing(self):
image = Generators.image(is_final=False)
b = Generators.imageRevision(image=image, is_final=True)
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), 'r': 'C'}))
self.assertRedirects(response, "/%s/%s/" % (image.hash, b.label))
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_detail_view_revision_redirect_to_final_revision_if_deleted(self):
image = Generators.image(is_final=False)
b = Generators.imageRevision(image=image, is_final=False)
c = Generators.imageRevision(image=image, is_final=True, label='C')
b.delete()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), 'r': b.label}))
self.assertRedirects(response, "/%s/%s/" % (image.hash, c.label))
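# Acquisition rendering details: gain is shown with two decimals (including 0 and
# 7-digit values) and binning is omitted entirely when not set.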
def test_image_7_digit_gain(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.subject_type = SubjectType.DEEP_SKY
image.save(keep_deleted=True)
today = time.strftime('%Y-%m-%d')
us = Generators.premium_subscription(self.user, "AstroBin Ultimate 2020+")
# DSA data
dsa, created = DeepSky_Acquisition.objects.get_or_create(
image=image,
date=today,
number=10,
duration=1200,
gain=12345.67,
)
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "(gain: 12345.67)")
dsa.delete()
image.delete()
us.delete()
def test_image_0_gain(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.subject_type = SubjectType.DEEP_SKY
image.save(keep_deleted=True)
today = time.strftime('%Y-%m-%d')
us = Generators.premium_subscription(self.user, "AstroBin Ultimate 2020+")
# DSA data
dsa, created = DeepSky_Acquisition.objects.get_or_create(
image=image,
date=today,
number=10,
duration=1200,
gain=0,
)
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "(gain: 0.00)")
dsa.delete()
image.delete()
us.delete()
def test_image_no_binning(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.subject_type = SubjectType.DEEP_SKY
image.save(keep_deleted=True)
today = time.strftime('%Y-%m-%d')
us = Generators.premium_subscription(self.user, "AstroBin Ultimate 2020+")
# DSA data
dsa, created = DeepSky_Acquisition.objects.get_or_create(
image=image,
date=today,
number=10,
duration=1200,
)
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, '10x1200"')
self.assertNotContains(response, 'bin 0x0')
dsa.delete()
image.delete()
us.delete()
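# image_flag_thumbs is exercised as a superuser and should redirect back to the
# image's final revision.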
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_flag_thumbs_view(self):
self.user.is_superuser = True
self.user.save()
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
response = self.client.post(
reverse('image_flag_thumbs', kwargs={'id': image.get_id()}))
self.assertRedirects(
response,
reverse('image_detail', kwargs={
'id': image.get_id(),
'r': 'B',
}),
status_code=302,
target_status_code=200)
revision.delete()
image.delete()
self.client.logout()
self.user.is_superuser = False
self.user.save()
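# astrobin.tasks.retrieve_thumbnail is patched out so the thumbnail views below
# don't trigger the real task.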
@patch("astrobin.tasks.retrieve_thumbnail")
def test_image_thumb_view(self, retrieve_thumbnail):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
response = self.client.get(
reverse('image_thumb', kwargs={
'id': image.get_id(),
'alias': 'regular'
}))
self.assertEqual(response.status_code, 200)
image.delete()
@patch("astrobin.tasks.retrieve_thumbnail")
def test_image_rawthumb_view(self, retrieve_thumbnail):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
opts = {
'id': image.get_id(),
'alias': 'regular'
}
def get_expected_url(image):
thumb = image.thumbnail_raw(opts['alias'], 'final', animated=False, insecure=False)
return thumb.url
response = self.client.get(reverse('image_rawthumb', kwargs=opts), follow=True)
# 404 because we don't serve that /media/static file, that's fine.
self.assertRedirects(response, get_expected_url(image))
# Set the watermark to a non-ASCII symbol
image.watermark_text = "©"
image.watermark = True
image.save(keep_deleted=True)
image = Image.objects.get(pk=image.pk)
response = self.client.get(reverse('image_rawthumb', kwargs=opts), follow=True)
self.assertRedirects(response, get_expected_url(image))
image.delete()
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_full_view(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
response = self.client.get(reverse('image_full', kwargs={'id': image.get_id()}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context[0]['alias'], 'hd')
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "hd"), response.content))
# Revision redirect
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
response = self.client.get(reverse('image_full', kwargs={'id': image.get_id()}))
self.assertRedirects(
response,
reverse('image_full', kwargs={'id': image.get_id(), 'r': revision.label}),
status_code=302,
target_status_code=200)
# Correct revision displayed
response = self.client.get(reverse('image_full', kwargs={'id': image.get_id(), 'r': 'B'}))
self.assertIsNotNone(re.search(
r'data-id="%d"\s+data-alias="%s"\s+data-revision="%s"' % (image.pk, "hd", "B"),
response.content))
response = self.client.get(reverse('image_full', kwargs={'id': image.get_id(), 'r': '0'}))
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "hd"), response.content))
revision.delete()
# Mods
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?mod=inverted")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context[0]['mod'], 'inverted')
self.assertEqual(response.context[0]['alias'], 'hd_inverted')
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "hd_inverted"), response.content))
image.delete()
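# The ?real query string requests the full-size ('real') rendition. The tests below
# cover the access matrix: viewer type (owner, anonymous, free, Lite, Premium,
# Ultimate, plus the *_owner variants where the subscription belongs to the image
# owner) crossed with the image's full_size_display_limitation.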
def test_image_real_view_owner(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_owner_limitation_everybody(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.EVERYBODY
image.save()
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_owner_limitation_paying(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.PAYING_MEMBERS_ONLY
image.save()
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_owner_limitation_members(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.MEMBERS_ONLY
image.save()
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_owner_limitation_me(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.ME_ONLY
image.save()
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_owner_limitation_nobody(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.NOBODY
image.save()
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertNotEqual('real', response.context[0]['alias'])
self.assertIsNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_visitor(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_visitor_limitation_everybody(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.EVERYBODY
image.save()
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_visitor_limitation_paying(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.PAYING_MEMBERS_ONLY
image.save()
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertNotEqual('real', response.context[0]['alias'])
self.assertIsNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_visitor_limitation_members(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.MEMBERS_ONLY
image.save()
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertNotEqual('real', response.context[0]['alias'])
self.assertIsNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_visitor_limitation_me(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.ME_ONLY
image.save()
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertNotEqual('real', response.context[0]['alias'])
self.assertIsNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_visitor_limitation_nobody(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.NOBODY
image.save()
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertNotEqual('real', response.context[0]['alias'])
self.assertIsNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_free(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_free_limitation_everybody(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.EVERYBODY
image.save()
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_free_limitation_paying(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.PAYING_MEMBERS_ONLY
image.save()
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertNotEqual('real', response.context[0]['alias'])
self.assertIsNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_free_limitation_members(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.MEMBERS_ONLY
image.save()
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_free_limitation_me(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.ME_ONLY
image.save()
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertNotEqual('real', response.context[0]['alias'])
self.assertIsNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_free_limitation_nobody(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.NOBODY
image.save()
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertNotEqual('real', response.context[0]['alias'])
self.assertIsNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
def test_image_real_view_lite(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
us = Generators.premium_subscription(self.user2, "AstroBin Lite")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_lite_limitation_everybody(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.EVERYBODY
image.save()
us = Generators.premium_subscription(self.user2, "AstroBin Lite")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_lite_limitation_paying(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.PAYING_MEMBERS_ONLY
image.save()
us = Generators.premium_subscription(self.user2, "AstroBin Lite")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_lite_limitation_members(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.MEMBERS_ONLY
image.save()
us = Generators.premium_subscription(self.user2, "AstroBin Lite")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_lite_limitation_me(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.ME_ONLY
image.save()
us = Generators.premium_subscription(self.user2, "AstroBin Lite")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertNotEqual('real', response.context[0]['alias'])
self.assertIsNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_lite_limitation_nobody(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.full_size_display_limitation = FullSizeDisplayLimitation.NOBODY
image.save()
us = Generators.premium_subscription(self.user2, "AstroBin Lite")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertNotEqual('real', response.context[0]['alias'])
self.assertIsNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_lite_autorenew(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
us = Generators.premium_subscription(self.user2, "AstroBin Lite (autorenew)")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_lite_2020(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self.client.logout()
us = Generators.premium_subscription(self.user2, "AstroBin Lite 2020+")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_premium(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
us = Generators.premium_subscription(self.user2, "AstroBin Premium")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_premium_autorenew(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
us = Generators.premium_subscription(self.user2, "AstroBin Premium (autorenew)")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_premium_2020(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
us = Generators.premium_subscription(self.user2, "AstroBin Premium 2020+")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_ultimate_2020(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
us = Generators.premium_subscription(self.user2, "AstroBin Ultimate 2020+")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_ultimate_2020_owner(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
us = Generators.premium_subscription(self.user, "AstroBin Ultimate 2020+")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_premium_owner(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
us = Generators.premium_subscription(self.user, "AstroBin Premium")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_premium_autorenew_owner(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
us = Generators.premium_subscription(self.user, "AstroBin Premium (autorenew)")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_lite_owner(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
us = Generators.premium_subscription(self.user, "AstroBin Lite")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_lite_autorenew_owner(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
us = Generators.premium_subscription(self.user, "AstroBin Lite (autorenew)")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_lite_2020_owner(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
us = Generators.premium_subscription(self.user, "AstroBin Lite 2020+")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
def test_image_real_view_premium_2020_owner(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
us = Generators.premium_subscription(self.user, "AstroBin Premium 2020+")
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id()}) + "?real")
self.assertEqual(200, response.status_code)
self.assertEqual('real', response.context[0]['alias'])
self.assertIsNotNone(re.search(r'data-id="%d"\s+data-alias="%s"' % (image.pk, "real"), response.content))
image.delete()
us.delete()
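# Revision upload pipeline: invalid files redirect back with an error, and revision
# labels advance past soft-deleted revisions (after B is soft-deleted the next
# upload becomes C).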
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_upload_revision_process_view(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
# Test file with invalid extension
response = self._do_upload_revision(image, 'astrobin/fixtures/invalid_file')
self.assertRedirects(
response,
reverse('image_detail', kwargs={'id': image.get_id()}),
status_code=302,
target_status_code=200)
self._assert_message(response, "error unread", "Invalid image")
# Test file with invalid content
response = self._do_upload_revision(image, 'astrobin/fixtures/invalid_file.jpg')
self.assertRedirects(
response,
reverse('image_detail', kwargs={'id': image.get_id()}),
status_code=302,
target_status_code=200)
self._assert_message(response, "error unread", "Invalid image")
# Test successful upload
response = self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
image = self._get_last_image()
revision = self._get_last_image_revision()
self.assertRedirects(
response,
reverse('image_edit_revision', kwargs={'id': revision.pk}),
status_code=302,
target_status_code=200)
self._assert_message(response, "success unread", "Image uploaded")
self.assertEqual(1, image.revisions.count())
self.assertEqual('B', revision.label)
# Now delete B and see that the new one gets C because B is soft-deleted
revision.delete()
with self.assertRaises(ImageRevision.DoesNotExist):
revision = ImageRevision.objects.get(pk=revision.pk)
revision = ImageRevision.all_objects.get(pk=revision.pk)
self.assertNotEqual(None, revision.deleted)
self.assertEqual(0, ImageRevision.objects.filter(image=image).count())
image = Image.objects.get(pk=image.pk)
self.assertEqual(0, image.revisions.count())
response = self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
self.assertRedirects(
response,
reverse('image_edit_revision', kwargs={'id': revision.pk}),
status_code=302,
target_status_code=200)
self._assert_message(response, "success unread", "Image uploaded")
self.assertEqual(1, ImageRevision.objects.filter(image=image).count())
image = Image.objects.get(pk=image.pk)
self.assertEqual(1, image.revisions.count())
self.assertEqual('C', revision.label)
revision.delete()
image.delete()
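# The make-final views promote the original image or one of its revisions; only the
# owner may do so (403 otherwise).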
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_edit_make_final_view(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
image = self._get_last_image()
response = self.client.get(
reverse('image_edit_make_final', kwargs={'id': image.get_id()}),
follow=True)
self.assertRedirects(
response,
reverse('image_detail', kwargs={'id': image.get_id()}),
status_code=302,
target_status_code=200)
image = self._get_last_image()
revision = self._get_last_image_revision()
self.assertEqual(image.is_final, True)
self.assertEqual(image.revisions.all()[0].is_final, False)
revision.delete()
self.client.logout()
# Test with wrong user
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_edit_make_final', kwargs={'id': image.get_id()}))
self.assertEqual(response.status_code, 403)
self.client.logout()
image.delete()
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_edit_revision_make_final_view(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
# Upload revision B
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
# Upload revision C
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
# Check that C is final
image = self._get_last_image()
c = image.revisions.order_by('-label')[0]
b = image.revisions.order_by('-label')[1]
self.assertEqual(image.is_final, False)
self.assertEqual(c.is_final, True)
self.assertEqual(b.is_final, False)
# Make B final
response = self.client.get(
reverse('image_edit_revision_make_final', kwargs={'id': b.id}),
follow=True)
self.assertRedirects(
response,
reverse('image_detail', kwargs={'id': image.get_id(), 'r': b.label}),
status_code=302,
target_status_code=200)
# Check that B is now final
image = self._get_last_image()
c = image.revisions.order_by('-label')[0]
b = image.revisions.order_by('-label')[1]
self.assertEqual(image.is_final, False)
self.assertEqual(c.is_final, False)
self.assertEqual(b.is_final, True)
c.delete()
self.client.logout()
# Test with wrong user
self.client.login(username='test2', password='password')
response = self.client.get(
reverse('image_edit_revision_make_final', kwargs={'id': b.id}))
self.assertEqual(response.status_code, 403)
self.client.logout()
b.delete()
image.delete()
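# image_edit_basic: permission checks for owner vs. other users, persistence of all
# basic fields, group membership updates, and login redirects for anonymous requests.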
def test_image_edit_basic_view(self):
def post_data(image):
return {
'title': "Test title",
'link': "http://www.example.com",
'link_to_fits': "http://www.example.com/fits",
'acquisition_type': 'EAA',
'data_source': 'OTHER',
'subject_type': SubjectType.OTHER,
'locations': [x.pk for x in image.user.userprofile.location_set.all()],
'description': "Image description",
'allow_comments': True
}
def get_url(args=None):
return reverse('image_edit_basic', args=args)
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.subject_type = SubjectType.DEEP_SKY
image.save(keep_deleted=True)
self.client.logout()
# GET
self.client.login(username='test2', password='password')
response = self.client.get(get_url((image.get_id(),)))
self.assertEqual(response.status_code, 403)
# POST
response = self.client.post(get_url((image.get_id(),)), post_data(image), follow=True)
self.assertEqual(response.status_code, 403)
self.client.logout()
# GET
self.client.login(username='test', password='password')
response = self.client.get(get_url((image.get_id(),)))
self.assertEqual(response.status_code, 200)
# POST
location, created = Location.objects.get_or_create(
name="Test location")
self.user.userprofile.location_set.add(location)
response = self.client.post(get_url((image.get_id(),)), post_data(image), follow=True)
self.assertRedirects(
response,
reverse('image_detail', kwargs={'id': image.get_id()}),
status_code=302,
target_status_code=200)
image = Image.objects_including_wip.get(pk=image.pk)
self.assertEqual(image.title, "Test title")
self.assertEqual(image.link, "http://www.example.com")
self.assertEqual(image.link_to_fits, "http://www.example.com/fits")
self.assertEqual(image.acquisition_type, 'EAA')
self.assertEqual(image.subject_type, SubjectType.OTHER)
self.assertEqual(image.solar_system_main_subject, None)
self.assertEqual(image.locations.count(), 1)
self.assertEqual(image.locations.all().first().pk, image.user.userprofile.location_set.all().first().pk)
self.assertEqual(image.description, "Image description")
self.assertEqual(image.allow_comments, True)
# Test that groups are updated
group1 = AstroBinGroup.objects.create(
name="group1", creator=self.user, owner=self.user,
category=100)
group2 = AstroBinGroup.objects.create(
name="group2", creator=self.user, owner=self.user,
category=100)
group3 = AstroBinGroup.objects.create(
name="group3", creator=self.user, owner=self.user,
category=100, autosubmission=True)
response = self.client.get(get_url((image.get_id(),)))
self.assertContains(response, "group1")
self.assertContains(response, "group2")
self.assertNotContains(response, "group3")
response = self.client.get(image.get_absolute_url())
self.assertContains(response, "Acquisition type")
self.assertContains(response, "Electronically-Assisted Astronomy (EAA, e.g. based on a live video feed)")
data = post_data(image)
data.update({"groups": [group1.pk]})
response = self.client.post(get_url((image.get_id(),)), data, follow=True)
image = Image.objects_including_wip.get(pk=image.pk)
self.assertTrue(group1 in image.part_of_group_set.all())
self.assertFalse(group2 in image.part_of_group_set.all())
data.update({"groups": [group1.pk, group2.pk]})
response = self.client.post(get_url((image.get_id(),)), data, follow=True)
image = Image.objects_including_wip.get(pk=image.pk)
self.assertTrue(group1 in image.part_of_group_set.all())
self.assertTrue(group2 in image.part_of_group_set.all())
data.update({"groups": [group2.pk]})
response = self.client.post(get_url((image.get_id(),)), data, follow=True)
image = Image.objects_including_wip.get(pk=image.pk)
self.assertFalse(group1 in image.part_of_group_set.all())
self.assertTrue(group2 in image.part_of_group_set.all())
data.update({"groups": []})
response = self.client.post(get_url((image.get_id(),)), data, follow=True)
image = Image.objects_including_wip.get(pk=image.pk)
self.assertFalse(group1 in image.part_of_group_set.all())
self.assertFalse(group2 in image.part_of_group_set.all())
group1.delete()
group2.delete()
group3.delete()
# Invalid form
response = self.client.post(get_url((image.get_id(),)), {})
self.assertContains(response, "This field is required");
self.client.logout()
# Anonymous GET
response = self.client.get(get_url((image.get_id(),)))
self.assertRedirects(
response,
'/accounts/login/?next=' + get_url((image.get_id(),)),
status_code=302,
target_status_code=200)
# Anonymous POST
response = self.client.post(get_url((image.get_id(),)), post_data(image), follow=True)
self.assertRedirects(
response,
'/accounts/login/?next=' + get_url((image.get_id(),)),
status_code=302,
target_status_code=200)
image.delete()
def test_image_edit_basic_view_replacing_image_deletes_solution(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
Solution.objects.create(
status=Solver.SUCCESS,
content_object=image
)
data = {
'image_file': open('astrobin/fixtures/test.jpg', 'rb'),
'title': "Test title",
'link': "http://www.example.com",
'link_to_fits': "http://www.example.com/fits",
'acquisition_type': 'EAA',
'data_source': 'OTHER',
'subject_type': SubjectType.OTHER,
'locations': [],
'description': "Image description",
'allow_comments': True
}
self.assertIsNotNone(image.solution)
self.client.post(reverse('image_edit_basic', args=(image.get_id(),)), data, follow=True)
self.assertIsNone(image.solution)
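# image_edit_watermark: owner-only; a missing image_id yields a 404 and an invalid
# form re-renders with an error message.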
def test_image_edit_watermark_view(self):
def post_data(image):
return {
'image_id': image.get_id(),
'watermark': True,
'watermark_text': "Watermark test",
'watermark_position': 0,
'watermark_size': 'S',
'watermark_opacity': 100
}
def get_url(args=None):
return reverse('image_edit_watermark', args=args)
def post_url(args=None):
return reverse('image_edit_save_watermark', args=args)
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.title = "Test title"
image.save(keep_deleted=True)
self.client.logout()
# GET
self.client.login(username='test2', password='password')
response = self.client.get(get_url((image.get_id(),)))
self.assertEqual(response.status_code, 403)
# POST
response = self.client.post(
post_url(),
post_data(image),
follow=True)
self.assertEqual(response.status_code, 403)
self.client.logout()
# GET
self.client.login(username='test', password='password')
response = self.client.get(get_url((image.get_id(),)))
self.assertEqual(response.status_code, 200)
# POST
response = self.client.post(
post_url(),
post_data(image),
follow=True)
self.assertRedirects(
response,
reverse('image_detail', kwargs={'id': image.get_id()}),
status_code=302,
target_status_code=200)
image = Image.objects.get(pk=image.pk)
self.assertEqual(image.watermark, True)
self.assertEqual(image.watermark_text, "Watermark test")
self.assertEqual(image.watermark_position, 0)
self.assertEqual(image.watermark_size, 'S')
self.assertEqual(image.watermark_opacity, 100)
# Missing image_id in post
response = self.client.post(post_url(), {})
self.assertEqual(response.status_code, 404)
# Invalid form
response = self.client.post(post_url(), {'image_id': image.get_id()})
self.assertEqual(response.status_code, 200)
self._assert_message(response, "error unread", "errors processing the form")
self.client.logout()
# Anonymous GET
response = self.client.get(get_url((image.get_id(),)))
self.assertRedirects(
response,
'/accounts/login/?next=' +
get_url((image.get_id(),)),
status_code=302,
target_status_code=200)
# Anonymous POST
response = self.client.post(
post_url(),
post_data(image),
follow=True)
self.assertRedirects(
response,
'/accounts/login/?next=' + post_url(),
status_code=302,
target_status_code=200)
image.delete()
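# image_edit_gear: gear fields are posted as comma-separated pk lists, and the user's
# other images are offered as sources to copy gear from.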
def test_image_edit_gear_view(self):
def post_data(image):
return {
'image_id': image.get_id(),
'imaging_telescopes': ','.join(["%d" % x.pk for x in self.imaging_telescopes]),
'guiding_telescopes': ','.join(["%d" % x.pk for x in self.guiding_telescopes]),
'mounts': ','.join(["%d" % x.pk for x in self.mounts]),
'imaging_cameras': ','.join(["%d" % x.pk for x in self.imaging_cameras]),
'guiding_cameras': ','.join(["%d" % x.pk for x in self.guiding_cameras]),
'focal_reducers': ','.join(["%d" % x.pk for x in self.focal_reducers]),
'software': ','.join(["%d" % x.pk for x in self.software]),
'filters': ','.join(["%d" % x.pk for x in self.filters]),
'accessories': ','.join(["%d" % x.pk for x in self.accessories])
}
def get_url(args=None):
return reverse('image_edit_gear', args=args)
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.title = "Test title"
image.save(keep_deleted=True)
self.client.logout()
# GET
self.client.login(username='test2', password='password')
response = self.client.get(get_url((image.get_id(),)))
self.assertEqual(response.status_code, 403)
# POST
response = self.client.post(
get_url((image.get_id(),)),
post_data(image),
follow=True)
self.assertEqual(response.status_code, 403)
self.client.logout()
# GET
self.client.login(username='test', password='password')
response = self.client.get(get_url((image.get_id(),)))
self.assertEqual(response.status_code, 200)
# No gear
self.user.userprofile.telescopes.clear()
self.user.userprofile.cameras.clear()
response = self.client.get(get_url((image.get_id(),)))
self.assertContains(response, "Can't see anything here?")
self.user.userprofile.telescopes = self.imaging_telescopes + self.guiding_telescopes
self.user.userprofile.cameras = self.imaging_cameras + self.guiding_cameras
# Check that the user's other images are available to copy from
self._do_upload('astrobin/fixtures/test.jpg')
        other_1 = self._get_last_image()
        other_1.title = "Other 1"
        other_1.save(keep_deleted=True)
        self._do_upload('astrobin/fixtures/test.jpg', wip=True)
        other_2 = self._get_last_image()
        other_2.title = "Other 2"
        other_2.save(keep_deleted=True)
response = self.client.get(get_url((image.get_id(),)))
other_images = Image.objects_including_wip \
.filter(user=self.user) \
.exclude(pk=image.pk)
for i in other_images:
self.assertContains(
response,
'<option value="%d">%s</option>' % (i.pk, i.title),
html=True)
other_1.delete()
other_2.delete()
# POST
response = self.client.post(
get_url((image.get_id(),)),
post_data(image),
follow=True)
self.assertEqual(response.status_code, 200)
self._assert_message(response, "success unread", "Form saved")
image = Image.objects.get(pk=image.pk)
self.assertEqual(list(image.imaging_telescopes.all()), self.imaging_telescopes)
self.assertEqual(list(image.guiding_telescopes.all()), self.guiding_telescopes)
self.assertEqual(list(image.mounts.all()), self.mounts)
self.assertEqual(list(image.imaging_cameras.all()), self.imaging_cameras)
self.assertEqual(list(image.guiding_cameras.all()), self.guiding_cameras)
self.assertEqual(list(image.focal_reducers.all()), self.focal_reducers)
self.assertEqual(list(image.software.all()), self.software)
self.assertEqual(list(image.filters.all()), self.filters)
self.assertEqual(list(image.accessories.all()), self.accessories)
# No data
response = self.client.post(get_url((image.get_id(),)), {}, follow=True)
self.assertRedirects(response, reverse('image_detail', args=(image.get_id(),)))
self.client.logout()
# Anonymous GET
response = self.client.get(get_url((image.get_id(),)))
self.assertRedirects(
response,
'/accounts/login/?next=' +
get_url((image.get_id(),)),
status_code=302,
target_status_code=200)
# Anonymous POST
response = self.client.post(
get_url((image.get_id(),)),
post_data(image),
follow=True)
self.assertRedirects(
response,
'/accounts/login/?next=' + get_url((image.get_id(),)),
status_code=302,
target_status_code=200)
image.delete()
def test_image_edit_acquisition_view(self):
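        # This test exercises the three acquisition payloads built below: simple deep-sky,
        # advanced deep-sky (a Django formset), and solar-system.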
today = time.strftime('%Y-%m-%d')
def post_data_deep_sky_simple(image):
return {
'image_id': image.get_id(),
'edit_type': 'deep_sky',
'advanced': 'false',
'date': today,
'number': 10,
'duration': 1200,
}
def post_data_deep_sky_advanced(image):
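            # The advanced deep-sky payload posts a Django formset; the *-TOTAL_FORMS and
            # *-INITIAL_FORMS keys are the management-form fields the formset needs to validate.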
return {
'deepsky_acquisition_set-TOTAL_FORMS': 1,
'deepsky_acquisition_set-INITIAL_FORMS': 0,
'image_id': image.get_id(),
'edit_type': 'deep_sky',
'advanced': 'true',
'deepsky_acquisition_set-0-date': today,
'deepsky_acquisition_set-0-number': 10,
'deepsky_acquisition_set-0-duration': 1200,
'deepsky_acquisition_set-0-binning': 1,
'deepsky_acquisition_set-0-iso': 3200,
'deepsky_acquisition_set-0-gain': 1,
'deepsky_acquisition_set-0-sensor_cooling': -20,
'deepsky_acquisition_set-0-darks': 10,
'deepsky_acquisition_set-0-flats': 10,
'deepsky_acquisition_set-0-flat_darks': 10,
'deepsky_acquisition_set-0-bias': 0,
'deepsky_acquisition_set-0-bortle': 1,
'deepsky_acquisition_set-0-mean_sqm': 20.0,
'deepsky_acquisition_set-0-mean_fwhm': 1,
'deepsky_acquisition_set-0-temperature': 10
}
def post_data_solar_system(image):
return {
'image_id': image.get_id(),
'edit_type': 'solar_system',
'date': today,
'frames': 1000,
'fps': 100,
'focal_length': 5000,
'cmi': 1.0,
'cmii': 2.0,
'cmiii': 3.0,
'seeing': 1,
'transparency': 1,
'time': "00:00"
}
def get_url(args=None):
return reverse('image_edit_acquisition', args=args)
def post_url(args=None):
return reverse('image_edit_save_acquisition', args=args)
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.title = "Test title"
image.save(keep_deleted=True)
self.client.logout()
# GET with wrong user
self.client.login(username='test2', password='password')
response = self.client.get(get_url((image.get_id(),)))
self.assertEqual(response.status_code, 403)
# POST with wrong user
response = self.client.post(
post_url(),
post_data_deep_sky_simple(image),
follow=True)
self.assertEqual(response.status_code, 403)
# Reset with wrong user
response = self.client.get(
reverse('image_edit_acquisition_reset', args=(image.get_id(),)))
self.assertEqual(response.status_code, 403)
self.client.logout()
# GET
self.client.login(username='test', password='password')
response = self.client.get(get_url((image.get_id(),)))
self.assertEqual(response.status_code, 200)
# GET with existing DSA
dsa, created = DeepSky_Acquisition.objects.get_or_create(
image=image,
date=today)
response = self.client.get(get_url((image.get_id(),)))
self.assertEqual(response.status_code, 200)
# GET with existing DSA in advanced mode
dsa.advanced = True
dsa.save()
response = self.client.get(get_url((image.get_id(),)))
self.assertEqual(response.status_code, 200)
# Test the add_more argument for the formset
response = self.client.get(get_url((image.get_id(),)) + "?add_more")
self.assertEqual(response.status_code, 200)
dsa.delete()
# GET with existing SSA
ssa, created = SolarSystem_Acquisition.objects.get_or_create(
image=image,
date=today)
response = self.client.get(get_url((image.get_id(),)))
self.assertEqual(response.status_code, 200)
ssa.delete()
# GET with edit_type in request.GET
response = self.client.get(get_url((image.get_id(),)) + "?edit_type=deep_sky")
self.assertEqual(response.status_code, 200)
# Reset
response = self.client.get(
reverse('image_edit_acquisition_reset', args=(image.get_id(),)))
self.assertEqual(response.status_code, 200)
# POST basic deep sky
response = self.client.post(
post_url(),
post_data_deep_sky_simple(image),
follow=True)
self.assertRedirects(
response,
reverse('image_detail', kwargs={'id': image.get_id()}),
status_code=302,
target_status_code=200)
self.assertEquals(image.acquisition_set.count(), 1)
dsa = DeepSky_Acquisition.objects.filter(image=image)[0]
post_data = post_data_deep_sky_simple(image)
self.assertEqual(dsa.date.strftime("%Y-%m-%d"), post_data['date'])
self.assertEqual(dsa.number, post_data['number'])
self.assertEqual(dsa.duration, post_data['duration'])
dsa.delete()
# POST basic deep sky invalid form
post_data = post_data_deep_sky_simple(image)
post_data['number'] = "foo"
response = self.client.post(post_url(), post_data)
self.assertEqual(response.status_code, 200)
self._assert_message(response, "error unread", "errors processing the form")
self.assertEquals(image.acquisition_set.count(), 0)
# POST advanced deep sky
response = self.client.post(
post_url(),
post_data_deep_sky_advanced(image),
follow=True)
self.assertRedirects(
response,
reverse('image_detail', kwargs={'id': image.get_id()}),
status_code=302,
target_status_code=200)
self.assertEquals(image.acquisition_set.count(), 1)
dsa = DeepSky_Acquisition.objects.filter(image=image)[0]
post_data = post_data_deep_sky_advanced(image)
self.assertEqual(dsa.date.strftime("%Y-%m-%d"), post_data['deepsky_acquisition_set-0-date'])
self.assertEqual(dsa.number, post_data['deepsky_acquisition_set-0-number'])
self.assertEqual(dsa.duration, post_data['deepsky_acquisition_set-0-duration'])
self.assertEqual(dsa.binning, post_data['deepsky_acquisition_set-0-binning'])
self.assertEqual(dsa.iso, post_data['deepsky_acquisition_set-0-iso'])
self.assertEqual(dsa.gain, post_data['deepsky_acquisition_set-0-gain'])
self.assertEqual(dsa.sensor_cooling, post_data['deepsky_acquisition_set-0-sensor_cooling'])
self.assertEqual(dsa.darks, post_data['deepsky_acquisition_set-0-darks'])
self.assertEqual(dsa.flats, post_data['deepsky_acquisition_set-0-flats'])
self.assertEqual(dsa.flat_darks, post_data['deepsky_acquisition_set-0-flat_darks'])
self.assertEqual(dsa.bias, post_data['deepsky_acquisition_set-0-bias'])
self.assertEqual(dsa.bortle, post_data['deepsky_acquisition_set-0-bortle'])
self.assertEqual(dsa.mean_sqm, post_data['deepsky_acquisition_set-0-mean_sqm'])
self.assertEqual(dsa.mean_fwhm, post_data['deepsky_acquisition_set-0-mean_fwhm'])
self.assertEqual(dsa.temperature, post_data['deepsky_acquisition_set-0-temperature'])
dsa.delete()
# POST advanced deep sky with "add_mode"
post_data = post_data_deep_sky_advanced(image)
post_data['add_more'] = True
response = self.client.post(post_url(), post_data)
self.assertEqual(response.status_code, 200)
self.assertEquals(image.acquisition_set.count(), 1)
image.acquisition_set.all().delete()
# POST advanced deep sky invalid form
post_data = post_data_deep_sky_advanced(image)
post_data['deepsky_acquisition_set-0-number'] = "foo"
response = self.client.post(post_url(), post_data)
self.assertEqual(response.status_code, 200)
self._assert_message(response, "error unread", "errors processing the form")
self.assertEquals(image.acquisition_set.count(), 0)
# POST with missing image_id
response = self.client.post(post_url(), {}, follow=True)
self.assertEqual(response.status_code, 404)
        # POST with invalid SSA form
post_data = post_data_solar_system(image)
post_data['frames'] = "foo"
response = self.client.post(post_url(), post_data, follow=True)
self.assertEqual(response.status_code, 200)
self._assert_message(response, "error unread", "errors processing the form")
self.assertEquals(image.acquisition_set.count(), 0)
# POST with existing SSA
ssa, created = SolarSystem_Acquisition.objects.get_or_create(
image=image,
date=today)
response = self.client.post(
post_url(), post_data_solar_system(image), follow=True)
self.assertRedirects(
response,
reverse('image_detail', kwargs={'id': image.get_id()}),
status_code=302,
target_status_code=200)
self.assertEquals(image.acquisition_set.count(), 1)
ssa = SolarSystem_Acquisition.objects.filter(image=image)[0]
post_data = post_data_solar_system(image)
self.assertEqual(ssa.date.strftime("%Y-%m-%d"), post_data['date'])
self.assertEqual(ssa.frames, post_data['frames'])
self.assertEqual(ssa.fps, post_data['fps'])
self.assertEqual(ssa.focal_length, post_data['focal_length'])
self.assertEqual(ssa.cmi, post_data['cmi'])
self.assertEqual(ssa.cmii, post_data['cmii'])
self.assertEqual(ssa.cmiii, post_data['cmiii'])
self.assertEqual(ssa.seeing, post_data['seeing'])
self.assertEqual(ssa.transparency, post_data['transparency'])
self.assertEqual(ssa.time, post_data['time'])
self.client.logout()
image.delete()
def test_image_edit_license_view(self):
def post_data(image):
return {
'image_id': image.get_id(),
'license': License.ATTRIBUTION_NO_DERIVS,
}
def get_url(args=None):
return reverse('image_edit_license', args=args)
def post_url(args=None):
return reverse('image_edit_save_license', args=args)
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.title = "Test title"
image.save(keep_deleted=True)
self.client.logout()
# GET with wrong user
self.client.login(username='test2', password='password')
response = self.client.get(get_url((image.get_id(),)))
self.assertEqual(response.status_code, 403)
# POST with wrong user
response = self.client.post(post_url(), post_data(image))
self.assertEqual(response.status_code, 403)
self.client.logout()
# GET
self.client.login(username='test', password='password')
response = self.client.get(get_url((image.get_id(),)))
self.assertEqual(response.status_code, 200)
# POST with missing image_id
response = self.client.post(post_url(), {})
self.assertEqual(response.status_code, 404)
# POST invalid form
data = post_data(image)
data['license'] = "foo"
response = self.client.post(post_url(), data, follow=True)
self.assertEqual(response.status_code, 200)
self._assert_message(response, "error unread", "errors processing the form")
# POST
response = self.client.post(post_url(), post_data(image), follow=True)
self.assertRedirects(
response,
reverse('image_detail', kwargs={'id': image.get_id()}),
status_code=302,
target_status_code=200)
self._assert_message(response, "success unread", "Form saved")
image = Image.objects.get(pk=image.pk)
self.assertEquals(image.license, License.ATTRIBUTION_NO_DERIVS)
self.client.logout()
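    # PREMIUM_MAX_REVISIONS_FREE_2020 is raised to sys.maxsize in several tests below so the
    # free-account revision cap (assumed premium-tier logic) never blocks revision uploads.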
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_edit_revision_view(self):
def post_data():
return {
'description': "Updated revision description",
}
def get_url(args=None):
return reverse('image_edit_revision', args=args)
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.title = "Test title"
image.save(keep_deleted=True)
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg', "Test revision description")
revision = self._get_last_image_revision()
self.client.logout()
# GET with wrong user
self.client.login(username='test2', password='password')
response = self.client.get(get_url((revision.pk,)))
self.assertEqual(response.status_code, 403)
# POST with wrong user
response = self.client.post(get_url((revision.pk,)), post_data())
self.assertEqual(response.status_code, 403)
self.client.logout()
# GET missing revision
self.client.login(username='test', password='password')
response = self.client.get(get_url((999,)))
self.assertEqual(response.status_code, 404)
# GET
self.client.login(username='test', password='password')
response = self.client.get(get_url((revision.pk,)))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Test revision description")
# POST
response = self.client.post(get_url((revision.pk,)), post_data(), follow=True)
self.assertRedirects(
response,
reverse('image_detail', kwargs={'id': image.get_id(), 'r': revision.label}),
status_code=302,
target_status_code=200)
self._assert_message(response, "success unread", "Form saved")
revision = ImageRevision.objects.get(pk=revision.pk)
self.assertEquals(revision.description, "Updated revision description")
self.client.logout()
def test_image_revision_keeps_mouse_hover_from_image(self):
image = Generators.image(user=self.user)
image.mouse_hover_image = MouseHoverImage.INVERTED
image.save(keep_deleted=True)
revision = Generators.imageRevision(image=image)
self.client.login(username='test', password='password')
response = self.client.get(reverse('image_edit_revision', args=(revision.pk,)))
self.assertContains(response, '<option value="' + MouseHoverImage.INVERTED + '" selected>')
def test_image_revision_keeps_plate_solving_settings_from_image(self):
image = Generators.image(user=self.user)
solution = PlateSolvingGenerators.solution(image)
settings = PlateSolvingGenerators.settings(blind=False)
advanced_settings = PlateSolvingGenerators.advanced_settings(scaled_font_size='S')
solution.settings = settings
solution.advanced_settings = advanced_settings
solution.save()
image.save(keep_deleted=True)
revision = Generators.imageRevision(image=image)
self.assertIsNotNone(revision.solution)
self.assertIsNotNone(revision.solution.settings)
self.assertIsNotNone(revision.solution.advanced_settings)
self.assertFalse(revision.solution.settings.blind)
self.assertEquals('S', revision.solution.advanced_settings.scaled_font_size)
def test_image_delete_has_permanently_deleted_text(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
response = self.client.get(reverse('image_detail', args=(image.get_id(),)))
self.assertContains(response, "The image will be permanently")
def test_image_delete_has_permanently_deleted_text_premium(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
Generators.premium_subscription(image.user, "AstroBin Premium 2020+")
response = self.client.get(reverse('image_detail', args=(image.get_id(),)))
self.assertContains(response, "The image will be permanently")
def test_image_delete_has_trash_text_ultimate(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
Generators.premium_subscription(image.user, "AstroBin Ultimate 2020+")
response = self.client.get(reverse('image_detail', args=(image.get_id(),)))
self.assertContains(response, "The image will be moved to the trash")
def test_image_delete_view(self):
def post_url(args=None):
return reverse('image_delete', args=args)
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self.client.logout()
# Try with anonymous user
response = self.client.post(post_url((image.get_id(),)))
self.assertRedirects(
response,
'/accounts/login/?next=' + post_url((image.get_id(),)),
status_code=302,
target_status_code=200)
# POST with wrong user
self.client.login(username='test2', password='password')
response = self.client.post(post_url((image.get_id(),)))
self.assertEqual(response.status_code, 403)
self.client.logout()
# Test deleting WIP image
self.client.login(username='test', password='password')
image.is_wip = True
image.save(keep_deleted=True)
response = self.client.post(post_url((image.get_id(),)))
self.assertRedirects(
response,
reverse('user_page', kwargs={'username': image.user.username}),
status_code=302,
target_status_code=200)
self.assertEquals(Image.objects_including_wip.filter(pk=image.pk).count(), 0)
# Test for success
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
response = self.client.post(post_url((image.get_id(),)))
self.assertRedirects(
response,
reverse('user_page', kwargs={'username': image.user.username}),
status_code=302,
target_status_code=200)
self.assertEquals(Image.objects_including_wip.filter(pk=image.pk).count(), 0)
self.client.logout()
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_delete_revision_view(self):
def post_url(args=None):
return reverse('image_delete_revision', args=args)
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
self.client.logout()
# Try with anonymous user
response = self.client.post(post_url((revision.pk,)))
self.assertRedirects(
response,
'/accounts/login/?next=' + post_url((revision.pk,)),
status_code=302,
target_status_code=200)
# POST with wrong user
self.client.login(username='test2', password='password')
response = self.client.post(post_url((revision.pk,)))
self.assertEqual(response.status_code, 403)
self.client.logout()
# Test for success
self.client.login(username='test', password='password')
response = self.client.post(post_url((revision.pk,)))
self.assertRedirects(
response,
reverse('image_detail', kwargs={'id': image.get_id()}),
status_code=302,
target_status_code=200)
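        # The revision is only soft-deleted: it vanishes from the default manager but stays
        # reachable via deleted_objects, with its is_final flag cleared.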
self.assertEquals(ImageRevision.objects.filter(pk=revision.pk).count(), 0)
self.assertTrue(image.is_final)
self.assertFalse(ImageRevision.deleted_objects.get(pk=revision.pk).is_final)
self.client.logout()
image.delete()
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_delete_original_view(self):
def post_url(args=None):
return reverse('image_delete_original', args=args)
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self.client.logout()
# POST with wrong user
self.client.login(username='test2', password='password')
response = self.client.post(post_url((image.get_id(),)))
self.assertEqual(response.status_code, 403)
self.client.logout()
# Test when there are no revisions
self.client.login(username='test', password='password')
response = self.client.post(post_url((image.get_id(),)))
self.assertEquals(400, response.status_code)
self.assertEquals(Image.objects.filter(pk=image.pk).count(), 1)
# Test for success when image was not final
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
response = self.client.post(post_url((image.get_id(),)))
self.assertRedirects(
response,
reverse('image_detail', kwargs={'id': image.get_id()}),
status_code=302,
target_status_code=200)
self.assertEquals(ImageRevision.objects.filter(image=image).count(), 0)
image.delete()
# Test for success when image was final
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
image = Image.objects.get(pk=image.pk)
image.is_final = True
image.save(keep_deleted=True)
revision.is_final = False
revision.save(keep_deleted=True)
response = self.client.post(post_url((image.get_id(),)))
self.assertRedirects(
response,
reverse('image_detail', kwargs={'id': image.get_id()}),
status_code=302,
target_status_code=200)
self.assertEquals(ImageRevision.objects.filter(image=image).count(), 0)
image.delete()
self.client.logout()
def test_image_delete_other_versions_view_wrong_user(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self.client.logout()
self.client.login(username='test2', password='password')
response = self.client.post(reverse('image_delete_other_versions', args=(image.pk,)))
self.assertEqual(403, response.status_code)
def test_image_delete_other_versions_view_no_revisions(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self.client.login(username='test', password='password')
response = self.client.post(reverse('image_delete_other_versions', args=(image.pk,)))
self.assertEquals(400, response.status_code)
self.assertEquals(1, Image.objects.filter(pk=image.pk).count())
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_delete_other_versions_view_on_original_with_one_final_revision(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.description = "foo"
image.save(keep_deleted=True)
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
revision.description = "bar"
revision.save(keep_deleted=True)
response = self.client.post(reverse('image_delete_other_versions', args=(image.pk,)), follow=True)
self.assertEquals(200, response.status_code)
image = Image.objects_including_wip.get(pk=image.pk)
self.assertEquals(0, image.revisions.count())
self.assertEquals("foo", image.description)
self.assertTrue(image.is_final)
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_delete_other_versions_view_on_original_with_two_revisions_one_of_which_is_final(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.description = "foo"
image.save(keep_deleted=True)
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
revision.description = "bar1"
revision.save(keep_deleted=True)
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
revision.description = "bar2"
revision.save(keep_deleted=True)
response = self.client.post(reverse('image_delete_other_versions', args=(image.pk,)), follow=True)
self.assertEquals(200, response.status_code)
image = Image.objects_including_wip.get(pk=image.pk)
self.assertEquals(0, image.revisions.count())
self.assertTrue(image.is_final)
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_delete_other_versions_view_on_original_with_two_revisions_none_of_which_is_final(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.description = "foo"
image.save(keep_deleted=True)
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
revision.description = "bar1"
revision.save(keep_deleted=True)
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
revision.description = "bar2"
revision.save(keep_deleted=True)
image.revisions.update(is_final=False)
image.is_final = True
image.save(keep_deleted=True)
response = self.client.post(reverse('image_delete_other_versions', args=(image.pk,)), follow=True)
self.assertEquals(200, response.status_code)
image = Image.objects_including_wip.get(pk=image.pk)
self.assertEquals(0, image.revisions.count())
self.assertTrue(image.is_final)
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_delete_other_versions_view_on_final_revision(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.description = "foo"
image.save(keep_deleted=True)
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
revision.description = "bar"
revision.save(keep_deleted=True)
response = self.client.post(
reverse('image_delete_other_versions', args=(image.pk,)),
{
'revision': 'B'
},
follow=True)
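        # When revision 'B' is the kept version, its description is appended to the
        # original's ("foo\nbar") and the image becomes final again.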
self.assertEquals(200, response.status_code)
image = Image.objects_including_wip.get(pk=image.pk)
self.assertEquals(0, image.revisions.count())
self.assertEquals("foo\nbar", image.description)
self.assertTrue(image.is_final)
def test_image_promote_view(self):
def post_url(args=None):
return reverse('image_promote', args=args)
# Upload a WIP image and a public image
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
public_image = self._get_last_image()
self._do_upload('astrobin/fixtures/test.jpg', True)
wip_image = self._get_last_image()
# user2 follows user
self.client.logout()
self.client.login(username='test2', password='password')
response = self.client.post(
reverse('toggleproperty_ajax_add'),
{
'property_type': 'follow',
'object_id': self.user.pk,
'content_type_id': ContentType.objects.get_for_model(User).pk,
})
self.assertEqual(response.status_code, 200)
# GET with wrong user
response = self.client.post(post_url((public_image.get_id(),)))
self.assertEqual(response.status_code, 403)
self.client.logout()
# Test public image
self.client.login(username='test', password='password')
response = self.client.post(post_url((public_image.get_id(),)), follow=True)
self.assertEqual(response.status_code, 200)
image = Image.objects.get(pk=public_image.pk)
self.assertEquals(image.is_wip, False)
# Test WIP image
self.assertIsNone(wip_image.published)
self.assertTrue(wip_image.is_wip)
response = self.client.post(post_url((wip_image.get_id(),)), follow=True)
self.assertEqual(response.status_code, 200)
wip_image = Image.objects.get(pk=wip_image.pk)
self.assertFalse(wip_image.is_wip)
self.assertIsNotNone(wip_image.published)
# Test that previously published images don't trigger a notification
wip_image.is_wip = True
wip_image.save(keep_deleted=True)
response = self.client.post(post_url((wip_image.get_id(),)), follow=True)
self.assertEqual(response.status_code, 200)
wip_image = Image.objects.get(pk=wip_image.pk)
self.assertFalse(wip_image.is_wip)
self.assertIsNotNone(wip_image.published)
# Test that skip_notifications doesn't trigger a notification
wip_image.is_wip = True
wip_image.save(keep_deleted=True)
response = self.client.post(post_url((wip_image.get_id(),)), data={'skip_notifications': 'on'}, follow=True)
self.assertEqual(response.status_code, 200)
wip_image = Image.objects.get(pk=wip_image.pk)
self.assertFalse(wip_image.is_wip)
self.assertIsNotNone(wip_image.published)
image.delete()
# Test the `published` property
self._do_upload('astrobin/fixtures/test.jpg', True)
image = self._get_last_image()
self.assertTrue(image.is_wip)
self.assertIsNone(image.published)
response = self.client.post(post_url((image.get_id(),)))
image = Image.objects.get(pk=image.pk)
self.assertIsNotNone(image.published)
# The `published` field does not get updated the second time we make
# this image public.
published = image.published
image.is_wip = True
image.save(keep_deleted=True)
response = self.client.post(post_url((image.get_id(),)))
image = Image.objects.get(pk=image.pk)
self.assertEqual(published, image.published)
image.delete()
self.client.logout()
def test_image_demote_view(self):
def post_url(args=None):
return reverse('image_demote', args=args)
# Upload an image
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
# GET with wrong user
self.client.logout()
self.client.login(username='test2', password='password')
response = self.client.post(post_url((image.get_id(),)))
self.assertEqual(response.status_code, 403)
self.client.logout()
self.client.login(username='test', password='password')
# Test when image was not WIP
response = self.client.post(post_url((image.get_id(),)))
image = Image.objects_including_wip.get(pk=image.pk)
self.assertEquals(image.is_wip, True)
# Test when image was WIP
response = self.client.post(post_url((image.get_id(),)))
image = Image.objects_including_wip.get(pk=image.pk)
self.assertEquals(image.is_wip, True)
# Test that we can't get the image via the regular manager
self.assertEquals(Image.objects.filter(pk=image.pk).count(), 0)
self.client.logout()
image.delete()
@patch('astrobin.models.UserProfile.get_scores')
def test_image_moderation(self, get_scores):
get_scores.return_value = {'user_scores_index': 0}
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.title = "TEST IMAGE"
image.save(keep_deleted=True)
# As the test user does not have a high enough Image Index, the
# image should be in the moderation queue.
self.assertEquals(image.moderator_decision, 0)
self.assertEquals(image.moderated_when, None)
self.assertEquals(image.moderated_by, None)
# The image should not appear on the front page when logged out
self.client.logout()
response = self.client.get(reverse('index'))
        self.assertNotIn(image.title, response.content)
# Nor when logged in
self.client.login(username='test', password='password')
response = self.client.get(reverse('index'))
        self.assertNotIn(image.title, response.content)
# TODO: test image promotion
def test_image_updated_after_toggleproperty(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.title = "TEST IMAGE"
image.save(keep_deleted=True)
updated = image.updated
prop = ToggleProperty.objects.create_toggleproperty('like', image, self.user2)
image = self._get_last_image()
self.assertNotEquals(updated, image.updated)
updated = image.updated
prop = ToggleProperty.objects.create_toggleproperty('bookmark', image, self.user2)
image = self._get_last_image()
self.assertNotEquals(updated, image.updated)
updated = image.updated
prop.delete()
image = self._get_last_image()
self.assertNotEquals(updated, image.updated)
image.delete()
self.client.logout()
def test_image_updated_after_acquisition_saved(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.title = "TEST IMAGE"
image.save(keep_deleted=True)
updated = image.updated
today = time.strftime('%Y-%m-%d')
response = self.client.post(
reverse('image_edit_save_acquisition'),
{
'image_id': image.get_id(),
'edit_type': 'deep_sky',
'advanced': 'false',
'date': today,
'number': 10,
'duration': 1200
},
follow=True)
image = self._get_last_image()
self.assertNotEquals(updated, image.updated)
image.delete()
self.client.logout()
def test_image_updated_after_comment(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.title = "TEST IMAGE"
image.save(keep_deleted=True)
updated = image.updated
comment = NestedComment.objects.create(
content_object=image,
author=self.user2,
text="Test")
image = self._get_last_image()
self.assertNotEquals(updated, image.updated)
image.delete()
self.client.logout()
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_softdelete(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.delete()
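        # Soft deletion: the default manager hides the image while all_objects still sees it.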
self.assertFalse(Image.objects.filter(pk=image.pk).exists())
self.assertTrue(Image.all_objects.filter(pk=image.pk).exists())
image.undelete()
self.assertTrue(Image.objects.filter(pk=image.pk).exists())
self._do_upload_revision(image, 'astrobin/fixtures/test_smaller.jpg')
revision = self._get_last_image_revision()
image = Image.objects.get(pk=image.pk)
self.assertEquals(1, image.revisions.count())
revision.delete()
with self.assertRaises(ImageRevision.DoesNotExist):
revision = ImageRevision.objects.get(pk=revision.pk)
image = Image.objects.get(pk=image.pk)
self.assertEquals(0, image.revisions.count())
self.assertFalse(ImageRevision.objects.filter(pk=revision.pk).exists())
self.assertTrue(ImageRevision.all_objects.filter(pk=revision.pk).exists())
def test_image_corrupted_goes_to_404_if_anon(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.corrupted = True
image.save()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertEquals(404, response.status_code)
def test_image_corrupted_goes_to_404_if_anon_and_r0(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.corrupted = True
image.save()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), 'r': '0'}), follow=True)
self.assertEquals(404, response.status_code)
def test_image_corrupted_goes_to_edit_if_owner(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.corrupted = True
image.save()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}), follow=True)
self.assertRedirects(response, reverse('image_edit_basic', kwargs={'id': image.get_id()}) + '?corrupted')
def test_image_corrupted_goes_to_edit_if_owner_and_r0(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.corrupted = True
image.save()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), 'r': '0'}), follow=True)
self.assertRedirects(response, reverse('image_edit_basic', kwargs={'id': image.get_id()}) + '?corrupted')
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_revision_corrupted_goes_to_404_if_anon(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
revision.corrupted = True
revision.save()
self.client.logout()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), 'r': revision.label}))
self.assertEquals(404, response.status_code)
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_revision_corrupted_ok_if_anon_and_r0(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
revision.corrupted = True
revision.save()
self.client.logout()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), 'r': '0'}))
self.assertEquals(200, response.status_code)
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_revision_corrupted_ok_if_owner_and_r0(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
revision.corrupted = True
revision.save()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), 'r': '0'}))
self.assertEquals(200, response.status_code)
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_revision_corrupted_goes_to_edit_revision_if_owner(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
revision.corrupted = True
revision.save()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), 'r': revision.label}))
self.assertRedirects(response, reverse('image_edit_revision', kwargs={'id': revision.pk}) + '?corrupted')
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_corrupted_ok_if_final_revision(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.corrupted = True
image.save()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
self.client.logout()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}), follow=True)
self.assertEquals(200, response.status_code)
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), 'r': revision.label}))
self.assertEquals(200, response.status_code)
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_corrupted_404_if_non_final_revision_and_anon(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
image.corrupted = True
image.is_final = True
image.save()
revision.is_final = False
revision.save()
self.client.logout()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}), follow=True)
self.assertEquals(404, response.status_code)
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_corrupted_goes_to_edit_if_non_final_revision_and_owner(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
image.corrupted = True
image.is_final = True
image.save()
revision.is_final = False
revision.save()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}), follow=True)
self.assertRedirects(response, reverse('image_edit_basic', kwargs={'id': image.get_id()}) + '?corrupted')
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_corrupted_ok_if_non_final_revision_direct_link_and_anon(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
image.corrupted = True
image.is_final = True
image.save()
revision.is_final = False
revision.save()
self.client.logout()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id(), 'r': revision.label}))
self.assertEquals(200, response.status_code)
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_corrupted_ok_if_non_final_revision_direct_link_and_owner(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
image.corrupted = True
image.is_final = True
image.save()
revision.is_final = False
revision.save()
response = self.client.get(
reverse('image_detail', kwargs={'id': image.get_id(), 'r': revision.label}))
self.assertEquals(200, response.status_code)
    # The same corrupted-image scenarios as above, exercised via the image_full view.
def test_image_full_corrupted_goes_to_404_if_anon(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.corrupted = True
image.save()
response = self.client.get(reverse('image_full', kwargs={'id': image.get_id()}))
self.assertEquals(404, response.status_code)
def test_image_full_corrupted_goes_to_404_if_anon_and_r0(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self.client.logout()
image = self._get_last_image()
image.corrupted = True
image.save()
response = self.client.get(reverse('image_full', kwargs={'id': image.get_id(), 'r': '0'}), follow=True)
self.assertEquals(404, response.status_code)
def test_image_full_corrupted_goes_to_edit_if_owner(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.corrupted = True
image.save()
response = self.client.get(reverse('image_full', kwargs={'id': image.get_id()}), follow=True)
self.assertRedirects(response, reverse('image_edit_basic', kwargs={'id': image.get_id()}) + '?corrupted')
def test_image_full_corrupted_goes_to_edit_if_owner_and_r0(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.corrupted = True
image.save()
response = self.client.get(reverse('image_full', kwargs={'id': image.get_id(), 'r': '0'}), follow=True)
self.assertRedirects(response, reverse('image_edit_basic', kwargs={'id': image.get_id()}) + '?corrupted')
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_full_revision_corrupted_goes_to_404_if_anon(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
revision.corrupted = True
revision.save()
self.client.logout()
response = self.client.get(reverse('image_full', kwargs={'id': image.get_id(), 'r': revision.label}))
self.assertEquals(404, response.status_code)
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_full_revision_corrupted_ok_if_anon_and_r0(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
revision.corrupted = True
revision.save()
self.client.logout()
response = self.client.get(reverse('image_full', kwargs={'id': image.get_id(), 'r': '0'}))
self.assertEquals(200, response.status_code)
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_full_revision_corrupted_ok_if_owner_and_r0(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
revision.corrupted = True
revision.save()
response = self.client.get(reverse('image_full', kwargs={'id': image.get_id(), 'r': '0'}))
self.assertEquals(200, response.status_code)
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_full_revision_corrupted_goes_to_edit_revision_if_owner(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
revision.corrupted = True
revision.save()
response = self.client.get(reverse('image_full', kwargs={'id': image.get_id(), 'r': revision.label}))
self.assertRedirects(response, reverse('image_edit_revision', kwargs={'id': revision.pk}) + '?corrupted')
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_full_corrupted_ok_if_final_revision(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
image.corrupted = True
image.save()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
self.client.logout()
response = self.client.get(reverse('image_full', kwargs={'id': image.get_id()}), follow=True)
self.assertEquals(200, response.status_code)
response = self.client.get(reverse('image_full', kwargs={'id': image.get_id(), 'r': revision.label}))
self.assertEquals(200, response.status_code)
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_full_corrupted_404_if_non_final_revision_and_anon(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
image.corrupted = True
image.is_final = True
image.save()
revision.is_final = False
revision.save()
self.client.logout()
response = self.client.get(reverse('image_full', kwargs={'id': image.get_id()}), follow=True)
self.assertEquals(404, response.status_code)
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_full_corrupted_goes_to_edit_if_non_final_revision_and_owner(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
image.corrupted = True
image.is_final = True
image.save()
revision.is_final = False
revision.save()
response = self.client.get(reverse('image_full', kwargs={'id': image.get_id()}), follow=True)
self.assertRedirects(response, reverse('image_edit_basic', kwargs={'id': image.get_id()}) + '?corrupted')
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_full_corrupted_ok_if_non_final_revision_direct_link_and_anon(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
image.corrupted = True
image.is_final = True
image.save()
revision.is_final = False
revision.save()
self.client.logout()
response = self.client.get(reverse('image_full', kwargs={'id': image.get_id(), 'r': revision.label}))
self.assertEquals(200, response.status_code)
@override_settings(PREMIUM_MAX_REVISIONS_FREE_2020=sys.maxsize)
def test_image_full_corrupted_ok_if_non_final_revision_direct_link_and_owner(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload_revision(image, 'astrobin/fixtures/test.jpg')
revision = self._get_last_image_revision()
image.corrupted = True
image.is_final = True
image.save()
revision.is_final = False
revision.save()
response = self.client.get(
reverse('image_full', kwargs={'id': image.get_id(), 'r': revision.label}))
self.assertEquals(200, response.status_code)
def test_image_platesolving_not_available_on_free(self):
image = Generators.image()
image.user = self.user
image.subject_type = SubjectType.DEEP_SKY
image.save()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertNotContains(response, "id=\"platesolving-status\"")
image.delete()
def test_image_platesolving_available_on_lite(self):
image = Generators.image()
image.user = self.user
image.subject_type = SubjectType.DEEP_SKY
image.save()
us = Generators.premium_subscription(self.user, "AstroBin Lite")
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertContains(response, "id=\"platesolving-status\"")
image.delete()
def test_image_platesolving_available_on_premium(self):
image = Generators.image()
image.user = self.user
image.subject_type = SubjectType.DEEP_SKY
image.save()
us = Generators.premium_subscription(self.user, "AstroBin Premium")
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertContains(response, "id=\"platesolving-status\"")
image.delete()
def test_image_platesolving_available_on_lite_2020(self):
image = Generators.image()
image.user = self.user
image.subject_type = SubjectType.DEEP_SKY
image.save()
us = Generators.premium_subscription(self.user, "AstroBin Lite 2020+")
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertContains(response, "id=\"platesolving-status\"")
image.delete()
def test_image_platesolving_available_on_premium_2020(self):
image = Generators.image()
image.user = self.user
image.subject_type = SubjectType.DEEP_SKY
image.save()
us = Generators.premium_subscription(self.user, "AstroBin Premium 2020+")
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertContains(response, "id=\"platesolving-status\"")
image.delete()
def test_image_platesolving_available_on_ultimate_2020(self):
image = Generators.image()
image.user = self.user
image.subject_type = SubjectType.DEEP_SKY
image.save()
us = Generators.premium_subscription(self.user, "AstroBin Ultimate 2020+")
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertContains(response, "id=\"platesolving-status\"")
image.delete()
def test_image_gear_list_is_hidden(self):
image = Generators.image()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertNotContains(response, "<div class=\"gear\">")
image.delete()
def test_image_gear_list_is_shown(self):
image = Generators.image()
telescope = Generators.telescope()
image.imaging_telescopes.add(telescope)
image.subject_type = SubjectType.DEEP_SKY
image.save()
response = self.client.get(reverse('image_detail', kwargs={'id': image.get_id()}))
self.assertContains(response, "<div class=\"gear\">")
telescope.delete()
image.delete()
def test_image_designated_iotd_submitters(self):
group = Group.objects.create(name='iotd_submitters')
for i in range(10):
user = Generators.user()
user.groups.add(group)
image = Generators.image()
self.assertEquals(2, image.designated_iotd_submitters.count())
def test_image_designated_iotd_reviewers(self):
group = Group.objects.create(name='iotd_reviewers')
for i in range(10):
user = Generators.user()
user.groups.add(group)
image = Generators.image()
self.assertEquals(2, image.designated_iotd_reviewers.count())
| agpl-3.0 | -8,282,067,776,369,709,000 | 39.8256 | 120 | 0.617361 | false |
zetaops/zengine | tests/test_channel_management.py | 1 | 17190 | # -*- coding: utf-8 -*-
"""
"""
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
import time
from zengine.lib.test_utils import BaseTestCase
from zengine.models import User
from zengine.messaging.model import Channel, Subscriber, Message
from pyoko.db.adapter.db_riak import BlockDelete
import random
class TestCase(BaseTestCase):
def test_channel_management(self):
# with BlockDelete(Channel):
# for channel in Channel.objects.filter(typ=15):
# channel.delete()
# for s in Subscriber.objects.filter(channel=channel):
# s.delete()
# for m in Message.objects.filter(channel=channel):
# m.delete()
ch, sb, msg = create_test_data()
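        # create_test_data (a helper assumed to be defined elsewhere in this module) returns
        # the keys of the channels, subscribers and messages created for this test.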
time.sleep(2)
# INCORRECT_OPERATIONS_CONTROLS
user = User.objects.get(username='super_user')
self.prepare_client('channel_management', user=user)
resp = self.client.post()
channel_list = resp.json['forms']['model']["ChannelList"]
assert 'wf_meta' in resp.json
assert resp.json['wf_meta']['name'] == 'channel_management'
assert resp.json['wf_meta']['current_step'] == 'ChannelList'
assert resp.json['forms']['schema']["title"] == 'Public Channel List'
assert len(channel_list) == Channel.objects.filter(typ=15).count()
resp = self.client.post(cmd="create_new_channel", form={'new_channel': 1, })
assert resp.json['msgbox']['title'] == 'Incorrect Operation'
assert 'new channel' in resp.json['msgbox']['msg']
resp = self.client.post(cmd="choose_existing_channel", form={'existing_channel': 1})
assert resp.json['msgbox']['title'] == 'Incorrect Operation'
assert 'existing channel' in resp.json['msgbox']['msg']
resp = self.client.post(cmd="find_chosen_channel", form={'find_chosen_channel': 1})
assert resp.json['msgbox']['title'] == 'Incorrect Operation'
assert 'split operation' in resp.json['msgbox']['msg']
channel_list = resp.json['forms']['model']["ChannelList"]
channel_list[0]['choice'] = True
channel_list[1]['choice'] = True
resp = self.client.post(cmd="find_chosen_channel",
form={'ChannelList': channel_list, 'find_chosen_channel': 1})
assert resp.json['msgbox']['title'] == 'Incorrect Operation'
assert 'split operation' in resp.json['msgbox']['msg']
# MERGE_AT_NEW_CHANNEL
channel_list = resp.json['forms']['model']["ChannelList"]
# Two channels are chosen.
channel_list[0]['choice'] = True
channel_list[1]['choice'] = True
        # Subscriber counts of the chosen channels are recorded.
subs_ch1 = Subscriber.objects.filter(channel_id=channel_list[0]['key']).count()
subs_ch2 = Subscriber.objects.filter(channel_id=channel_list[1]['key']).count()
resp = self.client.post(cmd="create_new_channel",
form={'ChannelList': channel_list, 'new_channel': 1})
        # The word 'Specify' is expected in the form title.
assert 'Specify' in resp.json['forms']['schema']['title']
        # The new channel's attributes are specified.
resp = self.client.post(flow="find_target_channel",
form={'description': "New_Trial_Channel", 'forward': 1,
'name': 'New_Channel',
'owner_id': "HjgPuHelltHC9USbj8wqd286vbS"})
        # It is checked that the flow returns to the channel list screen.
assert resp.json['forms']['schema']["title"] == 'Public Channel List'
# Successful Operation title is checked.
assert resp.json['msgbox']['title'] == 'Successful Operation'
        assert channel_list[0]['name'] in resp.json['msgbox']['msg']
        assert channel_list[1]['name'] in resp.json['msgbox']['msg']
        assert 'New_Channel' in resp.json['msgbox']['msg']
        # Channel names and the newly created channel's key are retrieved.
channel_name_list, new_channel_key = find_channel_name_list(
resp.json['forms']['model']["ChannelList"], 'New_Channel')
ch.append(new_channel_key)
msg.extend([msg.key for msg in Message.objects.filter(channel_id=new_channel_key)])
        # It is checked that 'New_Channel' is present and the merged channels are gone.
assert 'New_Channel' in channel_name_list
        # The channel's owner is verified.
assert "HjgPuHelltHC9USbj8wqd286vbS" == Channel.objects.get('new_channel').owner.key
        assert channel_list[0]['name'] not in channel_name_list
        assert channel_list[1]['name'] not in channel_name_list
        # The new channel's subscriber count should equal the sum of the two chosen channels'.
assert Subscriber.objects.filter(channel_id=new_channel_key).count() == subs_ch1 + subs_ch2
# Two chosen channels are deleted and new channel is created.
        # Channel count should decrease by one.
assert len(resp.json['forms']['model']["ChannelList"]) == len(channel_list) - 1
# The messages are tested for deletion.
assert Message.objects.filter(typ=15, channel_id=channel_list[0]['key']).count() == 0
assert Message.objects.filter(typ=15, channel_id=channel_list[1]['key']).count() == 0
# MERGE_WITH_AN_EXISTING_CHANNEL
channel_list = resp.json['forms']['model']["ChannelList"]
# One channel is selected.
channel_list[0]['choice'] = True
# Subscriber count of channel is taken.
chosen_channel_count = Subscriber.objects.filter(channel_id=channel_list[0]['key']).count()
resp = self.client.post(cmd="choose_existing_channel",
form={'ChannelList': channel_list, 'existing_channel': 1})
assert 'wf_meta' in resp.json
assert resp.json['wf_meta']['name'] == 'channel_management'
assert resp.json['wf_meta']['current_step'] == 'ChooseExistingChannel'
# Channel choosing screen is expected.
assert 'Choose a Channel' in resp.json['forms']['schema']['title']
exist_channel_list = resp.json['forms']['model']["ChannelList"]
        # It is checked that the chosen channel is no longer shown on the screen.
assert len(exist_channel_list) == len(channel_list) - 1
# Existing channel is selected.
exist_channel_list[0]['choice'] = True
# Existing channel's subscriber count is taken.
exs_channel_first_count = Subscriber.objects.filter(
channel_id=exist_channel_list[0]['key']).count()
resp = self.client.post(form={'ChannelList': exist_channel_list, 'choose': 1})
        # It is checked that we return to the channel list screen.
assert resp.json['forms']['schema']["title"] == 'Public Channel List'
# Successful Operation title is checked.
assert resp.json['msgbox']['title'] == 'Successful Operation'
        # It is checked that both channel names appear in the message.
        assert channel_list[0]['name'] in resp.json['msgbox']['msg']
        assert exist_channel_list[0]['name'] in resp.json['msgbox']['msg']
channel_name_list, new_channel_key = find_channel_name_list(
resp.json['forms']['model']["ChannelList"], '')
        # It is checked that the chosen channel's name is no longer on the screen
        # and the existing channel is still there.
assert exist_channel_list[0]['name'] in channel_name_list
assert channel_list[0]['name'] not in channel_name_list
        # Existing channel's updated subscriber count is checked.
assert Subscriber.objects.filter(channel_id=exist_channel_list[0][
'key']).count() == chosen_channel_count + exs_channel_first_count
        # One chosen channel should be deleted. Thus, the channel count should decrease by one.
assert len(resp.json['forms']['model']["ChannelList"]) == len(channel_list) - 1
# The messages are tested for deletion.
assert Message.objects.filter(typ=15, channel_id=channel_list[0]['key']).count() == 0
# SPLIT CHANNEL
channel_list, chosen_channel = find_channel_to_choose(
resp.json['forms']['model']["ChannelList"])
# One channel is selected to split.
        # The chosen channel's subscriber and message counts are taken.
split_ch_subs_count = Subscriber.objects.filter(channel_id=chosen_channel['key']).count()
split_ch_msg_count = Message.objects.filter(channel_id=chosen_channel['key']).count()
resp = self.client.post(cmd="find_chosen_channel",
form={'ChannelList': channel_list, 'find_chosen_channel': 1})
        # The chosen channel's subscribers are expected.
assert 'Subscribers' in resp.json['forms']['schema']['title']
subscriber_list = resp.json['forms']['model']["SubscriberList"]
        # Subscriber counts on the screen and in the database should be equal.
assert len(subscriber_list) == Subscriber.objects.filter(channel_id=chosen_channel['key'],
typ=15).count()
# SPLIT_OPERATION_INCORRECT_OPERATIONS
resp = self.client.post(cmd="create_new_channel", form={'new_channel': 1})
assert resp.json['msgbox']['title'] == 'Incorrect Operation'
assert 'one subscriber' in resp.json['msgbox']['msg']
resp = self.client.post(cmd="create_new_channel", form={'existing_channel': 1})
assert resp.json['msgbox']['title'] == 'Incorrect Operation'
assert 'one subscriber' in resp.json['msgbox']['msg']
# SPLIT_OPERATION_TO_NEW_CHANNEL
subscriber_list[0]['choice'] = True
subscriber_list[1]['choice'] = True
resp = self.client.post(cmd="create_new_channel",
form={'SubscriberList': subscriber_list, 'new_channel': 1})
        # The new channel creation screen is expected.
assert 'Specify' in resp.json['forms']['schema']['title']
# New channel's features are specified.
resp = self.client.post(flow="find_target_channel",
form={'description': "New_Split_Channel", 'forward': 1,
'name': 'New_Split_Channel',
'owner_id': 'HjgPuHelltHC9USbj8wqd286vbS'})
        # It is checked that we return to the channel list screen.
assert resp.json['forms']['schema']["title"] == 'Public Channel List'
# Successful Operation title is checked.
assert resp.json['msgbox']['title'] == 'Successful Operation'
        # The success message should contain both channel names.
        assert chosen_channel['name'] in resp.json['msgbox']['msg']
        assert 'New_Split_Channel' in resp.json['msgbox']['msg']
channel_name_list, new_channel_key = find_channel_name_list(
resp.json['forms']['model']["ChannelList"], 'New_Split_Channel')
ch.append(new_channel_key)
msg.extend([m.key for m in Message.objects.filter(channel_id=new_channel_key)])
        # Both channels should be in the channel name list.
        assert chosen_channel['name'] in channel_name_list
        assert 'New_Split_Channel' in channel_name_list
# New channel's subscriber and message counts are taken.
new_ch_subs_count = Subscriber.objects.filter(channel_id=new_channel_key).count()
new_ch_msg_count = Message.objects.filter(channel_id=new_channel_key).count()
        # The split channel's updated subscriber count should equal the difference between
        # its initial subscriber count and the new channel's subscriber count.
assert Subscriber.objects.filter(
channel_id=chosen_channel['key']).count() == split_ch_subs_count - new_ch_subs_count
        # The split channel's and the new channel's message histories should be equal.
assert new_ch_msg_count == split_ch_msg_count
        # A new channel is created, so the channel count should increase by one.
assert len(resp.json['forms']['model']["ChannelList"]) == len(channel_list) + 1
# SPLIT_OPERATION_TO_EXISTING_CHANNEL
channel_list, chosen_channel = find_channel_to_choose(
resp.json['forms']['model']["ChannelList"])
# One channel is selected to split.
chosen_channel['choice'] = True
split_ch_subs_count = Subscriber.objects.filter(channel_id=chosen_channel['key']).count()
resp = self.client.post(cmd="find_chosen_channel",
form={'ChannelList': channel_list, 'find_chosen_channel': 1})
subscriber_list = resp.json['forms']['model']["SubscriberList"]
# Two subscribers are selected.
subscriber_list[0]['choice'] = True
subscriber_list[1]['choice'] = True
resp = self.client.post(cmd="choose_existing_channel",
form={'SubscriberList': subscriber_list, 'existing_channel': 1})
# Channel choosing screen is expected.
assert 'Choose a Channel' in resp.json['forms']['schema']['title']
        # Selectable channel count should be one less than the total, since a channel
        # cannot be merged into itself.
exist_channel_list = resp.json['forms']['model']["ChannelList"]
assert len(exist_channel_list) == len(channel_list) - 1
# One existing channel is selected.
exist_channel_list[0]['choice'] = True
# Existing channel's subscriber count is taken.
exs_channel_first_count = Subscriber.objects.filter(
channel_id=exist_channel_list[0]['key']).count()
resp = self.client.post(form={'ChannelList': exist_channel_list, 'choose': 1})
        # It is checked that we return to the channel list screen.
assert resp.json['forms']['schema']["title"] == 'Public Channel List'
# Successful Operation title is checked.
assert resp.json['msgbox']['title'] == 'Successful Operation'
        assert chosen_channel['name'] in resp.json['msgbox']['msg']
        assert exist_channel_list[0]['name'] in resp.json['msgbox']['msg']
channel_name_list, new_channel_key = find_channel_name_list(
resp.json['forms']['model']["ChannelList"])
        # Both channels should be on the screen.
        assert chosen_channel['name'] in channel_name_list
        assert exist_channel_list[0]['name'] in channel_name_list
        # Existing channel's updated subscriber count should increase by 2.
assert Subscriber.objects.filter(
channel_id=exist_channel_list[0]['key']).count() == exs_channel_first_count + 2
        # The split channel's updated subscriber count should decrease by 2.
assert Subscriber.objects.filter(
channel_id=chosen_channel['key']).count() == split_ch_subs_count - 2
        # Channel count on the screen should remain the same.
assert len(channel_list) == len(resp.json['forms']['model']["ChannelList"])
delete_test_data(ch, sb, msg)
def find_channel_name_list(form_info, name=None):
"""
Args:
form_info: form which contains channel info. (name, choice, key)
name(str): channel name
Returns:
channel_name_list(list): Name list of channels in form
new_channel_key(str): New created channel's key.
"""
channel_name_list = []
new_channel_key = ''
for channel in form_info:
channel_name_list.append(channel['name'])
if name and name in channel['name']:
new_channel_key = channel['key']
return channel_name_list, new_channel_key
def find_channel_to_choose(channel_list):
"""
    A channel which has at least two subscribers is found and its choice flag
    is updated to True.
Args:
channel_list: form which contains channel info. (name, choice, key)
Returns:
channel_list: updated with choice True
        chosen_channel (object): A channel which has at least 2 subscribers.
"""
    for i, c in enumerate(channel_list):
        if Subscriber.objects.filter(typ=15, channel_id=c['key']).count() >= 2:
            channel_list[i]['choice'] = True
            chosen_channel = channel_list[i]
            # Stop at the first match so that only one channel is marked chosen.
            break
    return channel_list, chosen_channel
def create_test_data():
# Channels, subscribers and messages are created for test environment.
    # Three distinct lists (a chained `ch = sb = msg = []` would alias one list).
    ch, sb, msg = [], [], []
    a = [u for u in User.objects.all() if u.username is not None]
for i in range(5):
c = Channel(name="%i Class" % random.randrange(1000, 9000), owner=random.choice(a),
typ=15).save()
ch.append(c.key)
for i in range(2):
u = random.choice(a)
s = Subscriber(channel=c, typ=15, name=u.username, user=u).save()
sb.append(s.key)
for i in range(2):
m = Message(channel=c, typ=15, sender=random.choice(a),
receiver=random.choice(a), msg_title=str(random.randrange(1, 1000)),
body=str(random.randrange(1, 1000))).save()
msg.append(m.key)
return ch, sb, msg
def delete_test_data(ch, sb, msg):
# Created channels, subscribers and messages are deleted.
with BlockDelete(Channel):
Channel.objects.filter(key__in=ch).delete()
with BlockDelete(Subscriber):
Subscriber.objects.filter(key__in=sb).delete()
with BlockDelete(Message):
Message.objects.filter(key__in=msg).delete()
| gpl-3.0 | 7,119,566,577,026,506,000 | 47.286517 | 99 | 0.615998 | false |
AtalM2/iAtal | src/python/classes.py | 1 | 1384 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2012
#
# This file is part of iAtal.
#
# iAtal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# iAtal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with iAtal. If not, see <http://www.gnu.org/licenses/>.
# Defines a sensor.
class sensor:
def __init__(self,map_, level_, range_):
self.map_ = map_
self.level_ = level_
self.range_ = range_
    # Gets the item in range on the map.
def activate(self):
return self.map_.getItem(self.level_ , self.range_)
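# Defines a compass; activate() returns the map's compass reading.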
class compass:
def __init__(self,map_):
self.map_ = map_
def activate(self):
return self.map_.compass()
# Defines an actuator.
class actuator:
def __init__(self,map_, level_, range_,newContent_):
self.map_ = map_
self.level_ = level_
self.range_ = range_
self.newContent_ = newContent_
    # Sets the new item on the map.
def activate(self):
self.map_.setItem(self.level_, self.range_, self.newContent_)
| gpl-3.0 | 8,569,586,578,108,410,000 | 29.086957 | 70 | 0.703035 | false |
Daniel-CA/odoo-addons | __unported__/avanzosc_module_doc/wizard/create_module_documentation.py | 1 | 3050 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2008-2013 AvanzOSC S.L. All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
class create_extra_documentation(orm.TransientModel):
_name = 'module.doc.create'
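    # Create (or reuse) a module.doc record for each given module id and
    # link it back to the corresponding ir.module.module record.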
def create_documentation(self, cr, uid, ids, context=None):
doc_obj = self.pool.get('module.doc')
mod_obj = self.pool.get('ir.module.module')
for id in ids:
search_ids = doc_obj.search(cr, uid, [('module_id', '=', id)],
context=context)
if not search_ids:
created_id = doc_obj.create(cr, uid, {'module_id': id},
context=context)
name = doc_obj.onchange_module_id(cr, uid, [created_id], id,
context=context)['value']['name']
doc_obj.write(cr, uid, created_id, {'name': name},
context=context)
mod_obj.write(cr, uid, id, {'doc_id': created_id},
context=context)
else:
for search_id in search_ids:
doc_obj.write(cr, uid, search_id, {'has_info': True},
context=context)
mod_obj.write(cr, uid, id, {'doc_id': search_id},
context=context)
return {
'name': _('Extra documentation'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'module.doc',
'type': 'ir.actions.act_window',
}
def create_documentation_all(self, cr, uid, ids, context):
mod_obj = self.pool.get('ir.module.module')
all_ids = mod_obj.search(cr, uid, [])
return self.create_documentation(cr, uid, all_ids, context)
def create_documentation_installed(self, cr, uid, ids, context):
mod_obj = self.pool.get('ir.module.module')
installed_ids = mod_obj.search(cr, uid, [('state', '=', 'installed')])
return self.create_documentation(cr, uid, installed_ids, context)
| agpl-3.0 | -3,631,337,101,992,934,000 | 44.522388 | 83 | 0.535738 | false |
neuromat/nira | person/views.py | 1 | 5872 | # -*- coding: utf-8 -*-
from collections import Counter
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.shortcuts import render, redirect
from django.utils.translation import ugettext_lazy as _
from person.models import CitationName, Person
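# Name particles ('da', 'de', 'dos', ...) that are skipped when building
# citation initials; both lowercase and capitalized forms are listed.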
prep = ['e', 'da', 'do', 'de', 'dos', 'E', 'Da', 'Do', 'De', 'Dos']
def name_with_first_letters(names, with_prep):
letters = ''
last_name = names[-1]
last_name_with_prep = names[-2]+' '+last_name
for name in names:
if name != last_name and name not in prep:
letters += name[0]
if not with_prep:
return last_name+','+' '+letters
else:
return last_name_with_prep+','+' '+letters
def names_without_last_name(names, with_prep):
last_name = names[-1]
last_name_with_prep = names[-2]+' '+last_name
citation_name = [name for name in names if name != last_name and name not in prep]
citation_name = ' '.join(citation_name)
if not with_prep:
return last_name+','+' '+citation_name
else:
return last_name_with_prep+','+' '+citation_name
def first_name_and_first_letter(names, with_prep):
first_letter = ''
first_name = names[0]
last_name = names[-1]
last_name_with_prep = names[-2]+' '+last_name
for name in names:
if name != first_name and name != last_name and name not in prep:
first_letter += name[0]
if not with_prep:
if first_letter != '':
citation_name = first_name+' '+first_letter
return last_name+','+' '+citation_name
else:
citation_name = first_name
return last_name+','+' '+citation_name
else:
if first_letter != '':
citation_name = first_name+' '+first_letter
return last_name_with_prep+','+' '+citation_name
else:
citation_name = first_name
return last_name_with_prep+','+' '+citation_name
def generate_citation_names(person):
# Get full name and id from person.
full_name = person.full_name
person_id = person.pk
# Split the full name.
split_name = full_name.split()
# Maybe the user has a default citation
citation_default = CitationName.objects.filter(person_id=person_id, default_name=True)
# Get the first letter of the name except the last name
# letters = name_with_first_letters(split_name)
citation_01 = name_with_first_letters(split_name, False)
# Get names without last name
# almost_full_name = names_without_last_name(split_name)
citation_02 = names_without_last_name(split_name, False)
# Get first name and first letter of the middle name
# first_name_letter_middle_name = first_name_and_first_letter(split_name)
citation_03 = first_name_and_first_letter(split_name, False)
# Imagine a person called João Carlos da Silva.
# Here the citation would be "Silva, JC"
if citation_default:
citation_name_01 = CitationName(person_id=person_id, name=citation_01)
        if not CitationName.objects.filter(person_id=person_id, name=citation_name_01).exists():
citation_name_01.save()
else:
citation_name_01 = CitationName(person_id=person_id, name=citation_01, default_name=True)
        if not CitationName.objects.filter(person_id=person_id, name=citation_name_01).exists():
citation_name_01.save()
# Here the citation would be "Silva, João Carlos"
citation_name_02 = CitationName(person_id=person_id, name=citation_02)
    if not CitationName.objects.filter(person_id=person_id, name=citation_name_02).exists():
citation_name_02.save()
# Here the citation would be "Silva, João C"
citation_name_03 = CitationName(person_id=person_id, name=citation_03)
    if not CitationName.objects.filter(person_id=person_id, name=citation_name_03).exists():
citation_name_03.save()
# Here the last name will be "da Silva"
if split_name[-2] in prep:
# last_name_with_prep = split_name[-2]+' '+last_name
prep_01 = name_with_first_letters(split_name, True)
prep_02 = names_without_last_name(split_name, True)
prep_03 = first_name_and_first_letter(split_name, True)
# Here the citation would be "da Silva, JC"
citation_name_prep = CitationName(person_id=person_id, name=prep_01)
        if not CitationName.objects.filter(person_id=person_id, name=citation_name_prep).exists():
citation_name_prep.save()
# Here the citation would be "da Silva, João Carlos"
citation_name_prep_02 = CitationName(person_id=person_id, name=prep_02)
        if not CitationName.objects.filter(person_id=person_id, name=citation_name_prep_02).exists():
citation_name_prep_02.save()
# Here the citation would be "da Silva, João C"
citation_name_prep_03 = CitationName(person_id=person_id, name=prep_03)
        if not CitationName.objects.filter(person_id=person_id, name=citation_name_prep_03).exists():
citation_name_prep_03.save()
@login_required
def citation_names(request):
# Create citation names for each person
for person in Person.objects.all():
generate_citation_names(person)
messages.success(request, _('Successfully updated citation names.'))
return redirect(reverse('admin:index'))
@login_required
def researchers(request):
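    # Report view: lists every researcher and tallies how many hold each role.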
list_of_researchers = Person.objects.all()
list_of_roles = []
    for researcher in list_of_researchers:
        list_of_roles.append(str(researcher.role))
table_of_roles = (Counter(list_of_roles)).items()
context = {'list_of_researchers': list_of_researchers, 'table_of_roles': table_of_roles}
return render(request, 'report/person/researchers.html', context)
| mpl-2.0 | -4,193,132,889,697,596,000 | 36.608974 | 106 | 0.656724 | false |
Hguimaraes/gtzan.keras | src/gtzan/data/make_dataset.py | 1 | 3938 | import os
import librosa
import itertools
import numpy as np
import pandas as pd
from scipy.stats import kurtosis
from scipy.stats import skew
def get_features(y, sr, n_fft = 1024, hop_length = 512):
# Features to concatenate in the final dictionary
features = {'centroid': None, 'roloff': None, 'flux': None, 'rmse': None,
'zcr': None, 'contrast': None, 'bandwidth': None, 'flatness': None}
# Count silence
if 0 < len(y):
y_sound, _ = librosa.effects.trim(y, frame_length=n_fft, hop_length=hop_length)
features['sample_silence'] = len(y) - len(y_sound)
# Using librosa to calculate the features
features['centroid'] = librosa.feature.spectral_centroid(y, sr=sr, n_fft=n_fft, hop_length=hop_length).ravel()
features['roloff'] = librosa.feature.spectral_rolloff(y, sr=sr, n_fft=n_fft, hop_length=hop_length).ravel()
features['zcr'] = librosa.feature.zero_crossing_rate(y, frame_length=n_fft, hop_length=hop_length).ravel()
features['rmse'] = librosa.feature.rms(y, frame_length=n_fft, hop_length=hop_length).ravel()
features['flux'] = librosa.onset.onset_strength(y=y, sr=sr).ravel()
features['contrast'] = librosa.feature.spectral_contrast(y, sr=sr).ravel()
features['bandwidth'] = librosa.feature.spectral_bandwidth(y, sr=sr, n_fft=n_fft, hop_length=hop_length).ravel()
features['flatness'] = librosa.feature.spectral_flatness(y, n_fft=n_fft, hop_length=hop_length).ravel()
# MFCC treatment
mfcc = librosa.feature.mfcc(y, n_fft = n_fft, hop_length = hop_length, n_mfcc=13)
for idx, v_mfcc in enumerate(mfcc):
features['mfcc_{}'.format(idx)] = v_mfcc.ravel()
# Get statistics from the vectors
def get_moments(descriptors):
result = {}
for k, v in descriptors.items():
result['{}_max'.format(k)] = np.max(v)
result['{}_min'.format(k)] = np.min(v)
result['{}_mean'.format(k)] = np.mean(v)
result['{}_std'.format(k)] = np.std(v)
result['{}_kurtosis'.format(k)] = kurtosis(v)
result['{}_skew'.format(k)] = skew(v)
return result
dict_agg_features = get_moments(features)
dict_agg_features['tempo'] = librosa.beat.tempo(y, sr=sr)[0]
return dict_agg_features
"""
@description: Method to split a song into multiple songs using overlapping windows
"""
def splitsongs(X, overlap = 0.5):
# Empty lists to hold our results
temp_X = []
# Get the input song array size
xshape = X.shape[0]
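    # Fixed window of 33000 samples -- roughly 1.5 s assuming GTZAN's native
    # 22050 Hz rate (the audio is loaded with sr=None, so this is an assumption).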
chunk = 33000
offset = int(chunk*(1.-overlap))
# Split the song and create new ones on windows
spsong = [X[i:i+chunk] for i in range(0, xshape - chunk + offset, offset)]
for s in spsong:
if s.shape[0] != chunk:
continue
temp_X.append(s)
return np.array(temp_X)
"""
@description: Method to convert a list of songs to a np array of melspectrograms
"""
def to_melspectrogram(songs, n_fft=1024, hop_length=256):
# Transformation function
melspec = lambda x: librosa.feature.melspectrogram(x, n_fft=n_fft,
hop_length=hop_length, n_mels=128)[:,:,np.newaxis]
# map transformation of input songs to melspectrogram using log-scale
tsongs = map(melspec, songs)
# np.array([librosa.power_to_db(s, ref=np.max) for s in list(tsongs)])
return np.array(list(tsongs))
def make_dataset_ml(args):
signal, sr = librosa.load(args.song, sr=None)
# Append the result to the data structure
features = get_features(signal, sr)
song = pd.DataFrame([features])
return song
def make_dataset_dl(args):
# Convert to spectrograms and split into small windows
signal, sr = librosa.load(args.song, sr=None)
# Convert to dataset of spectograms/melspectograms
signals = splitsongs(signal)
# Convert to "spec" representation
specs = to_melspectrogram(signals)
return specs
| mit | -2,235,305,071,240,203,500 | 35.472222 | 116 | 0.647029 | false |
MridulS/GraphSpace | graphs/forms.py | 1 | 3011 | '''
See https://docs.djangoproject.com/en/dev/topics/forms/ for details.
'''
from django import forms
from graphs.util import db
class LoginForm(forms.Form):
'''
Login Form used to show login fields in GraphSpace webpages.
This form is located within the top navbar.
'''
# attrs to specify extra html attributes
user_id = forms.CharField(max_length=100, required=False, widget=forms.TextInput(attrs={'placeholder': 'Email', 'class': 'form-control', 'size': '13', 'id': 'email'}))
pw = forms.CharField(required=False, widget=forms.PasswordInput(attrs={'placeholder': 'Password', 'class': 'form-control', 'size': '13', 'id': 'pw'}))
class SearchForm(forms.Form):
'''
Search form used to perform search on GraphSpace
'''
def __init__(self, *args, **kwargs):
'''
Initialize the form. A keyword argument 'placeholder' may be
given.
This can be customized to specify additional parameters if it
needs to.
'''
if 'placeholder' in kwargs:
self.placeholder = kwargs.pop('placeholder')
# must be called after 'placeholder' is popped from kwargs
super(SearchForm, self).__init__(*args, **kwargs)
self.fields['search'].widget = forms.TextInput(attrs={'placeholder': self.placeholder, 'class': 'form-control', 'type': 'text', 'name': 'search'})
else:
super(SearchForm, self).__init__(*args, **kwargs)
self.fields['search'].widget = forms.TextInput(attrs={'class': 'form-control', 'type': 'text', 'name': 'search'})
search = forms.CharField(required=False, label='', max_length=100)
class RegisterForm(forms.Form):
'''
Register form to help create an account for a new user.
'''
user_id = forms.CharField(required=False, label='Email', max_length=100,
widget=forms.TextInput(attrs={'class': 'form-control', 'type': 'text', 'size': '25', 'id': 'user_id'}))
password = forms.CharField(required=False, label='Password', widget=forms.PasswordInput(attrs={'class': 'form-control', 'size': '25', 'id': 'password'}))
verify_password = forms.CharField(required=False, label='Verify Password', widget=forms.PasswordInput(attrs={'class': 'form-control', 'size': '25', 'id': 'verify_password'}))
def clean_user_id(self):
'''
        Form validation to check if the user id already exists
in the database.
https://docs.djangoproject.com/en/1.6/ref/forms/validation/#cleaning-a-specific-field-attribute
'''
cleaned_data = super(RegisterForm, self).clean()
user_id = cleaned_data["user_id"]
check_user = db.emailExists(user_id)
        if check_user is None:
return user_id
else:
return None
def clean(self):
'''
        Form validation to check that the two passwords provided
        match.
https://docs.djangoproject.com/en/1.6/ref/forms/validation/#cleaning-a-specific-field-attribute
'''
cleaned_data = super(RegisterForm, self).clean()
pw = cleaned_data.get("password")
vpw = cleaned_data.get("verify_password")
if pw and vpw:
if pw != vpw:
raise forms.ValidationError("Passwords do not match.")
return cleaned_data | gpl-2.0 | 9,170,254,614,169,942,000 | 34.857143 | 175 | 0.68914 | false |
onnudilol/vetcalc | calc/views.py | 1 | 8644 | from django.shortcuts import render
from common.models import Injection, CRI
from calc.forms import CalcInjForm, CRISimpleForm, CRIAdvancedForm, CRIInsulinForm, CRICPRForm, CRIMetoclopramideForm
from collections import OrderedDict
def calc_injection(request):
"""Calculates injection dosages based on weight.
GET parameters:
weight: weight in lbs
    Context:
calculated dose rounded to 3 decimal places
"""
meds = Injection.objects.all()
rx = dict()
# default displayed dosage of 0.0 mLs
for med in meds:
rx[med] = 0.0
rx_ordered = OrderedDict(sorted(rx.items(), key=lambda t: t[0].name))
if request.method == 'GET' and request.is_ajax():
form = CalcInjForm(data=request.GET)
if form.is_valid():
weight = float(request.GET['weight'])
for med in meds:
rx_ordered[med] = round(med.factor * weight, 3)
return render(request, 'calc/injection.html', {'rx': rx_ordered,
'form': CalcInjForm(),
'navbar': 'calc'})
def calc_cri_simple(request):
"""Calculates simple CRI dosages based on weight.
GET parameters:
weight: weight in kgs
Context:
rx: calculated dosages rounded to 3 decimal places
"""
meds = CRI.objects.filter(calc_type='ez')
form = CRISimpleForm()
rx = dict()
bolus = dict()
# zipped list of rates to dosage with default displayed dosages of 0.0 mL
for med in meds:
rx[med] = list(zip([rate for rate in med.rates],
[0.0 * rate for rate in med.rates]))
if request.method == 'GET' and request.is_ajax():
form = CRISimpleForm(data=request.GET)
if form.is_valid():
weight = float(request.GET['weight'])
for med in meds:
rx[med] = list(zip([rate for rate in med.rates],
[round(weight * med.factor * rate, 3) for rate in med.rates]))
# bolus is calculated for diltiazem
bolus = {'mg': round(weight * 0.25, 3), 'mL': round(weight * 0.05, 3)}
return render(request, 'calc/cri_simple.html', {'navbar': 'calc',
'form': form,
'rx': rx,
'bolus': bolus})
def calc_cri_advanced(request):
"""Calculates complex CRI dosages based on multiple inputs.
GET parameters:
weight: weight in kgs
rate: current cri rate
volume: current iv volume in mL
infusion: target infusion rate
Context:
rx: calculated dosages rounded to 3 decimal places
"""
meds = CRI.objects.filter(calc_type='adv')
form = CRIAdvancedForm()
rx = dict()
for med in meds:
rx[med] = dict()
if request.method == 'GET' and request.is_ajax():
form = CRIAdvancedForm(data=request.GET)
if form.is_valid():
weight = float(request.GET['weight'])
rate = float(request.GET['rate'])
volume = float(request.GET['volume'])
infusion = float(request.GET['infusion'])
for med in meds:
rx[med] = {'maint': round((weight * 30 * 2.2)/24, 3),
'maint_plus': round((weight * 30 + 70)/24, 3),
'add': round(((weight * infusion * med.factor) / (rate/60)) * volume, 3)}
return render(request, 'calc/cri_advanced.html', {'navbar': 'calc',
'form': form,
'rx': rx})
def calc_cri_insulin(request):
"""Calculates CRI dosages for insulin
GET parameters:
weight: weight in kgs
rate: current rate
volume: current iv vol in mLs
replacement: target replacement rate
Context:
rx: calculated dosages rounded to 3 decimal places
"""
form = CRIInsulinForm()
rx = dict()
if request.method == 'GET' and request.is_ajax():
form = CRIInsulinForm(data=request.GET)
if form.is_valid():
weight = float(request.GET['weight'])
rate = float(request.GET['rate'])
volume = float(request.GET['volume'])
replacement = float(request.GET['replacement'])
phosphorus = ((weight * replacement/3) * volume)/rate
rx = {'maint': round((weight * 2.2 * 30)/24, 3),
'maint_plus': round((weight * 30 + 70)/24, 3),
'units_dog': round(((weight * 2.2) / (rate * 24)) * volume, 3),
'units_cat': round((weight * 1.1) / (rate * 24) * volume, 3),
'phosphorus': round(phosphorus, 3),
'phosphorus_excess': round(phosphorus * 4.4 * 1000 / volume, 3)}
return render(request, 'calc/cri_insulin.html', {'navbar': 'calc',
'form': form,
'rx': rx})
def calc_cri_cpr(request):
"""Calculates CRI dosages for post CPR maintenance
GET parameters:
weight: weight in kg
rate: current rate
volume: current iv vol in mL
dobutamine: target dobutamine rate
dopamine: target dopamine rate
lidocaine: target lidocaine rate
Context:
rx: calculated cri dosages rounded to 3 decimal places
"""
form = CRICPRForm()
rx = dict()
if request.method == 'GET' and request.is_ajax():
form = CRICPRForm(data=request.GET)
if form.is_valid():
weight = float(request.GET['weight'])
rate = float(request.GET['rate'])
volume = float(request.GET['volume'])
dobutamine = float(request.GET['dobutamine'])
dopamine = float(request.GET['dopamine'])
lidocaine = float(request.GET['lidocaine'])
rx = {'maint': round((weight * 2.2 * 30)/24, 3),
'maint_plus': round((weight * 30 + 70)/24, 3),
'dose_dobutamine': round(((weight * dobutamine) / 12500)/(rate/60) * volume, 3),
'dose_dopamine': round((weight * dopamine / 40000)/(rate/60) * volume, 3),
'dose_lidocaine': round((weight * lidocaine / 20000)/(rate/60) * volume, 3),
'dose_epinephrine': round((weight/1000)/(rate/60) * volume, 3),
'dose_mannitol': round(weight * 4, 3),
'dose_solumedrol': round(weight * 30, 3)}
return render(request, 'calc/cri_cpr.html', {'navbar': 'calc',
'form': form,
'rx': rx})
def calc_cri_metoclopramide(request):
"""Calculates CRI dosages for metoclopramide
GET parameters:
weight: weight in kg
rate: current rate
volume: current iv volume in mLs
infusion: target infusion rate
Context:
rx: calculated cri dosages rounded to 3 decimal places
"""
form = CRIMetoclopramideForm()
rx = dict()
if request.method == 'GET' and request.is_ajax():
form = CRIMetoclopramideForm(data=request.GET)
if form.is_valid():
weight = float(request.GET['weight'])
rate = float(request.GET['rate'])
volume = float(request.GET['volume'])
infusion = float(request.GET['infusion'])
dose = (weight * infusion / 5)/(rate * 24) * volume
rx = {'maint': round((weight * 2.2 * 30)/24, 3),
'maint_plus': round((weight * 30 + 70)/24, 3),
'dose': round(dose, 3),
'concentration': round(dose * 5 / volume, 3)}
if request.GET['inc_infusion'] and request.GET['inc_volume']:
inc_volume = float(request.GET['inc_volume'])
inc_infusion = float(request.GET['inc_infusion'])
dose_inc_infusion = inc_infusion + infusion
rx['inc_infusion'] = round(dose_inc_infusion, 3)
rx['inc_dose'] = round(((dose_inc_infusion * weight / (rate * 24)) - (dose * 5 / volume)) * inc_volume / 5, 3)
rx['inc_rate'] = round((dose_inc_infusion * weight)/((dose * 5)/volume)/24, 3)
return render(request, 'calc/cri_metoclopramide.html', {'navbar': 'calc',
'form': form,
'rx': rx})
| mit | -8,529,278,159,513,784,000 | 33.854839 | 126 | 0.523022 | false |
nelhage/taktician | python/test/train/test_features.py | 1 | 2650 | import tak.train
import tak.ptn
import tak.symmetry
import numpy as np
class TestFeatures(object):
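    # Helpers: planes after the first 14 board planes hold the global
    # features (e.g. the flat-count planes exercised below).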
def extra_planes(self, feat):
return feat[:,:,14:]
def is_onehot(self, m, axis=2):
return np.all(np.sum(m, axis) == 1)
def test_zero_features(self):
b = tak.Position.from_config(tak.Config(size=5))
f = tak.train.features(b)
assert f.shape == tak.train.feature_shape(5)
assert np.all(f[:,:,:14] == 0)
assert np.all(f[:,:,16] == 1)
def test_basic_features(self):
b = tak.ptn.parse_tps(
'1,x4/x5/x5/x5/x4,2 1 2')
f = tak.train.features(b)
assert np.sum(f[:,:,0]) == 1
assert np.sum(f[:,:,1]) == 1
assert f[0,4,0] == 1.0
assert f[4,0,1] == 1.0
assert np.all(f[:,:,2:14] == 0)
b1 = tak.ptn.parse_tps(
'1,x4/x5/x5/x5/x4,2 2 2')
f1 = tak.train.features(b1)
assert np.sum(f1[:,:,0]) == 1
assert np.sum(f1[:,:,1]) == 1
assert f1[0,4,1] == 1.0
assert f1[4,0,0] == 1.0
def test_flats(self):
f = tak.train.features(
tak.ptn.parse_tps(
'1,x4/x5/x5/x5/x4,2 1 2'))
ext = self.extra_planes(f)
assert self.is_onehot(
ext[:,:, tak.train.FeaturePlane.FLATS:tak.train.FeaturePlane.FLATS_MAX],
)
assert np.all(ext[:,:, tak.train.FeaturePlane.FLATS + 3] == 1)
f = tak.train.features(
tak.ptn.parse_tps(
'1,1,x3/x5/x5/x5/x4,2 1 2'))
ext = self.extra_planes(f)
assert self.is_onehot(
ext[:,:, tak.train.FeaturePlane.FLATS:tak.train.FeaturePlane.FLATS_MAX],
)
assert np.all(ext[:,:, tak.train.FeaturePlane.FLATS + 4] == 1)
f = tak.train.features(
tak.ptn.parse_tps(
'1,1,1,1,1/1,1,1,1,1/x5/x5/x4,2 1 2'))
ext = self.extra_planes(f)
assert self.is_onehot(
ext[:,:, tak.train.FeaturePlane.FLATS:tak.train.FeaturePlane.FLATS_MAX],
)
assert np.all(ext[:,:, tak.train.FeaturePlane.FLATS_MAX-1] == 1)
f = tak.train.features(
tak.ptn.parse_tps(
'1,1,1,1,1/1,1,1,1,1/x5/x5/x4,2 2 2'))
ext = self.extra_planes(f)
assert self.is_onehot(
ext[:,:, tak.train.FeaturePlane.FLATS:tak.train.FeaturePlane.FLATS_MAX],
)
assert np.all(ext[:,:, tak.train.FeaturePlane.FLATS] == 1)
def test_symmetry_features(self):
pos = tak.ptn.parse_tps("2,x,21S,2,2,2/2,2C,2,1S,x2/x3,2,x2/1,11112,1121,1C,x2/x2,1S,12,1,1/x3,1,x,1 1 20")
feat = tak.train.Featurizer(pos.size)
manual = [
feat.features(tak.symmetry.transform_position(sym, pos))
for sym in tak.symmetry.SYMMETRIES
]
computed = feat.features_symmetries(pos)
for i in range(len(manual)):
assert np.all(manual[i] == computed[i])
| mit | -8,654,246,127,363,407,000 | 29.113636 | 111 | 0.593585 | false |
daniel-j-h/libosrmc | bindings/osrm_or-tools.py | 1 | 1925 | #!/usr/bin/env python2
from __future__ import print_function
import sys
import random
from osrmcpy import OSRM, Coordinate
from ortools.constraint_solver.pywrapcp import RoutingParameters, RoutingModel, RoutingSearchParameters
# Integration with Google's or-tools for Traveling Salesman Problems
def main():
if len(sys.argv) != 2:
sys.exit('Usage: {} monaco.osrm'.format(sys.argv[0]))
osrm = OSRM(sys.argv[1])
# 100 random coordinates (n^2 table, dummy from and to coordinate)
n = 100
first = 0
last = n - 1
# Area in Monaco dataset to sample from
bottom_left = Coordinate(longitude=7.413194, latitude=43.731056)
top_right = Coordinate(longitude=7.421639, latitude=43.735440)
random_coordinate = lambda: Coordinate(longitude=random.uniform(bottom_left.longitude, top_right.longitude),
latitude=random.uniform(bottom_left.latitude, top_right.latitude))
table = osrm.table([random_coordinate() for _ in range(n)])
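    # table[s][t] is the travel time in seconds from source s to target t.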
if table:
params = RoutingParameters()
RoutingModel.SetGlobalParameters(params)
routing = RoutingModel(n, 1, [first], [last])
parameters = RoutingSearchParameters()
parameters.first_solution = 'PathCheapestArc'
parameters.no_lns = True
parameters.no_tsp = False
distance = lambda s, t: table[s][t]
routing.SetArcCostEvaluatorOfAllVehicles(distance)
solution = routing.SolveWithParameters(parameters, None)
if solution:
print('Solution: {0:.0f} seconds'.format(solution.ObjectiveValue()))
# solution can be unpacked here into routes by means of:
# routing.Start, routing.IsEnd, routing.NextVar, assignment.Value
else:
print('No solution found')
else:
print('Unable to get response from Table service')
if __name__ == '__main__':
main()
| mit | -8,368,498,982,474,790,000 | 30.557377 | 112 | 0.65974 | false |
openp2pdesign/OpenMetaDesignApp | openmetadesign.py | 1 | 41157 | # -*- coding: utf-8 -*-
#
# Open MetaDesign 0.1
#
# Author: Massimo Menichinelli
# Website:
# http://openmetadesign.org
# http://openp2pdesign.org
#
# License: GPL v.3
#
import os
import wx
import wx.lib.mixins.inspection
import wx.lib.scrolledpanel as scrolled
import thread
#from github import Github
from modules.classes import *
from modules.render import *
#from modules.githubanalysis import *
#from modules.networkrender import *
from modules.imageviewer import *
from modules.mdwriter import *
temp = project()
currentFile = ""
currentFolder = ""
githubUsername = ""
githubPassword = ""
class GitHubLogin(wx.Dialog):
def __init__(self, parent, ID, size=wx.DefaultSize, pos=wx.DefaultPosition):
pre = wx.PreDialog()
pre.SetExtraStyle(wx.DIALOG_EX_CONTEXTHELP)
pre.Create(parent, ID, "Login to GitHub", pos, size)
self.PostCreate(pre)
sizer = wx.BoxSizer(wx.VERTICAL)
box = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, -1, "Username:")
box.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
self.text1 = wx.TextCtrl(self, -1, "", size=(80,-1))
box.Add(self.text1, 1, wx.ALIGN_CENTRE|wx.ALL, 5)
sizer.Add(box, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
box = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, -1, "Password:")
box.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
self.text2 = wx.TextCtrl(self, -1, "", style=wx.TE_PASSWORD, size=(80,-1))
box.Add(self.text2, 1, wx.ALIGN_CENTRE|wx.ALL, 5)
sizer.Add(box, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
btnsizer = wx.StdDialogButtonSizer()
btn1 = wx.Button(self, wx.ID_OK)
btn1.SetDefault()
btnsizer.AddButton(btn1)
btn2 = wx.Button(self, wx.ID_CANCEL)
btnsizer.AddButton(btn2)
btnsizer.Realize()
sizer.Add(btnsizer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
self.Bind(wx.EVT_BUTTON, self.onOK, btn1)
self.SetSizer(sizer)
sizer.Fit(self)
def onOK(self,event):
global githubUsername
global githubPassword
githubUsername = self.text1.GetValue()
githubPassword = self.text2.GetValue()
self.Close(True)
self.Destroy()
class FlowTab(wx.Panel):
def __init__(self, parent,pagename="Flow"):
wx.Panel.__init__(self, parent)
box = wx.BoxSizer(wx.VERTICAL)
self.actors = []
self.flowtype = ["Financial flow",
"Physical resources flow",
"Information flow"]
label1 = wx.StaticText(self, label="Flow type:")
box.Add(label1, flag=wx.ALL|wx.EXPAND, border=10)
self.tc1 = wx.Choice(self, -1, choices = self.flowtype)
box.Add(self.tc1, flag=wx.ALL, border=10)
label2 = wx.StaticText(self, label="What does flow? (Less than 15 characters)")
box.Add(label2, flag=wx.ALL|wx.EXPAND, border=10)
self.tc2 = wx.TextCtrl(self, size=(100,20))
self.tc2.SetMaxLength(15)
box.Add(self.tc2, flag=wx.ALL, border=10)
label3 = wx.StaticText(self, label="First actor of the flow:")
box.Add(label3, flag=wx.ALL|wx.EXPAND, border=10)
self.tc3 = wx.Choice(self, -1, choices = self.actors)
box.Add(self.tc3, flag=wx.ALL, border=10)
label31 = wx.StaticText(self, label="Please update and leave the field above about actors to refresh the list")
box.Add(label31, flag=wx.ALL|wx.EXPAND, border=10)
label4 = wx.StaticText(self, label="Second actor of the flow:")
box.Add(label4, flag=wx.ALL|wx.EXPAND, border=10)
self.tc4 = wx.Choice(self, -1, choices = self.actors)
box.Add(self.tc4, flag=wx.ALL, border=10)
label41 = wx.StaticText(self, label="Please update and leave the field above about actors to refresh the list")
box.Add(label41, flag=wx.ALL|wx.EXPAND, border=10)
self.flowdirection = ["Both directions",
"From the first actor to the second one",
"From the second actor to the first one"]
label5 = wx.StaticText(self, label="Direction of the flow:")
box.Add(label5, flag=wx.ALL|wx.EXPAND, border=10)
self.tc5 = wx.Choice(self, -1, choices = self.flowdirection)
box.Add(self.tc5, flag=wx.ALL, border=10)
self.SetSizer(box)
class StepPage(scrolled.ScrolledPanel):
def __init__(self, parent,pagename="Step"):
scrolled.ScrolledPanel.__init__(self, parent, -1,size=(570,400),name=pagename)
self.panel = wx.Panel(self, -1)
self.box = wx.BoxSizer(wx.VERTICAL)
self.participationlevels = ["None",
"Indirect",
"Consultative",
"Shared control",
"Full control"]
label1 = wx.StaticText(self, label="The title of this step in the design process:")
self.box.Add(label1, flag=wx.ALL|wx.EXPAND, border=10)
self.tc1 = wx.TextCtrl(self, size=(530,20), style=wx.TE_MULTILINE)
self.box.Add(self.tc1, flag=wx.ALL|wx.EXPAND, border=10)
label2 = wx.StaticText(self, label="Participation of the community in the Open Design process:")
self.box.Add(label2, flag=wx.ALL|wx.EXPAND, border=10)
self.tc2 = wx.Choice(self, -1, choices = self.participationlevels)
self.Bind(wx.EVT_CHOICE, self.onChoice, self.tc2)
self.box.Add(self.tc2, flag=wx.ALL, border=10)
label3 = wx.StaticText(self, label="Tools used in this step of the Open Design process:")
self.box.Add(label3, flag=wx.ALL|wx.EXPAND, border=10)
self.tc3 = wx.TextCtrl(self, size=(530,80), style=wx.TE_MULTILINE)
self.box.Add(self.tc3, flag=wx.ALL|wx.EXPAND, border=10)
label4 = wx.StaticText(self, label="Rules in use in this step of the Open Design process:")
self.box.Add(label4, flag=wx.ALL|wx.EXPAND, border=10)
self.tc4 = wx.TextCtrl(self, size=(530,80), style=wx.TE_MULTILINE)
self.box.Add(self.tc4, flag=wx.ALL|wx.EXPAND, border=10)
label5 = wx.StaticText(self, label="Actors in this step of the Open Design process (separate them with a comma):")
self.box.Add(label5, flag=wx.ALL|wx.EXPAND, border=10)
self.tc5 = wx.TextCtrl(self, size=(530,80), style=wx.TE_MULTILINE)
self.box.Add(self.tc5, flag=wx.ALL|wx.EXPAND, border=10)
self.tc5.Bind(wx.EVT_KILL_FOCUS, self.onUpdateCtrl)
buttons = wx.BoxSizer(wx.HORIZONTAL)
self.flowsnumber = 1
self.flowmessage = "Number of flows in the step: " + str(self.flowsnumber)
self.label6 = wx.StaticText(self, label=self.flowmessage)
buttons.Add(self.label6, flag=wx.ALL|wx.EXPAND, border=10)
addflow = wx.Button(self, 20, "Add a flow")
buttons.Add(addflow, flag=wx.ALL, border=10)
addflow.Bind(wx.EVT_BUTTON, self.onAddFlow, addflow)
removeflow = wx.Button(self, 20, "Remove the current flow")
buttons.Add(removeflow, flag=wx.ALL, border=10)
removeflow.Bind(wx.EVT_BUTTON, self.onRemoveFlow, removeflow)
self.box.Add(buttons,flag=wx.ALL|wx.EXPAND, border=10)
self.tabs = {}
self.nestednb = wx.Notebook(self)
self.tabs[0] = FlowTab(self.nestednb)
self.nestednb.AddPage(self.tabs[0], "Flow n. 1")
self.box.Add(self.nestednb,2,wx.EXPAND, border=10)
self.SetSizer(self.box)
self.SetAutoLayout(1)
self.SetupScrolling()
def onUpdateCtrl(self,event):
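        # Propagate the comma-separated actor list to every flow tab's
        # actor choice controls when the text field loses focus.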
for k in range(1,self.flowsnumber+1):
self.tabs[k].actors = [x.strip() for x in self.tc5.GetValue().split(',')]
self.tabs[k].tc3.SetItems(self.tabs[k].actors)
self.tabs[k].tc4.SetItems(self.tabs[k].actors)
def onUpdateCtrlLoadFile(self):
for k in range(1,self.flowsnumber+1):
self.tabs[k].actors = [x.strip() for x in self.tc5.GetValue().split(',')]
self.tabs[k].tc3.SetItems(self.tabs[k].actors)
self.tabs[k].tc4.SetItems(self.tabs[k].actors)
def onChoice(self, event):
choice = event.GetString()
print choice
def onRemoveFlow(self, event):
        # Guard: don't let the flow count go negative.
        if self.flowsnumber > 0:
self.flowsnumber -= 1
self.nestednb.DeletePage(self.nestednb.GetSelection())
del self.tabs[self.nestednb.GetSelection()+1]
self.flowmessage = "Number of flows in the step: " + str(self.flowsnumber)
self.label6.SetLabel(self.flowmessage)
for j in range(self.flowsnumber+1):
self.nestednb.SetPageText(j, "Flow: "+str(j+1))
else:
pass
def onAddFlow(self, event):
self.flowsnumber += 1
self.flowmessage = "Number of flows in the step: " + str(self.flowsnumber)
self.label6.SetLabel(self.flowmessage)
self.tabs[self.flowsnumber] = FlowTab(self.nestednb)
self.tabs[self.flowsnumber].actors = [x.strip() for x in self.tc5.GetValue().split(',')]
self.tabs[self.flowsnumber].tc3.SetItems(self.tabs[self.flowsnumber].actors)
self.tabs[self.flowsnumber].tc4.SetItems(self.tabs[self.flowsnumber].actors)
self.nestednb.AddPage(self.tabs[self.flowsnumber], "Flow n. " + str(self.flowsnumber))
print "OK",self.tabs
class WelcomePage(scrolled.ScrolledPanel):
def __init__(self, parent):
scrolled.ScrolledPanel.__init__(self, parent, -1,size=(570,400),name="Welcome")
box = wx.BoxSizer(wx.VERTICAL)
self.bitmap = wx.Bitmap('images/welcome.png')
wx.EVT_PAINT(self, self.OnPaint)
self.SetSizer(box)
self.SetAutoLayout(1)
self.SetupScrolling()
def OnPaint(self, event):
dc = wx.PaintDC(self)
dc.DrawBitmap(self.bitmap, 60, 20)
class GeneralPage(scrolled.ScrolledPanel):
def __init__(self, parent):
scrolled.ScrolledPanel.__init__(self, parent, -1,size=(570,400),name="General Information")
box = wx.BoxSizer(wx.VERTICAL)
self.licenses = ["Creative Commons - Attribution (CC BY)",
"Creative Commons - Attribution Share Alike (CC BY-SA)",
"Creative Commons - Attribution No Derivatives (CC BY-ND)",
"Creative Commons - Attribution Non-Commercial (CC BY-NC)",
"Creative Commons - Attribution Non-Commercial Share Alike (CC BY-NC-SA)",
"Creative Commons - Attribution Non-Commercial No Derivatives (CC BY-NC-ND)",
"Creative Commons - No Rights Reserved (CC0)"]
label1 = wx.StaticText(self, label="The title of the Open Design project:")
box.Add(label1, flag=wx.ALL|wx.EXPAND, border=10)
self.tc1 = wx.TextCtrl(self, size=(530,40), style=wx.TE_MULTILINE)
box.Add(self.tc1, flag=wx.ALL|wx.EXPAND, border=10)
label2 = wx.StaticText(self, label="Version of the Open Design project:")
box.Add(label2, flag=wx.ALL|wx.EXPAND, border=10)
self.tc2 = wx.TextCtrl(self, size=(530,20), style=wx.TE_MULTILINE)
box.Add(self.tc2, flag=wx.ALL|wx.EXPAND, border=10)
label3 = wx.StaticText(self, label="Founders of the Open Design project:")
box.Add(label3, flag=wx.ALL|wx.EXPAND, border=10)
self.tc3 = wx.TextCtrl(self, size=(530,80), style=wx.TE_MULTILINE)
box.Add(self.tc3, flag=wx.ALL|wx.EXPAND, border=10)
label4 = wx.StaticText(self, label="License of the Open Design process (not the project!):")
box.Add(label4, flag=wx.ALL|wx.EXPAND, border=10)
self.tc4 = wx.Choice(self, -1, choices = self.licenses)
box.Add(self.tc4, flag=wx.ALL|wx.EXPAND, border=10)
label5 = wx.StaticText(self, label="The online repository on GitHub for this project:")
box.Add(label5, flag=wx.ALL|wx.EXPAND, border=10)
self.tc5 = wx.TextCtrl(self, size=(530,20), style=wx.TE_MULTILINE)
box.Add(self.tc5, flag=wx.ALL|wx.EXPAND, border=10)
self.Bind(wx.EVT_CHOICE, self.onChoice, self.tc4)
self.SetSizer(box)
self.SetAutoLayout(1)
self.SetupScrolling()
def onChoice(self, event):
choice = event.GetString()
temp.license = choice
class BusinessModelPage(scrolled.ScrolledPanel):
def __init__(self, parent):
scrolled.ScrolledPanel.__init__(self, parent, -1,size=(570,400),name="Business Model")
box = wx.BoxSizer(wx.VERTICAL)
label1 = wx.StaticText(self, label="Value proposition:")
box.Add(label1, flag=wx.ALL|wx.EXPAND, border=10)
self.tc1 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc1, flag=wx.ALL|wx.EXPAND, border=10)
label2 = wx.StaticText(self, label="Customer segments:")
box.Add(label2, flag=wx.ALL|wx.EXPAND, border=10)
self.tc2 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc2, flag=wx.ALL|wx.EXPAND, border=10)
label3 = wx.StaticText(self, label="Customer relationships:")
box.Add(label3, flag=wx.ALL|wx.EXPAND, border=10)
self.tc3 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc3, flag=wx.ALL|wx.EXPAND, border=10)
label4 = wx.StaticText(self, label="Channels:")
box.Add(label4, flag=wx.ALL|wx.EXPAND, border=10)
self.tc4 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc4, flag=wx.ALL|wx.EXPAND, border=10)
label5 = wx.StaticText(self, label="Key partners:")
box.Add(label5, flag=wx.ALL|wx.EXPAND, border=10)
self.tc5 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc5, flag=wx.ALL|wx.EXPAND, border=10)
label6 = wx.StaticText(self, label="Key activities:")
box.Add(label6, flag=wx.ALL|wx.EXPAND, border=10)
self.tc6 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc6, flag=wx.ALL|wx.EXPAND, border=10)
label7 = wx.StaticText(self, label="Key resources:")
box.Add(label7, flag=wx.ALL|wx.EXPAND, border=10)
self.tc7 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc7, flag=wx.ALL|wx.EXPAND, border=10)
label8 = wx.StaticText(self, label="Revenue stream:")
box.Add(label8, flag=wx.ALL|wx.EXPAND, border=10)
self.tc8 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc8, flag=wx.ALL|wx.EXPAND, border=10)
label9 = wx.StaticText(self, label="Cost structure:")
box.Add(label9, flag=wx.ALL|wx.EXPAND, border=10)
self.tc9 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc9, flag=wx.ALL|wx.EXPAND, border=10)
self.SetSizer(box)
self.SetAutoLayout(1)
self.SetupScrolling()
class CommunityPage(scrolled.ScrolledPanel):
def __init__(self, parent):
scrolled.ScrolledPanel.__init__(self, parent, -1,size=(570,400),name="Community Analysis")
box = wx.BoxSizer(wx.VERTICAL)
label1 = wx.StaticText(self, label="The locality of the community:")
box.Add(label1, flag=wx.ALL|wx.EXPAND, border=10)
self.tc1 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc1, flag=wx.ALL|wx.EXPAND, border=10)
label2 = wx.StaticText(self, label="The main activity of the community:")
box.Add(label2, flag=wx.ALL|wx.EXPAND, border=10)
self.tc2 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc2, flag=wx.ALL|wx.EXPAND, border=10)
label3 = wx.StaticText(self, label="Who is doing the activity:")
box.Add(label3, flag=wx.ALL|wx.EXPAND, border=10)
self.tc3 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc3, flag=wx.ALL|wx.EXPAND, border=10)
label4 = wx.StaticText(self, label="The object of the activity:")
box.Add(label4, flag=wx.ALL|wx.EXPAND, border=10)
self.tc4 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc4, flag=wx.ALL|wx.EXPAND, border=10)
label5 = wx.StaticText(self, label="The outcome of the activity:")
box.Add(label5, flag=wx.ALL|wx.EXPAND, border=10)
self.tc5 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc5, flag=wx.ALL|wx.EXPAND, border=10)
label6 = wx.StaticText(self, label="The needs of the community:")
box.Add(label6, flag=wx.ALL|wx.EXPAND, border=10)
self.tc6 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc6, flag=wx.ALL|wx.EXPAND, border=10)
label7 = wx.StaticText(self, label="The tools of the activity:")
box.Add(label7, flag=wx.ALL|wx.EXPAND, border=10)
self.tc7 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc7, flag=wx.ALL|wx.EXPAND, border=10)
label8 = wx.StaticText(self, label="The rules of the activity:")
box.Add(label8, flag=wx.ALL|wx.EXPAND, border=10)
self.tc8 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc8, flag=wx.ALL|wx.EXPAND, border=10)
label9 = wx.StaticText(self, label="The roles within the activity:")
box.Add(label9, flag=wx.ALL|wx.EXPAND, border=10)
self.tc9 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc9, flag=wx.ALL|wx.EXPAND, border=10)
label10 = wx.StaticText(self, label="The larger context of the activity:")
box.Add(label10, flag=wx.ALL|wx.EXPAND, border=10)
self.tc10 = wx.TextCtrl(self, size=(550,120), style=wx.TE_MULTILINE)
box.Add(self.tc10, flag=wx.ALL|wx.EXPAND, border=10)
self.SetSizer(box)
self.SetAutoLayout(1)
self.SetupScrolling()
class Main(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, title = u"Open MetaDesign", size=(620, 400))
self.SetMinSize( self.GetSize() )
self.currentDirectory = os.getcwd()
pannel = wx.Panel(self)
vbox = wx.BoxSizer(wx.VERTICAL)
# Initializing the notebook
self.pages = {}
self.pageCounter = 3
self.pageTitleCounter = 1
self.nb = wx.Notebook(pannel, -1)
self.page0 = WelcomePage(self.nb)
self.page1 = GeneralPage(self.nb)
self.page2 = CommunityPage(self.nb)
self.page3 = BusinessModelPage(self.nb)
self.nb.AddPage(self.page0, "Welcome!")
self.nb.AddPage(self.page1, "General Information")
self.nb.AddPage(self.page2, "Community Analysis")
self.nb.AddPage(self.page3, "Business Model")
#self.addNotebookPage()
self.pageCounter += 1
pageTitle = "Step: {0}".format(str(self.pageTitleCounter))
self.pages[self.pageTitleCounter] = StepPage(self.nb, pageTitle)
self.nb.AddPage(self.pages[self.pageTitleCounter], pageTitle)
vbox.Add(self.nb, 2, flag=wx.EXPAND)
pannel.SetSizer(vbox)
# Initializing the Menu
self.statusBar = self.CreateStatusBar( 1, wx.ST_SIZEGRIP, wx.ID_ANY )
self.m_menubar1 = wx.MenuBar( 0 )
self.m_menu1 = wx.Menu()
self.m_menuItem1 = wx.MenuItem( self.m_menu1, wx.ID_ANY, u"Initialize a project", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu1.AppendItem( self.m_menuItem1 )
self.m_menuItem2 = wx.MenuItem( self.m_menu1, wx.ID_ANY, u"Open", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu1.AppendItem( self.m_menuItem2 )
self.m_menuItem3 = wx.MenuItem( self.m_menu1, wx.ID_ANY, u"Save", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu1.AppendItem( self.m_menuItem3 )
self.m_menuItem4 = wx.MenuItem( self.m_menu1, wx.ID_ANY, u"Save As", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu1.AppendItem( self.m_menuItem4 )
self.m_menuItem5 = wx.MenuItem( self.m_menu1, 12, u"Exit", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu1.AppendItem( self.m_menuItem5 )
self.m_menubar1.Append( self.m_menu1, u"File" )
self.m_menu2 = wx.Menu()
self.m_menuItem6 = wx.MenuItem( self.m_menu2, 13, u"Add a step in the Open Design process", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu2.AppendItem( self.m_menuItem6 )
self.m_menuItem7 = wx.MenuItem( self.m_menu2, 14, u"Remove the current step from the Open Design process", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu2.AppendItem( self.m_menuItem7 )
self.m_menubar1.Append( self.m_menu2, u"Edit" )
#self.m_menu4 = wx.Menu()
#self.m_menuItem12 = wx.MenuItem( self.m_menu4, 20, u"Analyse the GitHub repository of the project", wx.EmptyString, wx.ITEM_NORMAL )
#self.m_menu4.AppendItem( self.m_menuItem12 )
#self.m_menubar1.Append( self.m_menu4, u"Analyse" )
self.m_menu3 = wx.Menu()
self.m_menuItem8 = wx.MenuItem( self.m_menu3, 15, u"View the participation in the Open Design process", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu3.AppendItem( self.m_menuItem8 )
self.m_menuItem9 = wx.MenuItem( self.m_menu3, 16, u"View the business model of the Open Design project and process", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu3.AppendItem( self.m_menuItem9 )
self.m_menuItem10 = wx.MenuItem( self.m_menu3, 17, u"View the actors and the flows of the Open Design process", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu3.AppendItem( self.m_menuItem10 )
#self.m_menuItem11 = wx.MenuItem( self.m_menu3, 18, u"View the interactions in the Open Design process", wx.EmptyString, wx.ITEM_NORMAL )
#self.m_menu3.AppendItem( self.m_menuItem11 )
#self.m_menuItem12 = wx.MenuItem( self.m_menu3, wx.ID_ANY, u"View the whole canvas of the Open Design process", wx.EmptyString, wx.ITEM_NORMAL )
#self.m_menu3.AppendItem( self.m_menuItem12 )
self.m_menubar1.Append( self.m_menu3, u"View" )
self.m_menu4 = wx.Menu()
self.m_menuItem13 = wx.MenuItem( self.m_menu4, wx.ID_ANY, u"About", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu4.AppendItem( self.m_menuItem13 )
self.m_menubar1.Append( self.m_menu4, u"Help" )
self.SetMenuBar( self.m_menubar1 )
# Set events for the Menu
self.Bind(wx.EVT_MENU, self.onInitialize, self.m_menuItem1)
self.Bind(wx.EVT_MENU, self.onOpenFile, self.m_menuItem2)
self.Bind(wx.EVT_MENU, self.onSaveFile, self.m_menuItem3)
self.Bind(wx.EVT_MENU, self.onSaveFileAs, self.m_menuItem4)
self.Bind(wx.EVT_MENU, self.onQuit, self.m_menuItem5)
self.Bind(wx.EVT_MENU, self.onStepInsert, self.m_menuItem6)
self.Bind(wx.EVT_MENU, self.onStepRemove, self.m_menuItem7)
self.Bind(wx.EVT_MENU, self.onAbout, self.m_menuItem13)
#self.Bind(wx.EVT_MENU, self.onStart, self.m_menuItem12)
self.Bind(wx.EVT_MENU, self.onViewParticipation, self.m_menuItem8)
self.Bind(wx.EVT_MENU, self.onViewBusiness, self.m_menuItem9)
self.Bind(wx.EVT_MENU, self.onViewActorsFlows, self.m_menuItem10)
#self.Bind(wx.EVT_MENU, self.onViewNetwork, self.m_menuItem11)
self.Show()
# Multithreading and wxPython, from http://wiki.wxpython.org/LongRunningTasks
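    # pattern sketch: the long-running work runs on a background thread and must
    # not touch wx widgets directly; wx.CallAfter marshals the completion
    # callback back onto the GUI thread once the work is done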
def onStart(self, evt):
# Prompt for GitHub username and login at the beginning
logdlg = GitHubLogin(self, -1, size=(350, 200))
logdlg.ShowModal()
logdlg.Destroy()
self.statusBar.SetStatusText('Analysing your GitHub repository...')
thread.start_new_thread(self.longRunning, ())
def onLongRunDone(self):
self.statusBar.SetStatusText("Github repository analysed and saved")
def longRunning(self):
global githubUsername
global githubPassword
global currentFolder
global temp
urlparts = temp.repo.split('/')
if urlparts[2] != "github.com":
dlg = wx.MessageDialog( self, "The link of the repository is not correct. Please insert the link of a repository on GitHub.", "Error", wx.OK)
dlg.ShowModal()
            dlg.Destroy()
            return
github_mining(temp,githubUsername,githubPassword, currentFolder)
wx.CallAfter(self.onLongRunDone)
def onAbout(self,event):
dlg = wx.MessageDialog( self, "An open source app for designing the process of an Open Design project.\nLicense: GPL v.3\nhttp://www.openmetadesign.org", "About Open MetaDesign v. 0.1", wx.OK)
dlg.ShowModal()
dlg.Destroy()
def onViewBusiness(self,event):
self.statusBar.SetStatusText('Generating your business model canvas...')
self.SaveFile()
thisFile = currentFolder + "/business_model_canvas.png"
business_model_render(temp,thisFile)
self.statusBar.SetStatusText('Business model canvas generated.')
app = ImageViewerApp(thisFile, "The business model of the Open Design project")
app.MainLoop()
def onViewParticipation(self,event):
self.statusBar.SetStatusText('Generating your participation process...')
self.SaveFile()
thisFile = currentFolder + "/participation_process.png"
process_participation_render(temp,thisFile)
self.statusBar.SetStatusText('Participation process generated.')
app = ImageViewerApp(thisFile, "The participation in the Open Design process")
app.MainLoop()
def onViewActorsFlows(self,event):
self.statusBar.SetStatusText('Generating your actors and flows system...')
self.SaveFile()
thisFile = currentFolder + "/actors_flows_system.png"
actors_flows_system_render(temp,thisFile)
self.statusBar.SetStatusText('Actors and flows system generated.')
app = ImageViewerApp(thisFile, "The actors and flows in the Open Design process")
app.MainLoop()
def onViewNetwork(self,event):
thisFile = currentFolder + "/network_interactions.png"
thisGraph = currentFolder + "/github_social_interactions_analysis.graphml"
        # Check that the analysis graph exists; otherwise warn the user to analyse the repository first
if not os.path.isfile(thisGraph):
dlg = wx.MessageDialog( self, "You haven't analysed your repository yet.\nPlease analyse it by choosing Analyse > Analyse the GitHub repository of the project", "Error", wx.OK)
dlg.ShowModal()
dlg.Destroy()
else:
self.statusBar.SetStatusText('Generating your network of interactions...')
self.SaveFile()
network_render(thisGraph,thisFile)
self.statusBar.SetStatusText('Network of interactions generated.')
app = ImageViewerApp(thisFile, "The interactions that take place in the Open Design process")
app.MainLoop()
def onInitialize(self,event):
dlg = wx.DirDialog(self, "Choose a repository directory:",style=wx.DD_DEFAULT_STYLE)
if dlg.ShowModal() == wx.ID_OK:
mypath = dlg.GetPath() + "/metadesign"
if not os.path.isdir(mypath):
os.makedirs(mypath)
self.statusBar.SetStatusText("Project initiated successfully in "+mypath)
# Save current initialized project
self.SaveFile()
# Save file
global currentFile
global currentFolder
initializedFile = "metadesign.meta"
currentFile = mypath + "/"+initializedFile
currentFolder = mypath
temp.save(currentFile)
dlg.Destroy()
def onOpenFile(self, event):
dlg = wx.FileDialog(self, message="Choose a file",defaultDir=self.currentDirectory, defaultFile="",wildcard="*.meta",style=wx.OPEN | wx.CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
paths = dlg.GetPaths()
            # Load the project from the selected file
temp.load(paths[0])
global currentFile
global currentFolder
currentFolder = os.path.dirname(paths[0])
currentFile = paths[0]
# Erase existing pages
for j in range(self.pageCounter+1):
self.nb.DeletePage(0)
self.page0 = WelcomePage(self.nb)
self.page1 = GeneralPage(self.nb)
self.page2 = CommunityPage(self.nb)
self.page3 = BusinessModelPage(self.nb)
self.nb.AddPage(self.page0, "Welcome!")
self.nb.AddPage(self.page1, "General Information")
self.nb.AddPage(self.page2, "Community Analysis")
self.nb.AddPage(self.page3, "Business Model")
# Update the values in the GUI
self.page1.tc1.SetValue(temp.title)
self.page1.tc2.SetValue(temp.version)
self.page1.tc3.SetValue(", ".join(temp.founders))
self.page1.tc4.SetStringSelection(temp.license)
self.page1.tc5.SetValue(temp.repo)
self.page2.tc1.SetValue(temp.community.locality)
self.page2.tc2.SetValue(temp.community.activity)
self.page2.tc3.SetValue(temp.community.subject)
self.page2.tc4.SetValue(temp.community.object)
self.page2.tc5.SetValue(temp.community.outcome)
self.page2.tc6.SetValue(temp.community.needs)
self.page2.tc7.SetValue(temp.community.tools)
self.page2.tc8.SetValue(temp.community.rules)
self.page2.tc9.SetValue(temp.community.roles)
self.page2.tc10.SetValue(temp.community.context)
self.page3.tc1.SetValue(temp.businessmodel.valueproposition)
self.page3.tc2.SetValue(temp.businessmodel.customersegments)
self.page3.tc3.SetValue(temp.businessmodel.customerrelationships)
self.page3.tc4.SetValue(temp.businessmodel.channels)
self.page3.tc5.SetValue(temp.businessmodel.keypartners)
self.page3.tc6.SetValue(temp.businessmodel.keyactivities)
self.page3.tc7.SetValue(temp.businessmodel.keyresources)
self.page3.tc8.SetValue(temp.businessmodel.revenuestreams)
self.page3.tc9.SetValue(temp.businessmodel.coststructure)
# Remove existing step pages before loading the new ones
self.pageCounter = 4
self.pageTitleCounter = 0
del self.pages
self.pages = {}
# Load and recreate step pages
for j in range(len(temp.steps)):
self.pageTitleCounter += 1
pageTitle = "Step: {0}".format(str(self.pageTitleCounter))
self.pages[self.pageTitleCounter] = StepPage(self.nb, pageTitle)
self.nb.AddPage(self.pages[self.pageTitleCounter], pageTitle)
self.pageCounter += 1
self.pages[self.pageTitleCounter].tc1.SetValue(temp.steps[j].title)
self.pages[self.pageTitleCounter].tc2.SetStringSelection(temp.steps[j].participation)
self.pages[self.pageTitleCounter].tc3.SetValue(temp.steps[j].tools)
self.pages[self.pageTitleCounter].tc4.SetValue(temp.steps[j].rules)
self.pages[self.pageTitleCounter].tc5.SetValue(", ".join(temp.steps[j].actors))
# Delete the first default flow before loading the flows
self.pages[self.pageTitleCounter].nestednb.DeletePage(0)
del self.pages[self.pageTitleCounter].tabs[0]
#del self.pages[j].tabs[self.pages[j].nestednb.GetSelection()]
# Load the flows
for k in range(len(temp.steps[j].flows)):
self.pages[self.pageTitleCounter].flowmessage = "Number of flows in the step: " + str(len(temp.steps[j].flows))
self.pages[self.pageTitleCounter].label6.SetLabel(self.pages[self.pageTitleCounter].flowmessage)
self.pages[self.pageTitleCounter].tabs[k+1] = FlowTab(self.pages[self.pageTitleCounter].nestednb)
self.pages[self.pageTitleCounter].tabs[k+1].actors = temp.steps[j].actors
self.pages[self.pageTitleCounter].tc5.SetValue(", ".join(temp.steps[j].actors))
self.pages[self.pageTitleCounter].tabs[k+1].tc3.SetItems(self.pages[self.pageTitleCounter].tabs[k+1].actors)
self.pages[self.pageTitleCounter].tabs[k+1].tc4.SetItems(self.pages[self.pageTitleCounter].tabs[k+1].actors)
self.pages[self.pageTitleCounter].nestednb.AddPage(self.pages[self.pageTitleCounter].tabs[k+1], "Flow n. " + str(k+1))
#self.pageTitleCounter = k+2
#self.pages[j].flowsnumber += 1
self.pages[self.pageTitleCounter].tabs[k+1].tc1.SetStringSelection(temp.steps[j].flows[k].type)
self.pages[self.pageTitleCounter].tabs[k+1].tc2.SetValue(temp.steps[j].flows[k].what)
for f in range(self.pages[self.pageTitleCounter].flowsnumber):
load = [x.strip() for x in self.pages[self.pageTitleCounter].tc5.GetValue().split(',')]
self.pages[self.pageTitleCounter].tabs[f+1].tc3.SetItems(load)
self.pages[self.pageTitleCounter].tabs[f+1].tc4.SetItems(load)
self.pages[self.pageTitleCounter].tabs[f+1].tc3.SetStringSelection(temp.steps[j].flows[k].actor1)
self.pages[self.pageTitleCounter].tabs[f+1].tc4.SetStringSelection(temp.steps[j].flows[k].actor2)
self.pages[self.pageTitleCounter].tabs[f+1].tc5.SetStringSelection(temp.steps[j].flows[k].direction)
self.pages[self.pageTitleCounter].flowsnumber +=1
self.pages[self.pageTitleCounter].flowsnumber = len(temp.steps[j].flows)
self.statusBar.SetStatusText("Loaded successfully file "+currentFile)
dlg.Destroy()
def SaveFile(self):
# Load the current values for General information
temp.title = self.page1.tc1.GetValue()
temp.version = self.page1.tc2.GetValue()
temp.founders = [x.strip() for x in self.page1.tc3.GetValue().split(',')]
temp.license = self.page1.licenses[self.page1.tc4.GetCurrentSelection()]
temp.repo = self.page1.tc5.GetValue()
# Add automatically url of license
if temp.license == "Creative Commons - Attribution (CC BY)":
temp.licenseurl = "http://creativecommons.org/licenses/by/3.0/"
elif temp.license == "Creative Commons - Attribution Share Alike (CC BY-SA)":
temp.licenseurl = "http://creativecommons.org/licenses/by-sa/3.0"
elif temp.license == "Creative Commons - Attribution No Derivatives (CC BY-ND)":
temp.licenseurl = "http://creativecommons.org/licenses/by-nd/3.0"
elif temp.license == "Creative Commons - Attribution Non-Commercial (CC BY-NC)":
temp.licenseurl = "http://creativecommons.org/licenses/by-nc/3.0"
elif temp.license == "Creative Commons - Attribution Non-Commercial Share Alike (CC BY-NC-SA)":
temp.licenseurl = "http://creativecommons.org/licenses/by-nc-sa/3.0"
elif temp.license == "Creative Commons - Attribution Non-Commercial No Derivatives (CC BY-NC-ND)":
temp.licenseurl = "http://creativecommons.org/licenses/by-nc-nd/3.0"
elif temp.license == "Creative Commons - No Rights Reserved (CC0)":
temp.licenseurl = "http://creativecommons.org/publicdomain/zero/1.0/"
# Load the current values for Community analysis
temp.community.locality = self.page2.tc1.GetValue()
temp.community.activity = self.page2.tc2.GetValue()
temp.community.subject = self.page2.tc3.GetValue()
temp.community.object = self.page2.tc4.GetValue()
temp.community.outcome = self.page2.tc5.GetValue()
temp.community.needs = self.page2.tc6.GetValue()
temp.community.tools = self.page2.tc7.GetValue()
temp.community.rules = self.page2.tc8.GetValue()
temp.community.roles = self.page2.tc9.GetValue()
temp.community.context = self.page2.tc10.GetValue()
# Load the current values for Business model
temp.businessmodel.valueproposition = self.page3.tc1.GetValue()
temp.businessmodel.customersegments = self.page3.tc2.GetValue()
temp.businessmodel.customerrelationships = self.page3.tc3.GetValue()
temp.businessmodel.channels = self.page3.tc4.GetValue()
temp.businessmodel.keypartners = self.page3.tc5.GetValue()
temp.businessmodel.keyactivities = self.page3.tc6.GetValue()
temp.businessmodel.keyresources = self.page3.tc7.GetValue()
temp.businessmodel.revenuestreams = self.page3.tc8.GetValue()
temp.businessmodel.coststructure = self.page3.tc9.GetValue()
        # number of step pages (the first four notebook pages are fixed);
        # use a local count so repeated saves do not corrupt self.pageCounter
        stepCount = self.pageCounter - 4
        # Load the current values for the Steps
        for f,j in enumerate(range(1,stepCount+1)):
temp.steps[f] = step()
temp.steps[f].stepnumber = j
temp.steps[f].title = self.pages[j].tc1.GetValue()
temp.steps[f].participation = self.pages[j].participationlevels[self.pages[j].tc2.GetSelection()]
temp.steps[f].tools = self.pages[j].tc3.GetValue()
temp.steps[f].rules = self.pages[j].tc4.GetValue()
temp.steps[f].actors = [x.strip() for x in self.pages[j].tc5.GetValue().split(',')]
# Load the current values for the Flows
# print "flows", self.pages[j].flowsnumber
for m,k in enumerate(range(1,self.pages[j].flowsnumber+1)):
#print "M:",m
#print "K:",k
#print "tab"
#print "tab",self.pages[j].tabs
temp.steps[f].flows[k] = flow()
temp.steps[f].flows[k].number = str(m)
temp.steps[f].flows[k].type = self.pages[j].tabs[k].flowtype[self.pages[j].tabs[k].tc1.GetSelection()]
temp.steps[f].flows[k].what = self.pages[j].tabs[k].tc2.GetValue()
temp.steps[f].flows[k].actor1 = self.pages[j].tabs[k].actors[self.pages[j].tabs[k].tc3.GetSelection()]
temp.steps[f].flows[k].actor2 = self.pages[j].tabs[k].actors[self.pages[j].tabs[k].tc4.GetSelection()]
temp.steps[f].flows[k].direction = self.pages[j].tabs[k].flowdirection[self.pages[j].tabs[k].tc5.GetSelection()]
def onSaveFile(self,event):
# Load temporary project
self.SaveFile()
# Save file
global currentFolder
global currentFile
temp.save(currentFile)
mdwrite(temp,currentFolder)
self.statusBar.SetStatusText("Saved successfully file "+currentFile)
def onSaveFileAs(self, event):
dlg = wx.FileDialog(self, message="Save file as ...", defaultDir=self.currentDirectory, defaultFile="", wildcard="*.meta", style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
# Load temporary project
self.SaveFile()
# Save file
global currentFile
global currentFolder
temp.save(path)
currentFile = path
currentFolder = os.path.dirname(path)
mdwrite(temp,currentFolder)
self.statusBar.SetStatusText("Saved successfully file "+currentFile)
dlg.Destroy()
def onQuit(self, event):
self.Close()
def addNotebookPage(self):
self.pageCounter += 1
self.pageTitleCounter += 1
pageTitle = "Step: {0}".format(str(self.pageTitleCounter))
self.pages[self.pageTitleCounter] = StepPage(self.nb, pageTitle)
self.nb.AddPage(self.pages[self.pageTitleCounter], pageTitle)
def onStepRemove(self, event):
if self.nb.GetSelection() > 4:
self.nb.DeletePage(self.nb.GetSelection())
del self.pages[self.pageCounter]
self.pageTitleCounter -= 1
self.pageCounter -= 1
for j in range(self.nb.GetSelection(),self.pageCounter+1):
self.nb.SetPageText(j, "Step: "+str(j-3))
else:
pass
def onStepInsert(self, event):
self.addNotebookPage()
class MyApp(wx.App, wx.lib.mixins.inspection.InspectionMixin):
def OnInit(self):
self.Init()
frame = Main()
frame.Show()
self.SetTopWindow(frame)
return True
if __name__ == "__main__":
app = MyApp(redirect=False)
app.MainLoop()
| gpl-3.0 | -4,790,389,888,538,539,000 | 47.587957 | 200 | 0.627788 | false |
tobijk/ecromedos | lib/net/ecromedos/plugins/glossary.py | 1 | 6305 | # -*- coding: utf-8 -*-
#
# Desc: This file is part of the ecromedos Document Preparation System
# Author: Tobias Koch <[email protected]>
# License: MIT
# URL: http://www.ecromedos.net
#
import sys, locale, functools
import lxml.etree as etree
from net.ecromedos.error import ECMDSPluginError
def getInstance(config):
"""Returns a plugin instance."""
return Plugin(config)
#end function
class Plugin():
def __init__(self, config):
        self.glossary = []
try:
self.__draft = config['xsl_params']['global.draft']
except KeyError:
self.__draft = "'no'"
#end function
def process(self, node, format):
"""Saves a glossary entry or sorts and builds the glossary,
depending on what type of node triggered the plugin."""
if self.__draft == "'yes'":
return node
if node.tag == "defterm":
node = self.__saveNode(node)
elif node.tag == "make-glossary":
node = self.__makeGlossary(node)
#end if
return node
#end function
def flush(self):
self.glossary = []
#end function
# PRIVATE
def __saveNode(self, node):
"""Stores a reference to the given node."""
term = node.attrib.get("sortkey", None)
if not term:
dt_node = node.find("./dt")
if dt_node is not None:
term = "".join([s for s in dt_node.itertext()])
#end if
self.glossary.append([term, node])
return node
#end function
def __makeGlossary(self, node):
"""Read configuration. Sort items. Build glossary. Build XML."""
if not self.glossary:
return node
# build configuration
config = self.__configuration(node)
# set locale
self.__setLocale(config['locale'], config['locale_encoding'],
config['locale_variant'])
# sort glossary
self.__sortGlossary(config)
# build DOM structures
glossary = self.__buildGlossary(node, config)
# reset locale
self.__resetLocale()
return glossary
#end function
def __configuration(self, node):
"""Read node attributes and build a dictionary holding
configuration information for the collator."""
# presets
properties = {
"locale": "C",
"locale_encoding": None,
"locale_variant": None,
"alphabet": "A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z"
}
# read element attributes
properties.update(dict(node.items()))
# split locale into locale/encoding/variant
if '@' in properties['locale']:
properties['locale'], properties['locale_variant'] = \
properties['locale'].split('@', 1)
if '.' in properties['locale']:
properties['locale'], properties['locale_encoding'] = \
properties['locale'].split('.', 1)
#end ifs
# parse the alphabet
alphabet = []
for ch in [x.strip() for x in properties['alphabet'].split(",")]:
if ch[0] == '[' and ch[-1] == ']':
properties['symbols'] = ch[1:-1].strip()
else:
alphabet.append(ch)
#end if
#end for
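        # illustrative sketch: an alphabet attribute of "[0-9], A, B" yields
        # properties['symbols'] == "0-9" and properties['alphabet'] == ["A", "B"]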
properties['alphabet'] = alphabet
return properties
#end function
def __setLocale(self, collate="C", encoding=None, variant=None):
"""Sets the locale to the specified locale, encoding and locale
variant."""
success = False
for e in [encoding, "UTF-8"]:
if success:
break
for v in [variant, ""]:
localestring = '.'.join([x for x in [collate, e] if x])
localestring = '@'.join([x for x in [localestring, v] if x])
try:
locale.setlocale(locale.LC_COLLATE, localestring)
success = True
break
except locale.Error:
pass
#end for
#end for
if not success:
msg = "Warning: cannot set locale '%s'." % collate
sys.stderr.write(msg)
#end function
def __resetLocale(self):
"""Resets LC_COLLATE to its default."""
locale.resetlocale(locale.LC_COLLATE)
#end function
def __sortGlossary(self, config):
"""Sort glossary terms."""
# create alphabet nodes
for ch in config['alphabet']:
newnode = etree.Element("glsection")
newnode.attrib["name"] = ch
self.glossary.append([ch, newnode])
#end for
# comparison function
def compare(a,b):
result = locale.strcoll(a[0], b[0])
y1 = a[1].tag
y2 = b[1].tag
if result != 0:
return result
elif y1 == y2:
return 0
elif y1 == "glsection":
return -1
elif y2 == "glsection":
return +1
else:
return 0
#end inline
self.glossary.sort(key=functools.cmp_to_key(compare))
#end function
def __buildGlossary(self, node, config):
"""Build XML DOM structure. self.glossary is a list of tuples
of the form (sortkey, node), where node can be a 'glsection' or
a 'defterm' element."""
section = etree.Element("glsection")
try:
section.attrib["name"] = config['symbols']
except KeyError:
pass
dl_node = etree.Element("dl")
section.append(dl_node)
for item in self.glossary:
if item[1].tag == "glsection":
node.append(section)
section = item[1]
dl_node = etree.Element("dl")
section.append(dl_node)
else: # defterm
dt_node = item[1].find("./dt")
dd_node = item[1].find("./dd")
dl_node.append(dt_node)
dl_node.append(dd_node)
#end if
#end for
node.append(section)
node.tag = "glossary"
return node
#end function
#end class
| mit | -4,314,297,649,408,277,000 | 27.147321 | 77 | 0.51927 | false |
fregaham/DISP | test/aplikace2/src/main.py | 1 | 1871 | # -*- coding: utf-8 -*-
# copyright (C) 2006 Marek Schmidt
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from disp.application import *
from disp.form import *
from disp.cursor import *
from disp.xmlui import *
class MojeData (Cursor):
def __init__ (self):
Cursor.__init__ (self)
self.data = []
def __iter__ (self):
return self.data.__iter__ ()
def __getitem__ (self, i):
return self.data[i]
def __len__ (self):
return len(self.data)
def add(self, a, b, c):
self.data.append ({"a":a,"b":b,"c":c})
class Main (Application):
def initialize (self):
self.title = "Hello, world"
form = Form (self)
uiloader = XMLUIBuilder ()
uiloader.loadFile ('form.xml', self, form)
self.data = MojeData ()
for i in range(100):
self.data.add (i, i*i, i*i*i)
self.table.cursor = self.data
self.radio.addOption ("Black")
self.radio.addOption ("Green")
self.radio.addOption ("Yellow")
form.open ()
def onClick (self):
self.static.text = self.edit.text + ", " + str(self.check.checked) + ", " + self.radio.option
def onSelected (self, line):
self.static.text = "A = %s, B = %s, C = %s" % (line["a"], line["b"], line["c"])
| gpl-2.0 | 1,727,076,181,453,910,800 | 27.348485 | 97 | 0.656868 | false |
ashayas/MathSitter | files/fractionDivMult.py | 1 | 12369 | #!/usr/bin/env python3
"""
@author Ashaya Sharma
@date July 22, 2015
Fraction multiplication and division
Program that randomly generates fraction multiplication and division problems
and checks whether the user's input matches the expected result
MathSitter Beta
"""
import random
#tkinter is the GUI library used for project documentation found at http://effbot.org/tkinterbook/entry.htm
from tkinter import *
from tkinter import messagebox
import loaderMain
from fractions import *
class FractionArithmetic2(object):
"""
    Generates a random fraction multiplication or division problem with
    numerators and denominators in the range 1-10
    @return the generated expression as a string
"""
def generateExpression(self):
expression = ""
numerator1 = random.randint(1,10)
denominator1 = random.randint(1,10)
numerator2 = random.randint(1,10)
denominator2 = random.randint(1,10)
self.computerNumerator1.set(numerator1)
self.computerNumerator2.set(numerator2)
self.computerDenominator1.set(denominator1)
self.computerDenominator2.set(denominator2)
tossup = random.randint(0,1)
if tossup == 0:
expression = expression + str(numerator1) + '/' + str(denominator1) + '*' + str(numerator2) + '/' + str(denominator2)
else:
expression = expression + str(numerator1) + '/' + str(denominator1) + '÷' + str(numerator2) + '/' + str(denominator2)
self.divisionFlag.set(tossup)
return expression;
"""
    @param userFraction the user's simplified answer as a string, e.g. "3/4" or "2"
    @return true if the user's answer equals the value of the generated expression, false otherwise
"""
def checkExpression(self, userFraction):
#Multiply the fractions or divide according to what the random flag was set to
if (int(self.divisionFlag.get()) == 0):
answer = Fraction(int(self.computerNumerator1.get()), int(self.computerDenominator1.get())) * Fraction(int(self.computerNumerator2.get()), int(self.computerDenominator2.get()))
else:
answer = Fraction(int(self.computerNumerator1.get()), int(self.computerDenominator1.get())) / Fraction(int(self.computerNumerator2.get()), int(self.computerDenominator2.get()))
        # build the user's fraction; whole numbers are treated as n/1
        if ("/" not in userFraction):
            userAnswer = Fraction(int(userFraction), 1)
        else:
            # numerator is everything before the first '/'; any further '/'
            # characters are ignored, matching the original character loop
            numeratorPart, denominatorPart = userFraction.split("/", 1)
            userAnswer = Fraction(int(numeratorPart), int(denominatorPart.replace("/", "")))
if (userAnswer == answer):
return True
else:
return False
"""
Updates score depending on correctness of user input and updates GUI
Requests new expression if user is correct
@modifies score, checkMark image, question
"""
def incrementScore(self, event):
self.attemptedQuestions = self.attemptedQuestions + 1
reducedFraction = self.userResponse.get()
        # trim whitespace; str.replace returns a new string, so it must be reassigned
        reducedFraction = reducedFraction.replace(" ", "")
answerKey = self.computerQuestion.get()
#check if the simplified expressions of both match and the user enters a valid expression and increment their score if they are right
#generate a new expression for them to solve if they keep on going
if (self.checkExpression(reducedFraction)):
tempscore = int(self.score.get())
tempscore = tempscore +1
self.questionsCorrect = self.questionsCorrect + 1
#check if score is 10 and popup congratulations message
if (tempscore == 10):
messagebox.showinfo("Congratulations!", "Well Done! You mastered the module. You can keep practicing or quit");
self.score.set(tempscore)
scoreString = str(tempscore)+ "/10"
self.scoreCount.set(scoreString)
self.computerQuestion.set(self.generateExpression());
self.checkMark.config(image = self.pictures[1])
#clear the answer textbox
self.userResponse.set("")
#if they are incorrect but have a score greater than 5, send their score down to 5 otherwise send them back to 0
else:
tempscore = int(self.score.get())
newscore = 0
if (tempscore > 5):
self.score.set(5)
newscore = 5
else:
self.score.set(0)
newscore = 0
scoreString=str(newscore)+"/10"
self.scoreCount.set(scoreString)
self.checkMark.config(image = self.pictures[2])
self.computerQuestion.set(answerKey)
#Closes the program
def closeGame(self):
messagebox.showinfo("Quit", "Program closed")
self.root.destroy()
self.writeScoretoFile()
m1 = loaderMain.Loader(self.username.get());
m1.root.mainloop()
"""
Constructor sets up main GUI
"""
def __init__(self, username):
self.root = Tk()
self.root.title("Fraction Arithmetic 2")
#set window to fullscreen and focus
#w, h = self.root.winfo_screenwidth(), self.root.winfo_screenheight()
#self.root.overrideredirect(1)
#self.root.geometry("%dx%d+0+0" % (w, h))
self.root.focus_set()
self.username=StringVar();
self.username.set(username)
self.root.configure(bg = "#4C4CFF")
self.mainFrame = Frame(self.root, width = 250);
self.topFrame = Frame(self.root, width = 250);
#place widgets on screen
self.topFrame.pack();
self.mainFrame.pack();
self.score = StringVar();
self.score.set(0);
self.scoreCount = StringVar();
self.scoreCount.set("0/10");
self.userResponse = StringVar();
self.userResponse.set("")
self.computerQuestion = StringVar();
self.computerNumerator1 = StringVar();
self.computerNumerator2 = StringVar();
self.computerDenominator1 = StringVar();
self.computerDenominator2 = StringVar();
self.divisionFlag = StringVar();
self.attemptedQuestions = 0;
self.questionsCorrect = 0;
#load pictures of check mark and x
self.pictures = (PhotoImage(file = "Images/nop.gif"), PhotoImage(file = "Images/cm2.gif"), PhotoImage(file = "Images/x2.gif"));
#display an initial problem
self.computerQuestion.set(self.generateExpression());
self.GameName = Label(self.topFrame, bg = "#4C4CFF", pady = 15, width = 20, text = self.username.get()+ ": Fractions 2", font = ('Helvetica', 10), fg = "#e5e5ff");
self.GameName.pack(side = LEFT);
self.butt2 = Button(self.topFrame, text = "Close", font = ('Helvetica', 9), command = self.closeGame);
#paddingLabel1.pack(side = LEFT)
self.butt2.pack(side = RIGHT);
#set background colors
self.mainFrame.configure(bg = "#4C4CFF");
self.topFrame.configure(bg = "#4C4CFF");
#Display question (takes text from generateExpression)
self.question = Label(self.mainFrame, bg = "#4C4CFF", fg = "White", pady = 100, padx = 300, relief = GROOVE, textvariable = self.computerQuestion, font = ('Courier', 24), width = 100);
self.question.pack();
#creates an invisible layer which makes buttons centered (necessary on rpi for aesthetic purposes)
self.paddingLabel2 = Label(self.mainFrame, bg = "#4C4CFF", pady = 30, padx = 30);
self.paddingLabel2.pack();
#entry to submit answer
self.entry1 = Entry(self.mainFrame, borderwidth= 20, bg = "#4C4CF0", fg = "White", relief = RAISED, font = ('Helvetica', 20), textvariable = self.userResponse);
self.entry1.pack();
#make a variable which holds the checkmark or cross set it to the background color to start
self.checkMark = Label(self.mainFrame, image = self.pictures[0], bg ="#4C4CFF");
#creates an invisible layer which makes buttons centered (necessary on rpi for aesthetic purposes)
self.paddingLabel3 = Label(self.mainFrame, width = 75, bg="#4C4CFF");
self.button1 = Button(self.mainFrame, text = "Submit", height = 5, width = 20, font = ('Helvetica', 10));
#let user press the submit button or enter to submit their answer
self.button1.bind("<Button-1>", self.incrementScore)
self.root.bind("<Return>", self.incrementScore)
#display the user's current score
self.currentScore = Label(self.mainFrame, bg = "#4C4CFF", textvariable = self.scoreCount, font = ('Helvetica', 20), fg = "White", padx = 20);
#place widgets on screen
self.paddingLabel3.pack(side = LEFT);
self.checkMark.pack(side = LEFT);
self.button1.pack(side = LEFT);
self.currentScore.pack(side = LEFT);
'''
Function opens a text file with scores for each user and reads and then writes the updated scores from this module to the file
Appends to file if designated user has no entry yet
@modifies score text file
'''
def writeScoretoFile(self):
#the following lines of code open a file and find the line where the designated user is on
#if the user is not in the file, a line 0 is indicated
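        # score.txt layout (illustrative sketch): one tab-separated line per user,
        # "username\tscore0\tscore1\t..."; this module updates the correct/attempted
        # pair at indices 20 and 21 after the username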
f = open("score.txt", 'r')
lineCount = 0;
lineofUser = 0;
for line in f:
a = line
lineCount = lineCount + 1;
username = "";
tempString = ""
startOfScores = 0;
scoreArray = []
for x in range (0, len(a)):
if a[x] == '\t':
startOfScores = x
break
else:
username = username + a[x]
if (username == self.username.get()):
lineofUser = lineCount
#appends to file if user is not in the file yet
if(lineofUser == 0):
f = open("score.txt", 'a')
#this is for module 11. arrange accordingly
f.write('\n' + self.username.get()+ '\t'+ "0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t" + str(self.questionsCorrect) +'\t' + str(self.attemptedQuestions) + '\t' + "0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t0\t")
f.close()
#if the user is already in the file, the following few lines create an array of the scores for all the modules and modifies the one
#corresponding to this module
else:
f = open("score.txt", 'r').readlines()
temp = f[lineofUser-1]
username2 = "";
tempString2 = ""
startOfScores = 0;
scoreArray = []
for x in range (0, len(temp)):
if temp[x] == '\t':
startOfScores = x
break
else:
username2 = username2 + temp[x]
for i in range (startOfScores, len(temp)):
if temp[i].isdigit():
tempString = tempString + temp[i]
else:
if len(tempString) > 0:
scoreArray.append(int(tempString))
tempString = ""
#scoreArray[moduleNumber] = newValue
scoreArray[20] = scoreArray[20]+self.questionsCorrect
scoreArray[21] = scoreArray[21]+self.attemptedQuestions
newString = username2
x = 0
while (x < len(scoreArray)):
                newString = newString +("\t"+str(scoreArray[x])) # append one score column
x = x + 1
f[lineofUser-1] = newString
#write the updated scores to the file
out = open("score.txt", 'w')
out.writelines(f)
out.close()
#f1 = FractionArithmetic()
#f1.root.mainloop()
| mit | 6,059,192,020,498,790,000 | 37.409938 | 281 | 0.600016 | false |
jl2005/go-srs | research/tcp/tcp.server.py | 1 | 1922 | '''
================================================================================================
1. VirtualBox, Thinkpad, T430, 2CPU, 4096B/packet, S:Python, C:Python
python tcp.server.py 1990 4096
python tcp.client.py 127.0.0.1 1990 4096
----total-cpu-usage---- -dsk/total- ---net/lo-- ---paging-- ---system--
usr sys idl wai hiq siq| read writ| recv send| in out | int csw
20 5 63 0 0 12| 0 144k| 245M 245M| 0 0 |2597 2896
21 6 63 0 0 10| 0 4096B| 251M 251M| 0 0 |2714 3015
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
5157 winlin 20 0 157m 5780 2808 R 100.0 0.3 0:34.11 python tcp.client.py 1990 4096
5140 winlin 20 0 157m 5932 2824 S 28.2 0.3 0:09.84 python tcp.server.py 1990 4096
'''
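# For reference, a minimal matching client is sketched below. This is an
# illustrative assumption mirroring the tcp.client.py invocation shown above,
# not the actual client source:
#
#   import socket, sys
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect((sys.argv[1], int(sys.argv[2])))
#   while True:
#       if not c.recv(int(sys.argv[3])):
#           break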
import socket, sys
if len(sys.argv) <= 2:
print("Usage: %s <port> <packet_bytes>"%(sys.argv[0]))
print(" port: the listen port.")
print(" packet_bytes: the bytes for packet to send.")
print("For example:")
print(" %s %d %d"%(sys.argv[0], 1990, 4096))
sys.exit(-1)
listen_port = int(sys.argv[1])
packet_bytes = int(sys.argv[2])
print("listen_port is %d"%listen_port)
print("packet_bytes is %d"%packet_bytes)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print("setsockopt reuse-addr success.")
s.bind(('', listen_port))
print("bind socket success.")
s.listen(10)
print("listen socket success.")
# build a payload of exactly packet_bytes bytes; appending str(i) directly
# would overshoot, because str(i) has several digits once i >= 10
b = ''
for i in range(0, packet_bytes):
    b += str(i % 10)
while True:
conn, addr = s.accept()
while True:
try:
conn.send(b)
except Exception, ex:
print("ex:%s"%ex)
break
conn.close()
| mit | 7,797,210,427,724,604,000 | 35.264151 | 172 | 0.515088 | false |
ryfeus/lambda-packs | pytorch/source/torch/_tensor_docs.py | 1 | 70644 | """Adds docstrings to Tensor functions"""
import torch._C
from torch._C import _add_docstr as add_docstr
from ._torch_docs import parse_kwargs
def add_docstr_all(method, docstr):
add_docstr(getattr(torch._C._TensorBase, method), docstr)
new_common_args = parse_kwargs("""
size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
shape of the output tensor.
dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
Default: if None, same :class:`torch.dtype` as this tensor.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if None, same :class:`torch.device` as this tensor.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
""")
add_docstr_all('new_tensor',
r"""
new_tensor(data, dtype=None, device=None, requires_grad=False) -> Tensor
Returns a new Tensor with :attr:`data` as the tensor data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.
.. warning::
:func:`new_tensor` always copies :attr:`data`. If you have a Tensor
``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_`
or :func:`torch.Tensor.detach`.
If you have a numpy array and want to avoid a copy, use
:func:`torch.from_numpy`.
.. warning::
When data is a tensor `x`, :func:`new_tensor()` reads out 'the data' from whatever it is passed,
and constructs a leaf variable. Therefore ``tensor.new_tensor(x)`` is equivalent to ``x.clone().detach()``
and ``tensor.new_tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``.
The equivalents using ``clone()`` and ``detach()`` are recommended.
Args:
data (array_like): The returned Tensor copies :attr:`data`.
{dtype}
{device}
{requires_grad}
Example::
>>> tensor = torch.ones((2,), dtype=torch.int8)
>>> data = [[0, 1], [2, 3]]
>>> tensor.new_tensor(data)
tensor([[ 0, 1],
[ 2, 3]], dtype=torch.int8)
""".format(**new_common_args))
add_docstr_all('new_full',
r"""
new_full(size, fill_value, dtype=None, device=None, requires_grad=False) -> Tensor
Returns a Tensor of size :attr:`size` filled with :attr:`fill_value`.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.
Args:
fill_value (scalar): the number to fill the output tensor with.
{dtype}
{device}
{requires_grad}
Example::
>>> tensor = torch.ones((2,), dtype=torch.float64)
>>> tensor.new_full((3, 4), 3.141592)
tensor([[ 3.1416, 3.1416, 3.1416, 3.1416],
[ 3.1416, 3.1416, 3.1416, 3.1416],
[ 3.1416, 3.1416, 3.1416, 3.1416]], dtype=torch.float64)
""".format(**new_common_args))
add_docstr_all('new_empty',
r"""
new_empty(size, dtype=None, device=None, requires_grad=False) -> Tensor
Returns a Tensor of size :attr:`size` filled with uninitialized data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.
Args:
{dtype}
{device}
{requires_grad}
Example::
>>> tensor = torch.ones(())
>>> tensor.new_empty((2, 3))
tensor([[ 5.8182e-18, 4.5765e-41, -1.0545e+30],
[ 3.0949e-41, 4.4842e-44, 0.0000e+00]])
""".format(**new_common_args))
add_docstr_all('new_ones',
r"""
new_ones(size, dtype=None, device=None, requires_grad=False) -> Tensor
Returns a Tensor of size :attr:`size` filled with ``1``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.
Args:
size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
shape of the output tensor.
{dtype}
{device}
{requires_grad}
Example::
>>> tensor = torch.tensor((), dtype=torch.int32)
>>> tensor.new_ones((2, 3))
tensor([[ 1, 1, 1],
[ 1, 1, 1]], dtype=torch.int32)
""".format(**new_common_args))
add_docstr_all('new_zeros',
r"""
new_zeros(size, dtype=None, device=None, requires_grad=False) -> Tensor
Returns a Tensor of size :attr:`size` filled with ``0``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.
Args:
size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
shape of the output tensor.
{dtype}
{device}
{requires_grad}
Example::
>>> tensor = torch.tensor((), dtype=torch.float64)
>>> tensor.new_zeros((2, 3))
tensor([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=torch.float64)
""".format(**new_common_args))
add_docstr_all('abs',
r"""
abs() -> Tensor
See :func:`torch.abs`
""")
add_docstr_all('abs_',
r"""
abs_() -> Tensor
In-place version of :meth:`~Tensor.abs`
""")
add_docstr_all('acos',
r"""
acos() -> Tensor
See :func:`torch.acos`
""")
add_docstr_all('acos_',
r"""
acos_() -> Tensor
In-place version of :meth:`~Tensor.acos`
""")
add_docstr_all('add',
r"""
add(value) -> Tensor
add(value=1, other) -> Tensor
See :func:`torch.add`
""")
add_docstr_all('add_',
r"""
add_(value) -> Tensor
add_(value=1, other) -> Tensor
In-place version of :meth:`~Tensor.add`
""")
add_docstr_all('addbmm',
r"""
addbmm(beta=1, mat, alpha=1, batch1, batch2) -> Tensor
See :func:`torch.addbmm`
""")
add_docstr_all('addbmm_',
r"""
addbmm_(beta=1, mat, alpha=1, batch1, batch2) -> Tensor
In-place version of :meth:`~Tensor.addbmm`
""")
add_docstr_all('addcdiv',
r"""
addcdiv(value=1, tensor1, tensor2) -> Tensor
See :func:`torch.addcdiv`
""")
add_docstr_all('addcdiv_',
r"""
addcdiv_(value=1, tensor1, tensor2) -> Tensor
In-place version of :meth:`~Tensor.addcdiv`
""")
add_docstr_all('addcmul',
r"""
addcmul(value=1, tensor1, tensor2) -> Tensor
See :func:`torch.addcmul`
""")
add_docstr_all('addcmul_',
r"""
addcmul_(value=1, tensor1, tensor2) -> Tensor
In-place version of :meth:`~Tensor.addcmul`
""")
add_docstr_all('addmm',
r"""
addmm(beta=1, mat, alpha=1, mat1, mat2) -> Tensor
See :func:`torch.addmm`
""")
add_docstr_all('addmm_',
r"""
addmm_(beta=1, mat, alpha=1, mat1, mat2) -> Tensor
In-place version of :meth:`~Tensor.addmm`
""")
add_docstr_all('addmv',
r"""
addmv(beta=1, tensor, alpha=1, mat, vec) -> Tensor
See :func:`torch.addmv`
""")
add_docstr_all('addmv_',
r"""
addmv_(beta=1, tensor, alpha=1, mat, vec) -> Tensor
In-place version of :meth:`~Tensor.addmv`
""")
add_docstr_all('addr',
r"""
addr(beta=1, alpha=1, vec1, vec2) -> Tensor
See :func:`torch.addr`
""")
add_docstr_all('addr_',
r"""
addr_(beta=1, alpha=1, vec1, vec2) -> Tensor
In-place version of :meth:`~Tensor.addr`
""")
add_docstr_all('all',
r"""
.. function:: all() -> bool
Returns True if all elements in the tensor are non-zero, False otherwise.
Example::
>>> a = torch.randn(1, 3).byte() % 2
>>> a
tensor([[1, 0, 0]], dtype=torch.uint8)
>>> a.all()
tensor(0, dtype=torch.uint8)
.. function:: all(dim, keepdim=False, out=None) -> Tensor
Returns True if all elements in each row of the tensor in the given
dimension :attr:`dim` are non-zero, False otherwise.
If :attr:`keepdim` is ``True``, the output tensor is of the same size as
:attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensor having 1 fewer dimension than :attr:`input`.
Args:
dim (int): the dimension to reduce
keepdim (bool): whether the output tensor has :attr:`dim` retained or not
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4, 2).byte() % 2
>>> a
tensor([[0, 0],
[0, 0],
[0, 1],
[1, 1]], dtype=torch.uint8)
>>> a.all(dim=1)
tensor([0, 0, 0, 1], dtype=torch.uint8)
""")
add_docstr_all('allclose',
r"""
allclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
See :func:`torch.allclose`
""")
add_docstr_all('any',
r"""
.. function:: any() -> bool
Returns True if any elements in the tensor are non-zero, False otherwise.
Example::
>>> a = torch.randn(1, 3).byte() % 2
>>> a
tensor([[0, 0, 1]], dtype=torch.uint8)
>>> a.any()
tensor(1, dtype=torch.uint8)
.. function:: any(dim, keepdim=False, out=None) -> Tensor
Returns True if any elements in each row of the tensor in the given
dimension :attr:`dim` are non-zero, False otherwise.
If :attr:`keepdim` is ``True``, the output tensor is of the same size as
:attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensor having 1 fewer dimension than :attr:`input`.
Args:
dim (int): the dimension to reduce
keepdim (bool): whether the output tensor has :attr:`dim` retained or not
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4, 2).byte() % 2
>>> a
tensor([[1, 0],
[0, 0],
[0, 1],
[0, 0]], dtype=torch.uint8)
>>> a.any(dim=1)
tensor([1, 0, 1, 0], dtype=torch.uint8)
""")
add_docstr_all('apply_',
r"""
apply_(callable) -> Tensor
Applies the function :attr:`callable` to each element in the tensor, replacing
each element with the value returned by :attr:`callable`.
.. note::
This function only works with CPU tensors and should not be used in code
sections that require high performance.
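Example (an illustrative sketch)::
    >>> t = torch.tensor([1., 2., 3.])
    >>> t.apply_(lambda x: x * 2)
    tensor([ 2.,  4.,  6.])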
""")
add_docstr_all('asin', r"""
asin() -> Tensor
See :func:`torch.asin`
""")
add_docstr_all('asin_',
r"""
asin_() -> Tensor
In-place version of :meth:`~Tensor.asin`
""")
add_docstr_all('atan',
r"""
atan() -> Tensor
See :func:`torch.atan`
""")
add_docstr_all('atan2',
r"""
atan2(other) -> Tensor
See :func:`torch.atan2`
""")
add_docstr_all('atan2_',
r"""
atan2_(other) -> Tensor
In-place version of :meth:`~Tensor.atan2`
""")
add_docstr_all('atan_',
r"""
atan_() -> Tensor
In-place version of :meth:`~Tensor.atan`
""")
add_docstr_all('baddbmm',
r"""
baddbmm(beta=1, alpha=1, batch1, batch2) -> Tensor
See :func:`torch.baddbmm`
""")
add_docstr_all('baddbmm_',
r"""
baddbmm_(beta=1, alpha=1, batch1, batch2) -> Tensor
In-place version of :meth:`~Tensor.baddbmm`
""")
add_docstr_all('bernoulli',
r"""
bernoulli(*, generator=None) -> Tensor
Returns a result tensor where each :math:`\texttt{result[i]}` is independently
sampled from :math:`\text{Bernoulli}(\texttt{self[i]})`. :attr:`self` must have
floating point ``dtype``, and the result will have the same ``dtype``.
See :func:`torch.bernoulli`
""")
add_docstr_all('bernoulli_',
r"""
.. function:: bernoulli_(p=0.5, *, generator=None) -> Tensor
Fills each location of :attr:`self` with an independent sample from
:math:`\text{Bernoulli}(\texttt{p})`. :attr:`self` can have integral
``dtype``.
.. function:: bernoulli_(p_tensor, *, generator=None) -> Tensor
:attr:`p_tensor` should be a tensor containing probabilities to be used for
drawing the binary random number.
The :math:`\text{i}^{th}` element of :attr:`self` tensor will be set to a
value sampled from :math:`\text{Bernoulli}(\texttt{p\_tensor[i]})`.
:attr:`self` can have integral ``dtype``, but :attr:`p_tensor` must have
floating point ``dtype``.
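Example (an illustrative sketch; the drawn values are random)::
    >>> torch.zeros(3, 3).bernoulli_(0.25)   # each element ~ Bernoulli(0.25)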
See also :meth:`~Tensor.bernoulli` and :func:`torch.bernoulli`
""")
add_docstr_all('bincount',
r"""
bincount(weights=None, minlength=0) -> Tensor
See :func:`torch.bincount`
""")
add_docstr_all('bmm',
r"""
bmm(batch2) -> Tensor
See :func:`torch.bmm`
""")
add_docstr_all('btrifact_with_info',
r"""
btrifact_with_info(pivot=True) -> (Tensor, Tensor, Tensor)
See :func:`torch.btrifact_with_info`
""")
add_docstr_all('btrisolve',
r"""
btrisolve(LU_data, LU_pivots) -> Tensor
See :func:`torch.btrisolve`
""")
add_docstr_all('cauchy_',
r"""
cauchy_(median=0, sigma=1, *, generator=None) -> Tensor
Fills the tensor with numbers drawn from the Cauchy distribution:
.. math::
f(x) = \dfrac{1}{\pi} \dfrac{\sigma}{(x - \text{median})^2 + \sigma^2}
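Example (an illustrative sketch; sampled values vary between runs)::
    >>> torch.empty(3).cauchy_()   # in-place draws from Cauchy(median=0, sigma=1)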
""")
add_docstr_all('ceil',
r"""
ceil() -> Tensor
See :func:`torch.ceil`
""")
add_docstr_all('ceil_',
r"""
ceil_() -> Tensor
In-place version of :meth:`~Tensor.ceil`
""")
add_docstr_all('cholesky',
r"""
cholesky(upper=False) -> Tensor
See :func:`torch.cholesky`
""")
add_docstr_all('clamp',
r"""
clamp(min, max) -> Tensor
See :func:`torch.clamp`
""")
add_docstr_all('clamp_',
r"""
clamp_(min, max) -> Tensor
In-place version of :meth:`~Tensor.clamp`
""")
add_docstr_all('clone',
r"""
clone() -> Tensor
Returns a copy of the :attr:`self` tensor. The copy has the same size and data
type as :attr:`self`.
.. note::
Unlike `copy_()`, this function is recorded in the computation graph. Gradients
propagating to the cloned tensor will propagate to the original tensor.
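Example (an illustrative sketch of gradient flow through a clone)::
    >>> x = torch.tensor([1., 2.], requires_grad=True)
    >>> x.clone().sum().backward()
    >>> x.grad
    tensor([ 1.,  1.])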
""")
add_docstr_all('contiguous',
r"""
contiguous() -> Tensor
Returns a contiguous tensor containing the same data as :attr:`self` tensor. If
:attr:`self` tensor is contiguous, this function returns the :attr:`self`
tensor.
""")
add_docstr_all('copy_',
r"""
copy_(src, non_blocking=False) -> Tensor
Copies the elements from :attr:`src` into :attr:`self` tensor and returns
:attr:`self`.
The :attr:`src` tensor must be :ref:`broadcastable <broadcasting-semantics>`
with the :attr:`self` tensor. It may be of a different data type or reside on a
different device.
Args:
src (Tensor): the source tensor to copy from
non_blocking (bool): if ``True`` and this copy is between CPU and GPU,
the copy may occur asynchronously with respect to the host. For other
cases, this argument has no effect.
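Example (an illustrative sketch; note the dtype conversion)::
    >>> a = torch.zeros(3)
    >>> a.copy_(torch.tensor([1, 2, 3]))
    tensor([ 1.,  2.,  3.])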
""")
add_docstr_all('cos',
r"""
cos() -> Tensor
See :func:`torch.cos`
""")
add_docstr_all('cos_',
r"""
cos_() -> Tensor
In-place version of :meth:`~Tensor.cos`
""")
add_docstr_all('cosh',
r"""
cosh() -> Tensor
See :func:`torch.cosh`
""")
add_docstr_all('cosh_',
r"""
cosh_() -> Tensor
In-place version of :meth:`~Tensor.cosh`
""")
add_docstr_all('cpu',
r"""
cpu() -> Tensor
Returns a copy of this object in CPU memory.
If this object is already in CPU memory and on the correct device,
then no copy is performed and the original object is returned.
""")
add_docstr_all('cross',
r"""
cross(other, dim=-1) -> Tensor
See :func:`torch.cross`
""")
add_docstr_all('cuda',
r"""
cuda(device=None, non_blocking=False) -> Tensor
Returns a copy of this object in CUDA memory.
If this object is already in CUDA memory and on the correct device,
then no copy is performed and the original object is returned.
Args:
device (:class:`torch.device`): The destination GPU device.
Defaults to the current CUDA device.
non_blocking (bool): If ``True`` and the source is in pinned memory,
the copy will be asynchronous with respect to the host.
Otherwise, the argument has no effect. Default: ``False``.
""")
add_docstr_all('cumprod',
r"""
cumprod(dim, dtype=None) -> Tensor
See :func:`torch.cumprod`
""")
add_docstr_all('cumsum',
r"""
cumsum(dim, dtype=None) -> Tensor
See :func:`torch.cumsum`
""")
add_docstr_all('data_ptr',
r"""
data_ptr() -> int
Returns the address of the first element of :attr:`self` tensor.
""")
add_docstr_all('dense_dim',
r"""
dense_dim() -> int
If :attr:`self` is a sparse COO tensor (i.e., with ``torch.sparse_coo`` layout),
this returns the number of dense dimensions. Otherwise, this throws an
error.
See also :meth:`Tensor.sparse_dim`.
""")
add_docstr_all('diag',
r"""
diag(diagonal=0) -> Tensor
See :func:`torch.diag`
""")
add_docstr_all('diag_embed',
r"""
diag_embed(offset=0, dim1=-2, dim2=-1) -> Tensor
See :func:`torch.diag_embed`
""")
add_docstr_all('diagflat',
r"""
diagflat(diagonal=0) -> Tensor
See :func:`torch.diagflat`
""")
add_docstr_all('diagonal',
r"""
diagonal(offset=0, dim1=0, dim2=1) -> Tensor
See :func:`torch.diagonal`
""")
add_docstr_all('digamma',
r"""
digamma() -> Tensor
See :func:`torch.digamma`
""")
add_docstr_all('digamma_',
r"""
digamma_() -> Tensor
In-place version of :meth:`~Tensor.digamma`
""")
add_docstr_all('dim',
r"""
dim() -> int
Returns the number of dimensions of :attr:`self` tensor.
""")
add_docstr_all('dist',
r"""
dist(other, p=2) -> Tensor
See :func:`torch.dist`
""")
add_docstr_all('div',
r"""
div(value) -> Tensor
See :func:`torch.div`
""")
add_docstr_all('div_',
r"""
div_(value) -> Tensor
In-place version of :meth:`~Tensor.div`
""")
add_docstr_all('dot',
r"""
dot(tensor2) -> Tensor
See :func:`torch.dot`
""")
add_docstr_all('eig',
r"""
eig(eigenvectors=False) -> (Tensor, Tensor)
See :func:`torch.eig`
""")
add_docstr_all('element_size',
r"""
element_size() -> int
Returns the size in bytes of an individual element.
Example::
>>> torch.tensor([]).element_size()
4
>>> torch.tensor([], dtype=torch.uint8).element_size()
1
""")
add_docstr_all('eq',
r"""
eq(other) -> Tensor
See :func:`torch.eq`
""")
add_docstr_all('eq_',
r"""
eq_(other) -> Tensor
In-place version of :meth:`~Tensor.eq`
""")
add_docstr_all('equal',
r"""
equal(other) -> bool
See :func:`torch.equal`
""")
add_docstr_all('erf',
r"""
erf() -> Tensor
See :func:`torch.erf`
""")
add_docstr_all('erf_',
r"""
erf_() -> Tensor
In-place version of :meth:`~Tensor.erf`
""")
add_docstr_all('erfc',
r"""
erfc() -> Tensor
See :func:`torch.erfc`
""")
add_docstr_all('erfc_',
r"""
erfc_() -> Tensor
In-place version of :meth:`~Tensor.erfc`
""")
add_docstr_all('erfinv',
r"""
erfinv() -> Tensor
See :func:`torch.erfinv`
""")
add_docstr_all('erfinv_',
r"""
erfinv_() -> Tensor
In-place version of :meth:`~Tensor.erfinv`
""")
add_docstr_all('exp',
r"""
exp() -> Tensor
See :func:`torch.exp`
""")
add_docstr_all('exp_',
r"""
exp_() -> Tensor
In-place version of :meth:`~Tensor.exp`
""")
add_docstr_all('expm1',
r"""
expm1() -> Tensor
See :func:`torch.expm1`
""")
add_docstr_all('expm1_',
r"""
expm1_() -> Tensor
In-place version of :meth:`~Tensor.expm1`
""")
add_docstr_all('exponential_',
r"""
exponential_(lambd=1, *, generator=None) -> Tensor
Fills :attr:`self` tensor with elements drawn from the exponential distribution:
.. math::
f(x) = \lambda e^{-\lambda x}
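Example (an illustrative sketch; sampled values vary between runs)::
    >>> torch.empty(3).exponential_(lambd=0.5)   # in-place draws from Exp(0.5)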
""")
add_docstr_all('fill_',
r"""
fill_(value) -> Tensor
Fills :attr:`self` tensor with the specified value.
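Example (an illustrative sketch)::
    >>> torch.empty(2, 3).fill_(5)
    tensor([[ 5.,  5.,  5.],
            [ 5.,  5.,  5.]])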
""")
add_docstr_all('floor',
r"""
floor() -> Tensor
See :func:`torch.floor`
""")
add_docstr_all('flip',
r"""
flip(dims) -> Tensor
See :func:`torch.flip`
""")
add_docstr_all('roll',
r"""
roll(shifts, dims) -> Tensor
See :func:`torch.roll`
""")
add_docstr_all('floor_',
r"""
floor_() -> Tensor
In-place version of :meth:`~Tensor.floor`
""")
add_docstr_all('fmod',
r"""
fmod(divisor) -> Tensor
See :func:`torch.fmod`
""")
add_docstr_all('fmod_',
r"""
fmod_(divisor) -> Tensor
In-place version of :meth:`~Tensor.fmod`
""")
add_docstr_all('frac',
r"""
frac() -> Tensor
See :func:`torch.frac`
""")
add_docstr_all('frac_',
r"""
frac_() -> Tensor
In-place version of :meth:`~Tensor.frac`
""")
add_docstr_all('flatten',
r"""
flatten(start_dim=0, end_dim=-1) -> Tensor
See :func:`torch.flatten`
""")
add_docstr_all('gather',
r"""
gather(dim, index) -> Tensor
See :func:`torch.gather`
""")
add_docstr_all('ge',
r"""
ge(other) -> Tensor
See :func:`torch.ge`
""")
add_docstr_all('ge_',
r"""
ge_(other) -> Tensor
In-place version of :meth:`~Tensor.ge`
""")
add_docstr_all('gels',
r"""
gels(A) -> Tensor
See :func:`torch.gels`
""")
add_docstr_all('geometric_',
r"""
geometric_(p, *, generator=None) -> Tensor
Fills :attr:`self` tensor with elements drawn from the geometric distribution:
.. math::
f(X=k) = (1 - p)^{k - 1} p
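Example (an illustrative sketch; sampled values vary between runs)::
    >>> torch.empty(3).geometric_(0.5)   # in-place draws from Geometric(0.5)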
""")
add_docstr_all('geqrf',
r"""
geqrf() -> (Tensor, Tensor)
See :func:`torch.geqrf`
""")
add_docstr_all('ger',
r"""
ger(vec2) -> Tensor
See :func:`torch.ger`
""")
add_docstr_all('gesv',
r"""
gesv(A) -> Tensor, Tensor
See :func:`torch.gesv`
""")
add_docstr_all('indices',
r"""
indices() -> Tensor
If :attr:`self` is a sparse COO tensor (i.e., with ``torch.sparse_coo`` layout),
this returns a view of the contained indices tensor. Otherwise, this throws an
error.
See also :meth:`Tensor.values`.
.. note::
This method can only be called on a coalesced sparse tensor. See
:meth:`Tensor.coalesce` for details.
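Example (an illustrative sketch)::
    >>> i = torch.tensor([[0, 2]])
    >>> v = torch.tensor([3., 4.])
    >>> torch.sparse_coo_tensor(i, v, (4,)).coalesce().indices()
    tensor([[0, 2]])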
""")
add_docstr_all('get_device',
r"""
get_device() -> Device ordinal (Integer)
For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides.
For CPU tensors, an error is thrown.
Example::
>>> x = torch.randn(3, 4, 5, device='cuda:0')
>>> x.get_device()
0
>>> x.cpu().get_device() # RuntimeError: get_device is not implemented for type torch.FloatTensor
""")
add_docstr_all('values',
r"""
values() -> Tensor
If :attr:`self` is a sparse COO tensor (i.e., with ``torch.sparse_coo`` layout),
this returns a view of the contained values tensor. Otherwise, this throws an
error.
See also :meth:`Tensor.indices`.
.. note::
This method can only be called on a coalesced sparse tensor. See
:meth:`Tensor.coalesce` for details.
""")
add_docstr_all('gt',
r"""
gt(other) -> Tensor
See :func:`torch.gt`
""")
add_docstr_all('gt_',
r"""
gt_(other) -> Tensor
In-place version of :meth:`~Tensor.gt`
""")
add_docstr_all('hardshrink',
r"""
hardshrink(lambd=0.5) -> Tensor
See :func:`torch.nn.functional.hardshrink`
""")
add_docstr_all('histc',
r"""
histc(bins=100, min=0, max=0) -> Tensor
See :func:`torch.histc`
""")
add_docstr_all('index_add_',
r"""
index_add_(dim, index, tensor) -> Tensor
Accumulate the elements of :attr:`tensor` into the :attr:`self` tensor by adding
to the indices in the order given in :attr:`index`. For example, if ``dim == 0``
and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is added to the
``j``\ th row of :attr:`self`.
The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.
.. include:: cuda_deterministic.rst
Args:
dim (int): dimension along which to index
index (LongTensor): indices of :attr:`tensor` to select from
tensor (Tensor): the tensor containing values to add
Example::
>>> x = torch.ones(5, 3)
>>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
>>> index = torch.tensor([0, 4, 2])
>>> x.index_add_(0, index, t)
tensor([[ 2., 3., 4.],
[ 1., 1., 1.],
[ 8., 9., 10.],
[ 1., 1., 1.],
[ 5., 6., 7.]])
""")
add_docstr_all('index_copy_',
r"""
index_copy_(dim, index, tensor) -> Tensor
Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting
the indices in the order given in :attr:`index`. For example, if ``dim == 0``
and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the
``j``\ th row of :attr:`self`.
The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.
Args:
dim (int): dimension along which to index
index (LongTensor): indices of :attr:`tensor` to select from
tensor (Tensor): the tensor containing values to copy
Example::
>>> x = torch.zeros(5, 3)
>>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
>>> index = torch.tensor([0, 4, 2])
>>> x.index_copy_(0, index, t)
tensor([[ 1., 2., 3.],
[ 0., 0., 0.],
[ 7., 8., 9.],
[ 0., 0., 0.],
[ 4., 5., 6.]])
""")
add_docstr_all('index_fill_',
r"""
index_fill_(dim, index, val) -> Tensor
Fills the elements of the :attr:`self` tensor with value :attr:`val` by
selecting the indices in the order given in :attr:`index`.
Args:
dim (int): dimension along which to index
index (LongTensor): indices of :attr:`self` tensor to fill in
val (float): the value to fill with
Example::
>>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
>>> index = torch.tensor([0, 2])
>>> x.index_fill_(1, index, -1)
tensor([[-1., 2., -1.],
[-1., 5., -1.],
[-1., 8., -1.]])
""")
add_docstr_all('index_put_',
r"""
index_put_(indices, value, accumulate=False) -> Tensor
Puts values from the tensor :attr:`value` into the tensor :attr:`self` using
the indices specified in :attr:`indices` (which is a tuple of Tensors). The
expression ``tensor.index_put_(indices, value)`` is equivalent to
``tensor[indices] = value``. Returns :attr:`self`.
If :attr:`accumulate` is ``True``, the elements in :attr:`value` are added to
:attr:`self`. If accumulate is ``False``, the behavior is undefined if indices
contain duplicate elements.
Args:
indices (tuple of LongTensor): tensors used to index into `self`.
value (Tensor): tensor of same dtype as `self`.
accumulate (bool): whether to accumulate into self
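Example (an illustrative sketch)::
    >>> x = torch.zeros(3)
    >>> x.index_put_((torch.tensor([0, 2]),), torch.tensor([1., 2.]))
    tensor([ 1.,  0.,  2.])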
""")
add_docstr_all('index_select',
r"""
index_select(dim, index) -> Tensor
See :func:`torch.index_select`
""")
add_docstr_all('sparse_mask',
r"""
sparse_mask(input, mask) -> Tensor
Returns a new SparseTensor with values from the Tensor :attr:`input` filtered
by the indices of :attr:`mask`; the values of :attr:`mask` are ignored.
:attr:`input` and :attr:`mask` must have the same shape.
Args:
input (Tensor): an input Tensor
mask (SparseTensor): a SparseTensor which we filter :attr:`input` based on its indices
Example::
>>> nnz = 5
>>> dims = [5, 5, 2, 2]
>>> I = torch.cat([torch.randint(0, dims[0], size=(nnz,)),
torch.randint(0, dims[1], size=(nnz,))], 0).reshape(2, nnz)
>>> V = torch.randn(nnz, dims[2], dims[3])
>>> size = torch.Size(dims)
>>> S = torch.sparse_coo_tensor(I, V, size).coalesce()
>>> D = torch.randn(dims)
>>> D.sparse_mask(S)
tensor(indices=tensor([[0, 0, 0, 2],
[0, 1, 4, 3]]),
values=tensor([[[ 1.6550, 0.2397],
[-0.1611, -0.0779]],
[[ 0.2326, -1.0558],
[ 1.4711, 1.9678]],
[[-0.5138, -0.0411],
[ 1.9417, 0.5158]],
[[ 0.0793, 0.0036],
[-0.2569, -0.1055]]]),
size=(5, 5, 2, 2), nnz=4, layout=torch.sparse_coo)
""")
add_docstr_all('inverse',
r"""
inverse() -> Tensor
See :func:`torch.inverse`
""")
add_docstr_all('is_contiguous',
r"""
is_contiguous() -> bool
Returns True if :attr:`self` tensor is contiguous in memory in C order.
""")
add_docstr_all('is_set_to',
r"""
is_set_to(tensor) -> bool
Returns True if this object refers to the same ``THTensor`` object from the
Torch C API as the given tensor.
""")
add_docstr_all('item', r"""
item() -> number
Returns the value of this tensor as a standard Python number. This only works
for tensors with one element. For other cases, see :meth:`~Tensor.tolist`.
This operation is not differentiable.
Example::
>>> x = torch.tensor([1.0])
>>> x.item()
1.0
""")
add_docstr_all('kthvalue',
r"""
kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor)
See :func:`torch.kthvalue`
""")
add_docstr_all('le',
r"""
le(other) -> Tensor
See :func:`torch.le`
""")
add_docstr_all('le_',
r"""
le_(other) -> Tensor
In-place version of :meth:`~Tensor.le`
""")
add_docstr_all('lerp',
r"""
lerp(start, end, weight) -> Tensor
See :func:`torch.lerp`
""")
add_docstr_all('lerp_',
r"""
lerp_(start, end, weight) -> Tensor
In-place version of :meth:`~Tensor.lerp`
""")
add_docstr_all('log',
r"""
log() -> Tensor
See :func:`torch.log`
""")
add_docstr_all('log_', r"""
log_() -> Tensor
In-place version of :meth:`~Tensor.log`
""")
add_docstr_all('log10',
r"""
log10() -> Tensor
See :func:`torch.log10`
""")
add_docstr_all('log10_',
r"""
log10_() -> Tensor
In-place version of :meth:`~Tensor.log10`
""")
add_docstr_all('log1p',
r"""
log1p() -> Tensor
See :func:`torch.log1p`
""")
add_docstr_all('log1p_',
r"""
log1p_() -> Tensor
In-place version of :meth:`~Tensor.log1p`
""")
add_docstr_all('log2',
r"""
log2() -> Tensor
See :func:`torch.log2`
""")
add_docstr_all('log2_',
r"""
log2_() -> Tensor
In-place version of :meth:`~Tensor.log2`
""")
add_docstr_all('log_normal_', r"""
log_normal_(mean=1, std=2, *, generator=None)
Fills :attr:`self` tensor with numbers sampled from the log-normal distribution
parameterized by the given mean :math:`\mu` and standard deviation
:math:`\sigma`. Note that :attr:`mean` and :attr:`std` are the mean and
standard deviation of the underlying normal distribution, and not of the
returned distribution:
.. math::
f(x) = \dfrac{1}{x \sigma \sqrt{2\pi}}\ e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}}
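Example (illustrative sketch; sampled values are random and not reproducible)::
>>> torch.empty(3).log_normal_(mean=0., std=0.25)
tensor([ 1.0421,  0.7463,  1.3055])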
""")
add_docstr_all('logsumexp',
r"""
logsumexp(dim, keepdim=False) -> Tensor
See :func:`torch.logsumexp`
""")
add_docstr_all('lt',
r"""
lt(other) -> Tensor
See :func:`torch.lt`
""")
add_docstr_all('lt_',
r"""
lt_(other) -> Tensor
In-place version of :meth:`~Tensor.lt`
""")
add_docstr_all('map_',
r"""
map_(tensor, callable)
Applies :attr:`callable` for each element in :attr:`self` tensor and the given
:attr:`tensor` and stores the results in :attr:`self` tensor. :attr:`self` tensor and
the given :attr:`tensor` must be :ref:`broadcastable <broadcasting-semantics>`.
The :attr:`callable` should have the signature::
def callable(a, b) -> number
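Example (illustrative sketch for CPU tensors; output formatting may vary)::
>>> a = torch.tensor([1., 2., 3.])
>>> b = torch.tensor([10., 20., 30.])
>>> a.map_(b, lambda x, y: x + y)
tensor([ 11.,  22.,  33.])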
""")
add_docstr_all('masked_scatter_',
r"""
masked_scatter_(mask, source)
Copies elements from :attr:`source` into :attr:`self` tensor at positions where
the :attr:`mask` is one.
The shape of :attr:`mask` must be :ref:`broadcastable <broadcasting-semantics>`
with the shape of the underlying tensor. The :attr:`source` should have at least
as many elements as the number of ones in :attr:`mask`.
Args:
mask (ByteTensor): the binary mask
source (Tensor): the tensor to copy from
.. note::
The :attr:`mask` operates on the :attr:`self` tensor, not on the given
:attr:`source` tensor.
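Example (illustrative sketch; output formatting may vary)::
>>> x = torch.zeros(5)
>>> mask = torch.tensor([1, 0, 1, 0, 1], dtype=torch.uint8)
>>> x.masked_scatter_(mask, torch.tensor([10., 20., 30.]))
tensor([ 10.,   0.,  20.,   0.,  30.])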
""")
add_docstr_all('masked_fill_',
r"""
masked_fill_(mask, value)
Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is
one. The shape of :attr:`mask` must be
:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor.
Args:
mask (ByteTensor): the binary mask
value (float): the value to fill in with
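Example (illustrative sketch; output formatting may vary)::
>>> x = torch.tensor([1., 2., 3.])
>>> x.masked_fill_(torch.tensor([1, 0, 1], dtype=torch.uint8), -1.)
tensor([-1.,  2., -1.])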
""")
add_docstr_all('masked_select',
r"""
masked_select(mask) -> Tensor
See :func:`torch.masked_select`
""")
add_docstr_all('matrix_power',
r"""
matrix_power(n) -> Tensor
See :func:`torch.matrix_power`
""")
add_docstr_all('max',
r"""
max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
See :func:`torch.max`
""")
add_docstr_all('mean',
r"""
mean(dim=None, keepdim=False) -> Tensor
See :func:`torch.mean`
""")
add_docstr_all('median',
r"""
median(dim=None, keepdim=False) -> (Tensor, LongTensor)
See :func:`torch.median`
""")
add_docstr_all('min',
r"""
min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
See :func:`torch.min`
""")
add_docstr_all('mm',
r"""
mm(mat2) -> Tensor
See :func:`torch.mm`
""")
add_docstr_all('mode',
r"""
mode(dim=None, keepdim=False) -> (Tensor, LongTensor)
See :func:`torch.mode`
""")
add_docstr_all('mul',
r"""
mul(value) -> Tensor
See :func:`torch.mul`
""")
add_docstr_all('mul_',
r"""
mul_(value)
In-place version of :meth:`~Tensor.mul`
""")
add_docstr_all('multinomial',
r"""
multinomial(num_samples, replacement=False, *, generator=None) -> Tensor
See :func:`torch.multinomial`
""")
add_docstr_all('mv',
r"""
mv(vec) -> Tensor
See :func:`torch.mv`
""")
add_docstr_all('mvlgamma',
r"""
mvlgamma(p) -> Tensor
See :func:`torch.mvlgamma`
""")
add_docstr_all('mvlgamma_',
r"""
mvlgamma_(p) -> Tensor
In-place version of :meth:`~Tensor.mvlgamma`
""")
add_docstr_all('narrow',
r"""
narrow(dimension, start, length) -> Tensor
See :func:`torch.narrow`
Example::
>>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> x.narrow(0, 0, 2)
tensor([[ 1, 2, 3],
[ 4, 5, 6]])
>>> x.narrow(1, 1, 2)
tensor([[ 2, 3],
[ 5, 6],
[ 8, 9]])
""")
add_docstr_all('narrow_copy',
r"""
narrow_copy(dimension, start, length) -> Tensor
Same as :meth:`Tensor.narrow` except returning a copy rather
than shared storage. This is primarily for sparse tensors, which
do not have a shared-storage narrow method. Calling ``narrow_copy``
with ``dimension > self.sparse_dim()`` will return a copy with the
relevant dense dimension narrowed, and ``self.shape`` updated accordingly.
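Example (illustrative sketch for the dense case; output formatting may vary)::
>>> x = torch.tensor([[1, 2, 3], [4, 5, 6]])
>>> x.narrow_copy(0, 0, 1)
tensor([[ 1,  2,  3]])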
""")
add_docstr_all('ndimension',
r"""
ndimension() -> int
Alias for :meth:`~Tensor.dim()`
""")
add_docstr_all('ne',
r"""
ne(other) -> Tensor
See :func:`torch.ne`
""")
add_docstr_all('ne_',
r"""
ne_(other) -> Tensor
In-place version of :meth:`~Tensor.ne`
""")
add_docstr_all('neg',
r"""
neg() -> Tensor
See :func:`torch.neg`
""")
add_docstr_all('neg_',
r"""
neg_() -> Tensor
In-place version of :meth:`~Tensor.neg`
""")
add_docstr_all('nelement',
r"""
nelement() -> int
Alias for :meth:`~Tensor.numel`
""")
add_docstr_all('nonzero',
r"""
nonzero() -> LongTensor
See :func:`torch.nonzero`
""")
add_docstr_all('norm',
r"""
norm(p=2, dim=None, keepdim=False) -> Tensor
See :func:`torch.norm`
""")
add_docstr_all('normal_',
r"""
normal_(mean=0, std=1, *, generator=None) -> Tensor
Fills :attr:`self` tensor with elements sampled from the normal distribution
parameterized by :attr:`mean` and :attr:`std`.
""")
add_docstr_all('numel',
r"""
numel() -> int
See :func:`torch.numel`
""")
add_docstr_all('numpy',
r"""
numpy() -> numpy.ndarray
Returns :attr:`self` tensor as a NumPy :class:`ndarray`. This tensor and the
returned :class:`ndarray` share the same underlying storage. Changes to
:attr:`self` tensor will be reflected in the :class:`ndarray` and vice versa.
""")
add_docstr_all('orgqr',
r"""
orgqr(input2) -> Tensor
See :func:`torch.orgqr`
""")
add_docstr_all('ormqr',
r"""
ormqr(input2, input3, left=True, transpose=False) -> Tensor
See :func:`torch.ormqr`
""")
add_docstr_all('permute',
r"""
permute(*dims) -> Tensor
Permute the dimensions of this tensor.
Args:
*dims (int...): The desired ordering of dimensions
Example:
>>> x = torch.randn(2, 3, 5)
>>> x.size()
torch.Size([2, 3, 5])
>>> x.permute(2, 0, 1).size()
torch.Size([5, 2, 3])
""")
add_docstr_all('potri',
r"""
potri(upper=True) -> Tensor
See :func:`torch.potri`
""")
add_docstr_all('potrs',
r"""
potrs(input2, upper=True) -> Tensor
See :func:`torch.potrs`
""")
add_docstr_all('pow',
r"""
pow(exponent) -> Tensor
See :func:`torch.pow`
""")
add_docstr_all('pow_',
r"""
pow_(exponent) -> Tensor
In-place version of :meth:`~Tensor.pow`
""")
add_docstr_all('prod',
r"""
prod(dim=None, keepdim=False, dtype=None) -> Tensor
See :func:`torch.prod`
""")
add_docstr_all('pstrf',
r"""
pstrf(upper=True, tol=-1) -> (Tensor, IntTensor)
See :func:`torch.pstrf`
""")
add_docstr_all('put_',
r"""
put_(indices, tensor, accumulate=False) -> Tensor
Copies the elements from :attr:`tensor` into the positions specified by
indices. For the purpose of indexing, the :attr:`self` tensor is treated as if
it were a 1-D tensor.
If :attr:`accumulate` is ``True``, the elements in :attr:`tensor` are added to
:attr:`self`. If accumulate is ``False``, the behavior is undefined if indices
contain duplicate elements.
Args:
indices (LongTensor): the indices into self
tensor (Tensor): the tensor containing values to copy from
accumulate (bool): whether to accumulate into self
Example::
>>> src = torch.tensor([[4, 3, 5],
[6, 7, 8]])
>>> src.put_(torch.tensor([1, 3]), torch.tensor([9, 10]))
tensor([[ 4, 9, 5],
[ 10, 7, 8]])
""")
add_docstr_all('qr',
r"""
qr() -> (Tensor, Tensor)
See :func:`torch.qr`
""")
add_docstr_all('random_',
r"""
random_(from=0, to=None, *, generator=None) -> Tensor
Fills :attr:`self` tensor with numbers sampled from the discrete uniform
distribution over ``[from, to - 1]``. If not specified, the values are usually
only bounded by :attr:`self` tensor's data type. However, for floating point
types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every
value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()`
will be uniform in ``[0, 2^53]``.
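Example (illustrative sketch; sampled values are random and not reproducible)::
>>> torch.empty(3).random_(0, 10)
tensor([ 7.,  3.,  1.])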
""")
add_docstr_all('reciprocal',
r"""
reciprocal() -> Tensor
See :func:`torch.reciprocal`
""")
add_docstr_all('reciprocal_',
r"""
reciprocal_() -> Tensor
In-place version of :meth:`~Tensor.reciprocal`
""")
add_docstr_all('remainder',
r"""
remainder(divisor) -> Tensor
See :func:`torch.remainder`
""")
add_docstr_all('remainder_',
r"""
remainder_(divisor) -> Tensor
In-place version of :meth:`~Tensor.remainder`
""")
add_docstr_all('renorm',
r"""
renorm(p, dim, maxnorm) -> Tensor
See :func:`torch.renorm`
""")
add_docstr_all('renorm_',
r"""
renorm_(p, dim, maxnorm) -> Tensor
In-place version of :meth:`~Tensor.renorm`
""")
add_docstr_all('repeat',
r"""
repeat(*sizes) -> Tensor
Repeats this tensor along the specified dimensions.
Unlike :meth:`~Tensor.expand`, this function copies the tensor's data.
.. warning::
:meth:`~Tensor.repeat` behaves differently from
`numpy.repeat <https://docs.scipy.org/doc/numpy/reference/generated/numpy.repeat.html>`_,
but is more similar to
`numpy.tile <https://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html>`_.
Args:
sizes (torch.Size or int...): The number of times to repeat this tensor along each
dimension
Example::
>>> x = torch.tensor([1, 2, 3])
>>> x.repeat(4, 2)
tensor([[ 1, 2, 3, 1, 2, 3],
[ 1, 2, 3, 1, 2, 3],
[ 1, 2, 3, 1, 2, 3],
[ 1, 2, 3, 1, 2, 3]])
>>> x.repeat(4, 2, 1).size()
torch.Size([4, 2, 3])
""")
add_docstr_all('requires_grad_',
r"""
requires_grad_(requires_grad=True) -> Tensor
Change if autograd should record operations on this tensor: sets this tensor's
:attr:`requires_grad` attribute in-place. Returns this tensor.
:func:`requires_grad_`'s main use case is to tell autograd to begin recording
operations on a Tensor ``tensor``. If ``tensor`` has ``requires_grad=False``
(because it was obtained through a DataLoader, or required preprocessing or
initialization), ``tensor.requires_grad_()`` makes it so that autograd will
begin to record operations on ``tensor``.
Args:
requires_grad (bool): If autograd should record operations on this tensor.
Default: ``True``.
Example::
>>> # Let's say we want to preprocess some saved weights and use
>>> # the result as new weights.
>>> saved_weights = [0.1, 0.2, 0.3, 0.25]
>>> loaded_weights = torch.tensor(saved_weights)
>>> weights = preprocess(loaded_weights) # some function
>>> weights
tensor([-0.5503, 0.4926, -2.1158, -0.8303])
>>> # Now, start to record operations done to weights
>>> weights.requires_grad_()
>>> out = weights.pow(2).sum()
>>> out.backward()
>>> weights.grad
tensor([-1.1007, 0.9853, -4.2316, -1.6606])
""")
add_docstr_all('reshape',
r"""
reshape(*shape) -> Tensor
Returns a tensor with the same data and number of elements as :attr:`self`
but with the specified shape. This method returns a view if :attr:`shape` is
compatible with the current shape. See :meth:`torch.Tensor.view` on when it is
possible to return a view.
See :func:`torch.reshape`
Args:
shape (tuple of ints or int...): the desired shape
""")
add_docstr_all('reshape_as',
r"""
reshape_as(other) -> Tensor
Returns this tensor as the same shape as :attr:`other`.
``self.reshape_as(other)`` is equivalent to ``self.reshape(other.sizes())``.
This method returns a view if ``other.sizes()`` is compatible with the current
shape. See :meth:`torch.Tensor.view` on when it is possible to return a view.
Please see :meth:`reshape` for more information about ``reshape``.
Args:
other (:class:`torch.Tensor`): The result tensor has the same shape
as :attr:`other`.
""")
add_docstr_all('resize_',
r"""
resize_(*sizes) -> Tensor
Resizes :attr:`self` tensor to the specified size. If the number of elements is
larger than the current storage size, then the underlying storage is resized
to fit the new number of elements. If the number of elements is smaller, the
underlying storage is not changed. Existing elements are preserved but any new
memory is uninitialized.
.. warning::
This is a low-level method. The storage is reinterpreted as C-contiguous,
ignoring the current strides (unless the target size equals the current
size, in which case the tensor is left unchanged). For most purposes, you
will instead want to use :meth:`~Tensor.view()`, which checks for
contiguity, or :meth:`~Tensor.reshape()`, which copies data if needed. To
change the size in-place with custom strides, see :meth:`~Tensor.set_()`.
Args:
sizes (torch.Size or int...): the desired size
Example::
>>> x = torch.tensor([[1, 2], [3, 4], [5, 6]])
>>> x.resize_(2, 2)
tensor([[ 1, 2],
[ 3, 4]])
""")
add_docstr_all('resize_as_',
r"""
resize_as_(tensor) -> Tensor
Resizes the :attr:`self` tensor to be the same size as the specified
:attr:`tensor`. This is equivalent to ``self.resize_(tensor.size())``.
""")
add_docstr_all('rot90',
r"""
rot90(k, dims) -> Tensor
See :func:`torch.rot90`
""")
add_docstr_all('round',
r"""
round() -> Tensor
See :func:`torch.round`
""")
add_docstr_all('round_',
r"""
round_() -> Tensor
In-place version of :meth:`~Tensor.round`
""")
add_docstr_all('rsqrt',
r"""
rsqrt() -> Tensor
See :func:`torch.rsqrt`
""")
add_docstr_all('rsqrt_',
r"""
rsqrt_() -> Tensor
In-place version of :meth:`~Tensor.rsqrt`
""")
add_docstr_all('scatter_',
r"""
scatter_(dim, index, src) -> Tensor
Writes all values from the tensor :attr:`src` into :attr:`self` at the indices
specified in the :attr:`index` tensor. For each value in :attr:`src`, its output
index is specified by its index in :attr:`src` for ``dimension != dim`` and by
the corresponding value in :attr:`index` for ``dimension = dim``.
For a 3-D tensor, :attr:`self` is updated as::
self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1
self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
This is the reverse operation of the manner described in :meth:`~Tensor.gather`.
:attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should have same
number of dimensions. It is also required that ``index.size(d) <= src.size(d)``
for all dimensions ``d``, and that ``index.size(d) <= self.size(d)`` for all
dimensions ``d != dim``.
Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
between ``0`` and ``self.size(dim) - 1`` inclusive, and all values in a row
along the specified dimension :attr:`dim` must be unique.
Args:
dim (int): the axis along which to index
index (LongTensor): the indices of elements to scatter,
can be either empty or the same size of src.
When empty, the operation returns identity
src (Tensor or float): the source element(s) to scatter
Example::
>>> x = torch.rand(2, 5)
>>> x
tensor([[ 0.3992, 0.2908, 0.9044, 0.4850, 0.6004],
[ 0.5735, 0.9006, 0.6797, 0.4152, 0.1732]])
>>> torch.zeros(3, 5).scatter_(0, torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]]), x)
tensor([[ 0.3992, 0.9006, 0.6797, 0.4850, 0.6004],
[ 0.0000, 0.2908, 0.0000, 0.4152, 0.0000],
[ 0.5735, 0.0000, 0.9044, 0.0000, 0.1732]])
>>> z = torch.zeros(2, 4).scatter_(1, torch.tensor([[2], [3]]), 1.23)
>>> z
tensor([[ 0.0000, 0.0000, 1.2300, 0.0000],
[ 0.0000, 0.0000, 0.0000, 1.2300]])
""")
add_docstr_all('scatter_add_',
r"""
scatter_add_(dim, index, other) -> Tensor
Adds all values from the tensor :attr:`other` into :attr:`self` at the indices
specified in the :attr:`index` tensor in a similar fashion as
:meth:`~torch.Tensor.scatter_`. For each value in :attr:`other`, it is added to
an index in :attr:`self` which is specified by its index in :attr:`other`
for ``dimension != dim`` and by the corresponding value in :attr:`index` for
``dimension = dim``.
For a 3-D tensor, :attr:`self` is updated as::
self[index[i][j][k]][j][k] += other[i][j][k] # if dim == 0
self[i][index[i][j][k]][k] += other[i][j][k] # if dim == 1
self[i][j][index[i][j][k]] += other[i][j][k] # if dim == 2
:attr:`self`, :attr:`index` and :attr:`other` should have same number of
dimensions. It is also required that ``index.size(d) <= other.size(d)`` for all
dimensions ``d``, and that ``index.size(d) <= self.size(d)`` for all dimensions
``d != dim``.
Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
between ``0`` and ``self.size(dim) - 1`` inclusive, and all values in a row along
the specified dimension :attr:`dim` must be unique.
.. include:: cuda_deterministic.rst
Args:
dim (int): the axis along which to index
index (LongTensor): the indices of elements to scatter and add,
can be either empty or the same size of src.
When empty, the operation returns identity.
other (Tensor): the source elements to scatter and add
Example::
>>> x = torch.rand(2, 5)
>>> x
tensor([[0.7404, 0.0427, 0.6480, 0.3806, 0.8328],
[0.7953, 0.2009, 0.9154, 0.6782, 0.9620]])
>>> torch.ones(3, 5).scatter_add_(0, torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]]), x)
tensor([[1.7404, 1.2009, 1.9154, 1.3806, 1.8328],
[1.0000, 1.0427, 1.0000, 1.6782, 1.0000],
[1.7953, 1.0000, 1.6480, 1.0000, 1.9620]])
""")
add_docstr_all('select',
r"""
select(dim, index) -> Tensor
Slices the :attr:`self` tensor along the selected dimension at the given index.
This function returns a tensor with the given dimension removed.
Args:
dim (int): the dimension to slice
index (int): the index to select with
.. note::
:meth:`select` is equivalent to slicing. For example,
``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and
``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``.
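Example (illustrative sketch; output formatting may vary)::
>>> x = torch.tensor([[1, 2], [3, 4]])
>>> x.select(0, 1)
tensor([ 3,  4])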
""")
add_docstr_all('set_',
r"""
set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor
Sets the underlying storage, size, and strides. If :attr:`source` is a tensor,
:attr:`self` tensor will share the same storage and have the same size and
strides as :attr:`source`. Changes to elements in one tensor will be reflected
in the other.
If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying
storage, offset, size, and stride.
Args:
source (Tensor or Storage): the tensor or storage to use
storage_offset (int, optional): the offset in the storage
size (torch.Size, optional): the desired size. Defaults to the size of the source.
stride (tuple, optional): the desired stride. Defaults to C-contiguous strides.
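Example (illustrative sketch; the storage-sharing check is for illustration only)::
>>> t = torch.tensor([1., 2., 3.])
>>> u = torch.empty(0)
>>> u.set_(t)
tensor([ 1.,  2.,  3.])
>>> u.data_ptr() == t.data_ptr()
True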
""")
add_docstr_all('sigmoid',
r"""
sigmoid() -> Tensor
See :func:`torch.sigmoid`
""")
add_docstr_all('sigmoid_',
r"""
sigmoid_() -> Tensor
In-place version of :meth:`~Tensor.sigmoid`
""")
add_docstr_all('sign',
r"""
sign() -> Tensor
See :func:`torch.sign`
""")
add_docstr_all('sign_',
r"""
sign_() -> Tensor
In-place version of :meth:`~Tensor.sign`
""")
add_docstr_all('sin',
r"""
sin() -> Tensor
See :func:`torch.sin`
""")
add_docstr_all('sin_',
r"""
sin_() -> Tensor
In-place version of :meth:`~Tensor.sin`
""")
add_docstr_all('sinh',
r"""
sinh() -> Tensor
See :func:`torch.sinh`
""")
add_docstr_all('sinh_',
r"""
sinh_() -> Tensor
In-place version of :meth:`~Tensor.sinh`
""")
add_docstr_all('size',
r"""
size() -> torch.Size
Returns the size of the :attr:`self` tensor. The returned value is a subclass of
:class:`tuple`.
Example::
>>> torch.empty(3, 4, 5).size()
torch.Size([3, 4, 5])
""")
add_docstr_all('sort',
r"""
sort(dim=None, descending=False) -> (Tensor, LongTensor)
See :func:`torch.sort`
""")
add_docstr_all('sparse_dim',
r"""
sparse_dim() -> int
If :attr:`self` is a sparse COO tensor (i.e., with ``torch.sparse_coo`` layout),
this returns the number of sparse dimensions. Otherwise, this throws an
error.
See also :meth:`Tensor.dense_dim`.
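Example (illustrative sketch; assumes a COO sparse tensor)::
>>> s = torch.sparse_coo_tensor(torch.tensor([[0, 1]]), torch.tensor([1., 2.]), (3,))
>>> s.sparse_dim()
1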
""")
add_docstr_all('sqrt',
r"""
sqrt() -> Tensor
See :func:`torch.sqrt`
""")
add_docstr_all('sqrt_',
r"""
sqrt_() -> Tensor
In-place version of :meth:`~Tensor.sqrt`
""")
add_docstr_all('squeeze',
r"""
squeeze(dim=None) -> Tensor
See :func:`torch.squeeze`
""")
add_docstr_all('squeeze_',
r"""
squeeze_(dim=None) -> Tensor
In-place version of :meth:`~Tensor.squeeze`
""")
add_docstr_all('std',
r"""
std(dim=None, unbiased=True, keepdim=False) -> Tensor
See :func:`torch.std`
""")
add_docstr_all('storage',
r"""
storage() -> torch.Storage
Returns the underlying storage
""")
add_docstr_all('storage_offset',
r"""
storage_offset() -> int
Returns :attr:`self` tensor's offset in the underlying storage in terms of
number of storage elements (not bytes).
Example::
>>> x = torch.tensor([1, 2, 3, 4, 5])
>>> x.storage_offset()
0
>>> x[3:].storage_offset()
3
""")
add_docstr_all('stride',
r"""
stride(dim) -> tuple or int
Returns the stride of :attr:`self` tensor.
Stride is the jump necessary to go from one element to the next one in the
specified dimension :attr:`dim`. A tuple of all strides is returned when no
argument is passed in. Otherwise, an integer value is returned as the stride in
the particular dimension :attr:`dim`.
Args:
dim (int, optional): the desired dimension in which stride is required
Example::
>>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
>>> x.stride()
(5, 1)
>>> x.stride(0)
5
>>> x.stride(-1)
1
""")
add_docstr_all('sub',
r"""
sub(value, other) -> Tensor
Subtracts a scalar or tensor from :attr:`self` tensor. If both :attr:`value` and
:attr:`other` are specified, each element of :attr:`other` is scaled by
:attr:`value` before being used.
When :attr:`other` is a tensor, the shape of :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor.
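Example (illustrative sketch of the scaled form; output formatting may vary)::
>>> a = torch.tensor([10., 20.])
>>> a.sub(2, torch.tensor([1., 2.]))
tensor([  8.,  16.])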
""")
add_docstr_all('sub_',
r"""
sub_(x) -> Tensor
In-place version of :meth:`~Tensor.sub`
""")
add_docstr_all('sum',
r"""
sum(dim=None, keepdim=False, dtype=None) -> Tensor
See :func:`torch.sum`
""")
add_docstr_all('svd',
r"""
svd(some=True, compute_uv=True) -> (Tensor, Tensor, Tensor)
See :func:`torch.svd`
""")
add_docstr_all('symeig',
r"""
symeig(eigenvectors=False, upper=True) -> (Tensor, Tensor)
See :func:`torch.symeig`
""")
add_docstr_all('t',
r"""
t() -> Tensor
See :func:`torch.t`
""")
add_docstr_all('t_',
r"""
t_() -> Tensor
In-place version of :meth:`~Tensor.t`
""")
add_docstr_all('to',
r"""
to(*args, **kwargs) -> Tensor
Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are
inferred from the arguments of ``self.to(*args, **kwargs)``.
.. note::
If the ``self`` Tensor already
has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned.
Otherwise, the returned tensor is a copy of ``self`` with the desired
:class:`torch.dtype` and :class:`torch.device`.
Here are the ways to call ``to``:
.. function:: to(dtype, non_blocking=False, copy=False) -> Tensor
Returns a Tensor with the specified :attr:`dtype`
.. function:: to(device=None, dtype=None, non_blocking=False, copy=False) -> Tensor
Returns a Tensor with the specified :attr:`device` and (optional)
:attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
When :attr:`non_blocking`, tries to convert asynchronously with respect to
the host if possible, e.g., converting a CPU Tensor with pinned memory to a
CUDA Tensor.
When :attr:`copy` is set, a new Tensor is created even when the Tensor
already matches the desired conversion.
.. function:: to(other, non_blocking=False, copy=False) -> Tensor
Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as
the Tensor :attr:`other`. When :attr:`non_blocking`, tries to convert
asynchronously with respect to the host if possible, e.g., converting a CPU
Tensor with pinned memory to a CUDA Tensor.
When :attr:`copy` is set, a new Tensor is created even when the Tensor
already matches the desired conversion.
Example::
>>> tensor = torch.randn(2, 2) # Initially dtype=float32, device=cpu
>>> tensor.to(torch.float64)
tensor([[-0.5044, 0.0005],
[ 0.3310, -0.0584]], dtype=torch.float64)
>>> cuda0 = torch.device('cuda:0')
>>> tensor.to(cuda0)
tensor([[-0.5044, 0.0005],
[ 0.3310, -0.0584]], device='cuda:0')
>>> tensor.to(cuda0, dtype=torch.float64)
tensor([[-0.5044, 0.0005],
[ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
>>> other = torch.randn((), dtype=torch.float64, device=cuda0)
>>> tensor.to(other, non_blocking=True)
tensor([[-0.5044, 0.0005],
[ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
""")
add_docstr_all('byte',
r"""
byte() -> Tensor
``self.byte()`` is equivalent to ``self.to(torch.uint8)``. See :func:`to`.
""")
add_docstr_all('char',
r"""
char() -> Tensor
``self.char()`` is equivalent to ``self.to(torch.int8)``. See :func:`to`.
""")
add_docstr_all('double',
r"""
double() -> Tensor
``self.double()`` is equivalent to ``self.to(torch.float64)``. See :func:`to`.
""")
add_docstr_all('float',
r"""
float() -> Tensor
``self.float()`` is equivalent to ``self.to(torch.float32)``. See :func:`to`.
""")
add_docstr_all('half',
r"""
half() -> Tensor
``self.half()`` is equivalent to ``self.to(torch.float16)``. See :func:`to`.
""")
add_docstr_all('int',
r"""
int() -> Tensor
``self.int()`` is equivalent to ``self.to(torch.int32)``. See :func:`to`.
""")
add_docstr_all('long',
r"""
long() -> Tensor
``self.long()`` is equivalent to ``self.to(torch.int64)``. See :func:`to`.
""")
add_docstr_all('short',
r"""
short() -> Tensor
``self.short()`` is equivalent to ``self.to(torch.int16)``. See :func:`to`.
""")
add_docstr_all('take',
r"""
take(indices) -> Tensor
See :func:`torch.take`
""")
add_docstr_all('tan_',
r"""
tan_() -> Tensor
In-place version of :meth:`~Tensor.tan`
""")
add_docstr_all('tanh',
r"""
tanh() -> Tensor
See :func:`torch.tanh`
""")
add_docstr_all('tanh_',
r"""
tanh_() -> Tensor
In-place version of :meth:`~Tensor.tanh`
""")
add_docstr_all('tolist',
r"""
tolist() -> list or number
Returns the tensor as a (nested) list. For scalars, a standard
Python number is returned, just like with :meth:`~Tensor.item`.
Tensors are automatically moved to the CPU first if necessary.
This operation is not differentiable.
Examples::
>>> a = torch.randn(2, 2)
>>> a.tolist()
[[0.012766935862600803, 0.5415473580360413],
[-0.08909505605697632, 0.7729271650314331]]
>>> a[0,0].tolist()
0.012766935862600803
""")
add_docstr_all('topk',
r"""
topk(k, dim=None, largest=True, sorted=True) -> (Tensor, LongTensor)
See :func:`torch.topk`
""")
add_docstr_all('to_sparse',
r"""
to_sparse(sparseDims) -> Tensor
Returns a sparse copy of the tensor. PyTorch supports sparse tensors in
:ref:`coordinate format <sparse-docs>`.
Args:
sparseDims (int, optional): the number of sparse dimensions to include in the new sparse tensor
Example::
>>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]])
>>> d
tensor([[ 0, 0, 0],
[ 9, 0, 10],
[ 0, 0, 0]])
>>> d.to_sparse()
tensor(indices=tensor([[1, 1],
[0, 2]]),
values=tensor([ 9, 10]),
size=(3, 3), nnz=2, layout=torch.sparse_coo)
>>> d.to_sparse(1)
tensor(indices=tensor([[1]]),
values=tensor([[ 9, 0, 10]]),
size=(3, 3), nnz=1, layout=torch.sparse_coo)
""")
add_docstr_all('trace',
r"""
trace() -> Tensor
See :func:`torch.trace`
""")
add_docstr_all('transpose',
r"""
transpose(dim0, dim1) -> Tensor
See :func:`torch.transpose`
""")
add_docstr_all('transpose_',
r"""
transpose_(dim0, dim1) -> Tensor
In-place version of :meth:`~Tensor.transpose`
""")
add_docstr_all('tril',
r"""
tril(k=0) -> Tensor
See :func:`torch.tril`
""")
add_docstr_all('tril_',
r"""
tril_(k=0) -> Tensor
In-place version of :meth:`~Tensor.tril`
""")
add_docstr_all('triu',
r"""
triu(k=0) -> Tensor
See :func:`torch.triu`
""")
add_docstr_all('triu_',
r"""
triu_(k=0) -> Tensor
In-place version of :meth:`~Tensor.triu`
""")
add_docstr_all('trtrs',
r"""
trtrs(A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor)
See :func:`torch.trtrs`
""")
add_docstr_all('trunc',
r"""
trunc() -> Tensor
See :func:`torch.trunc`
""")
add_docstr_all('trunc_',
r"""
trunc_() -> Tensor
In-place version of :meth:`~Tensor.trunc`
""")
add_docstr_all('type',
r"""
type(dtype=None, non_blocking=False, **kwargs) -> str or Tensor
Returns the type if `dtype` is not provided, else casts this object to
the specified type.
If this is already of the correct type, no copy is performed and the
original object is returned.
Args:
dtype (type or string): The desired type
non_blocking (bool): If ``True``, and the source is in pinned memory
and destination is on the GPU or vice versa, the copy is performed
asynchronously with respect to the host. Otherwise, the argument
has no effect.
**kwargs: For compatibility, may contain the key ``async`` in place of
the ``non_blocking`` argument. The ``async`` arg is deprecated.
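Example (illustrative sketch; output formatting may vary)::
>>> t = torch.zeros(2)
>>> t.type()
'torch.FloatTensor'
>>> t.type(torch.int32)
tensor([ 0,  0], dtype=torch.int32)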
""")
add_docstr_all('type_as',
r"""
type_as(tensor) -> Tensor
Returns this tensor cast to the type of the given tensor.
This is a no-op if the tensor is already of the correct type. This is
equivalent to::
self.type(tensor.type())
Params:
tensor (Tensor): the tensor which has the desired type
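Example (illustrative sketch; output formatting may vary)::
>>> a = torch.tensor([1, 2])
>>> b = torch.tensor([1., 2.])
>>> a.type_as(b)
tensor([ 1.,  2.])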
""")
add_docstr_all('unfold',
r"""
unfold(dim, size, step) -> Tensor
Returns a tensor which contains all slices of size :attr:`size` from
:attr:`self` tensor in the dimension :attr:`dim`.
Step between two slices is given by :attr:`step`.
If `sizedim` is the size of dimension dim for :attr:`self`, the size of
dimension :attr:`dim` in the returned tensor will be
`(sizedim - size) / step + 1`.
An additional dimension of size :attr:`size` is appended in the returned tensor.
Args:
dim (int): dimension in which unfolding happens
size (int): the size of each slice that is unfolded
step (int): the step between each slice
Example::
>>> x = torch.arange(1., 8)
>>> x
tensor([ 1., 2., 3., 4., 5., 6., 7.])
>>> x.unfold(0, 2, 1)
tensor([[ 1., 2.],
[ 2., 3.],
[ 3., 4.],
[ 4., 5.],
[ 5., 6.],
[ 6., 7.]])
>>> x.unfold(0, 2, 2)
tensor([[ 1., 2.],
[ 3., 4.],
[ 5., 6.]])
""")
add_docstr_all('uniform_',
r"""
uniform_(from=0, to=1) -> Tensor
Fills :attr:`self` tensor with numbers sampled from the continuous uniform
distribution:
.. math::
P(x) = \dfrac{1}{\text{to} - \text{from}}
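Example (illustrative sketch; sampled values are random and not reproducible)::
>>> torch.empty(3).uniform_(0, 1)
tensor([ 0.1566,  0.7312,  0.0413])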
""")
add_docstr_all('unsqueeze',
r"""
unsqueeze(dim) -> Tensor
See :func:`torch.unsqueeze`
""")
add_docstr_all('unsqueeze_',
r"""
unsqueeze_(dim) -> Tensor
In-place version of :meth:`~Tensor.unsqueeze`
""")
add_docstr_all('var',
r"""
var(dim=None, unbiased=True, keepdim=False) -> Tensor
See :func:`torch.var`
""")
add_docstr_all('view',
r"""
view(*shape) -> Tensor
Returns a new tensor with the same data as the :attr:`self` tensor but of a
different :attr:`shape`.
The returned tensor shares the same data and must have the same number
of elements, but may have a different size. For a tensor to be viewed, the new
view size must be compatible with its original size and stride, i.e., each new
view dimension must either be a subspace of an original dimension, or only span
across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following
contiguity-like condition that :math:`\forall i = 0, \dots, k-1`,
.. math::
\text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]
Otherwise, :meth:`contiguous` needs to be called before the tensor can be
viewed. See also: :meth:`reshape`, which returns a view if the shapes are
compatible, and copies (equivalent to calling :meth:`contiguous`) otherwise.
Args:
shape (torch.Size or int...): the desired size
Example::
>>> x = torch.randn(4, 4)
>>> x.size()
torch.Size([4, 4])
>>> y = x.view(16)
>>> y.size()
torch.Size([16])
>>> z = x.view(-1, 8) # the size -1 is inferred from other dimensions
>>> z.size()
torch.Size([2, 8])
""")
add_docstr_all('view_as',
r"""
view_as(other) -> Tensor
View this tensor as the same size as :attr:`other`.
``self.view_as(other)`` is equivalent to ``self.view(other.size())``.
Please see :meth:`~Tensor.view` for more information about ``view``.
Args:
other (:class:`torch.Tensor`): The result tensor has the same size
as :attr:`other`.
""")
add_docstr_all('expand',
r"""
expand(*sizes) -> Tensor
Returns a new view of the :attr:`self` tensor with singleton dimensions expanded
to a larger size.
Passing -1 as the size for a dimension means not changing the size of
that dimension.
Tensor can be also expanded to a larger number of dimensions, and the
new ones will be appended at the front. For the new dimensions, the
size cannot be set to -1.
Expanding a tensor does not allocate new memory, but only creates a
new view on the existing tensor where a dimension of size one is
expanded to a larger size by setting the ``stride`` to 0. Any dimension
of size 1 can be expanded to an arbitrary value without allocating new
memory.
Args:
*sizes (torch.Size or int...): the desired expanded size
Example::
>>> x = torch.tensor([[1], [2], [3]])
>>> x.size()
torch.Size([3, 1])
>>> x.expand(3, 4)
tensor([[ 1, 1, 1, 1],
[ 2, 2, 2, 2],
[ 3, 3, 3, 3]])
>>> x.expand(-1, 4) # -1 means not changing the size of that dimension
tensor([[ 1, 1, 1, 1],
[ 2, 2, 2, 2],
[ 3, 3, 3, 3]])
""")
add_docstr_all('expand_as',
r"""
expand_as(other) -> Tensor
Expand this tensor to the same size as :attr:`other`.
``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.
Please see :meth:`~Tensor.expand` for more information about ``expand``.
Args:
other (:class:`torch.Tensor`): The result tensor has the same size
as :attr:`other`.
""")
add_docstr_all('zero_',
r"""
zero_() -> Tensor
Fills :attr:`self` tensor with zeros.
""")
add_docstr_all('matmul',
r"""
matmul(tensor2) -> Tensor
See :func:`torch.matmul`
""")
add_docstr_all('chunk',
r"""
chunk(chunks, dim=0) -> List of Tensors
See :func:`torch.chunk`
""")
add_docstr_all('stft',
r"""
stft(frame_length, hop, fft_size=None, return_onesided=True, window=None, pad_end=0) -> Tensor
See :func:`torch.stft`
""")
add_docstr_all('fft',
r"""
fft(signal_ndim, normalized=False) -> Tensor
See :func:`torch.fft`
""")
add_docstr_all('ifft',
r"""
ifft(signal_ndim, normalized=False) -> Tensor
See :func:`torch.ifft`
""")
add_docstr_all('rfft',
r"""
rfft(signal_ndim, normalized=False, onesided=True) -> Tensor
See :func:`torch.rfft`
""")
add_docstr_all('irfft',
r"""
irfft(signal_ndim, normalized=False, onesided=True, signal_sizes=None) -> Tensor
See :func:`torch.irfft`
""")
add_docstr_all('det',
r"""
det() -> Tensor
See :func:`torch.det`
""")
add_docstr_all('where',
r"""
where(condition, y) -> Tensor
``self.where(condition, y)`` is equivalent to ``torch.where(condition, self, y)``.
See :func:`torch.where`
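Example (illustrative sketch; output formatting may vary)::
>>> x = torch.tensor([-1., 2.])
>>> x.where(x > 0, torch.zeros_like(x))
tensor([ 0.,  2.])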
""")
add_docstr_all('logdet',
r"""
logdet() -> Tensor
See :func:`torch.logdet`
""")
add_docstr_all('slogdet',
r"""
slogdet() -> (Tensor, Tensor)
See :func:`torch.slogdet`
""")
add_docstr_all('unbind',
r"""
unbind(dim=0) -> seq
See :func:`torch.unbind`
""")
add_docstr_all('pinverse',
r"""
pinverse() -> Tensor
See :func:`torch.pinverse`
""")
add_docstr_all('grad',
r"""
This attribute is ``None`` by default and becomes a Tensor the first time a call to
:func:`backward` computes gradients for ``self``.
The attribute will then contain the gradients computed and future calls to
:func:`backward` will accumulate (add) gradients into it.
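Example (illustrative sketch)::
>>> w = torch.tensor([1., 2.], requires_grad=True)
>>> w.pow(2).sum().backward()
>>> w.grad
tensor([ 2.,  4.])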
""")
add_docstr_all('requires_grad',
r"""
Is ``True`` if gradients need to be computed for this Tensor, ``False`` otherwise.
.. note::
The fact that gradients need to be computed for a Tensor does not mean that the :attr:`grad`
attribute will be populated; see :attr:`is_leaf` for more details.
""")
add_docstr_all('is_leaf',
r"""
All Tensors that have :attr:`requires_grad` which is ``False`` will be leaf Tensors by convention.
Tensors that have :attr:`requires_grad` set to ``True`` are leaf Tensors only if they
were created by the user. This means that they are not the result of an operation and so
:attr:`grad_fn` is None.
Only leaf Tensors will have their :attr:`grad` populated during a call to :func:`backward`.
To get :attr:`grad` populated for non-leaf Tensors, you can use :func:`retain_grad`.
Example::
>>> a = torch.rand(10, requires_grad=True)
>>> a.is_leaf
True
>>> b = torch.rand(10, requires_grad=True).cuda()
>>> b.is_leaf
False
# b was created by the operation that cast a cpu Tensor into a cuda Tensor
>>> c = torch.rand(10, requires_grad=True) + 2
>>> c.is_leaf
False
# c was created by the addition operation
>>> d = torch.rand(10).cuda()
>>> d.is_leaf
True
# d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
>>> e = torch.rand(10).cuda().requires_grad_()
>>> e.is_leaf
True
# e requires gradients and has no operations creating it
>>> f = torch.rand(10, requires_grad=True, device="cuda")
>>> f.is_leaf
True
# f requires grad and has no operation creating it
""")
add_docstr_all('is_cuda',
r"""
Is ``True`` if the Tensor is stored on the GPU, ``False`` otherwise.
""")
add_docstr_all('device',
r"""
Is the :class:`torch.device` where this Tensor is.
""")
| mit | 8,127,286,000,224,170,000 | 22.59519 | 114 | 0.59671 | false |
peterheim1/robbie | bin/tf_head_tracker.py | 1 | 13024 | #!/usr/bin/env python
"""
tf_head_tracker.py - Version 1.0 2011-08-01
Move the head to track a PointStamped target on the /target_point topic.
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2011 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import roslib; roslib.load_manifest('robbie')
import rospy
import tf
from std_msgs.msg import Float64
from dynamixel_controllers.srv import *
from geometry_msgs.msg import PointStamped, Point
from sensor_msgs.msg import JointState, RegionOfInterest, CameraInfo
from math import radians, sqrt
import sys
""" A speed of exactly 0 has a special meaning for Dynamixel servos--namely, "move as fast as you can".
This can have some very undesirable consequences since it is the complete opposite of what 0 normally
means. So we define a very small speed value to represent zero speed.
"""
ZERO_SPEED = 0.0001
class tfTracker():
def __init__(self):
rospy.init_node('tf_head_tracker')
rospy.on_shutdown(self.shutdown)
""" How fast should we update the servos? """
self.rate = rospy.get_param('~rate', 10)
r = rospy.Rate(self.rate)
""" Joint speeds are given in radians per second """
self.default_joint_speed = rospy.get_param('~default_joint_speed', 0.3)
self.max_joint_speed = rospy.get_param('~max_joint_speed', 0.5)
""" How far ahead or behind the target (in radians) should we aim for? """
self.lead_target_angle = rospy.get_param('~lead_target_angle', 0.5)
""" How long (in seconds) should we permit the target to be lost before re-centering the servos? """
self.target_timeout = 3.0
self.target_lost = False
self.servos_centered = False
""" Remap these in the launch file or command line if necessary """
self.camera_link = 'head_cam_link'
self.head_pan_joint = 'head_pan_joint'
self.head_tilt_joint = 'head_tilt_joint'
self.head_pan_link = 'head_pan_link'
self.head_tilt_link = 'head_tilt_link'
self.dynamixels = rospy.get_param('dynamixels', '')
""" The pan/tilt thresholds indicate how far (in meters) the ROI needs to be off-center
before we make a movement. """
self.pan_threshold = rospy.get_param('~pan_threshold', 0.01)
self.tilt_threshold = rospy.get_param('~tilt_threshold', 0.01)
""" The k_pan and k_tilt parameter determine how responsive the servo movements are.
If these are set too high, oscillation can result. """
self.k_pan = rospy.get_param('~k_pan', 1.5)
self.k_tilt = rospy.get_param('~k_tilt', 1.5)
""" Set limits on how far we can pan or tilt """
self.max_pan = rospy.get_param('~max_pan', radians(145))
self.min_pan = rospy.get_param('~min_pan', radians(-145))
self.max_tilt = rospy.get_param('~max_tilt', radians(90))
self.min_tilt = rospy.get_param('~min_tilt', radians(-90))
self.servo_speed = dict()
self.servo_position = dict()
self.torque_enable = dict()
""" Connect to the set_speed and torque_enable services for each servo.
Also define a position publisher for each servo. """
for name in sorted(self.dynamixels):
try:
controller = name
# The set_speed services
set_speed_service = '/' + controller + '/set_speed'
rospy.wait_for_service(set_speed_service)
self.servo_speed[name] = rospy.ServiceProxy(set_speed_service, SetSpeed, persistent=True)
# Initialize the servo speed to the default_joint_speed
self.servo_speed[name](self.default_joint_speed)
# Torque enable/disable control for each servo
torque_service = '/' + controller + '/torque_enable'
rospy.wait_for_service(torque_service)
self.torque_enable[name] = rospy.ServiceProxy(torque_service, TorqueEnable)
# Start each servo in the disabled state so we can move them by hand
self.torque_enable[name](False)
# The position controllers
self.servo_position[name] = rospy.Publisher('/' + controller + '/command', Float64)
except:
rospy.loginfo("Can't contact servo services!")
rospy.loginfo("TF Tracker node started. Centering servos...")
self.pan_position = 0
self.tilt_position = 0
self.pan_speed = ZERO_SPEED
self.tilt_speed = ZERO_SPEED
self.last_tilt_speed = 0
self.last_pan_speed = 0
""" Use a counter to detect when we have lost the target """
self.tracking_seq = 0
self.last_tracking_seq = -1
self.target_lost_count = 0
self.max_target_lost_count = self.rate * 5
""" Center the pan and tilt servos at the start. """
self.center_head_servos()
""" Initialize tf listener """
self.tf = tf.TransformListener()
""" Make sure we can see the camera and head pan links """
self.tf.waitForTransform(self.camera_link, self.head_pan_link, rospy.Time(), rospy.Duration(5.0))
""" Wait also for the joint_states topic so we can track our own joint positions """
rospy.wait_for_message('joint_states', JointState)
self.joint_state = JointState()
rospy.Subscriber('joint_states', JointState, self.update_joint_state)
""" Subscribe to the target point topic """
#rospy.Subscriber('target_point', PointStamped, self.update_head_position)
rospy.Subscriber('roi', RegionOfInterest, self.update_head_position)
rospy.Subscriber('head_cam/rgb/camera_info', CameraInfo, self.getCameraInfo)
while not rospy.is_shutdown():
if self.last_tracking_seq == self.tracking_seq:
self.pan_speed = ZERO_SPEED
self.tilt_speed = ZERO_SPEED
self.target_lost_count += 1
else:
self.last_tracking_seq = self.tracking_seq
self.target_lost_count = 0
if self.target_lost_count > self.max_target_lost_count:
self.center_head_servos()
else:
try:
""" Only update the pan speed if it differs from the last value """
if self.last_pan_speed != self.pan_speed:
self.servo_speed[self.head_pan_joint](self.pan_speed)
self.last_pan_speed = self.pan_speed
self.servo_position[self.head_pan_joint].publish(self.pan_position)
except:
    """ If we run into any exceptions, momentarily stop the head movement by setting
    the target pan position to the current position. """
try:
current_pan_position = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)]
self.servo_position[self.head_pan_joint].publish(current_pan_position)
rospy.loginfo("Servo SetSpeed Exception!")
rospy.loginfo(sys.exc_info())
except:
pass
try:
""" Only update the tilt speed if it differs from the last value """
if self.last_tilt_speed != self.tilt_speed:
self.servo_speed[self.head_tilt_joint](self.tilt_speed)
self.last_tilt_speed = self.tilt_speed
self.servo_position[self.head_tilt_joint].publish(self.tilt_position)
except:
    """ If we run into any exceptions, momentarily stop the head movement by setting
    the target tilt position to the current position. """
try:
current_tilt_position = self.joint_state.position[self.joint_state.name.index(self.head_tilt_joint)]
self.servo_position[self.head_tilt_joint].publish(current_tilt_position)
rospy.loginfo("Servo SetSpeed Exception!")
rospy.loginfo(sys.exc_info())
except:
pass
r.sleep()
def center_head_servos(self):
try:
self.servo_speed[self.head_pan_joint](self.default_joint_speed)
self.servo_speed[self.head_tilt_joint](self.default_joint_speed)
for i in range(3):
self.servo_position[self.head_pan_joint].publish(0)
self.servo_position[self.head_tilt_joint].publish(0)
rospy.sleep(1)
except:
pass
def update_joint_state(self, msg):
self.joint_state = msg
def update_head_position(self, msg):
""" We increment a tracking counter upon receiving a target message so we can use the counter to
determine when we have lost the target. """
self.tracking_seq += 1
""" Check to see if we have lost the ROI. """
if msg.width == 0 or msg.height == 0 or msg.width > self.image_width / 2 or \
msg.height > self.image_height / 2:
self.center_head_servos()
return  # mod up to here
""" Compute the center of the ROI """
COG_x = msg.x_offset + msg.width / 2 - self.image_width / 2
COG_y = msg.y_offset + msg.height / 2 - self.image_height / 2
""" Pan the camera only if the displacement of the target point exceeds the threshold """
if abs(COG_x) > self.pan_threshold:
""" Set the pan speed proportion to the horizontal displacement of the target """
#self.pan_speed = trunc(min(self.max_joint_speed, max(ZERO_SPEED, self.k_pan * abs(COG_x))), 2)
""" Set the target position ahead or behind the current position """
try:
current_pan = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)]
except:
return
if COG_x > 0:
self.pan_position = max(self.min_pan, current_pan - self.lead_target_angle)
else:
self.pan_position = min(self.max_pan, current_pan + self.lead_target_angle)
else:
self.pan_speed = ZERO_SPEED
""" Tilt the camera only if the displacement of the target point exceeds the threshold """
if abs(COG_y) > self.tilt_threshold:
""" Set the pan speed proportion to the vertical displacement of the target """
#self.tilt_speed = trunc(min(self.max_joint_speed, max(ZERO_SPEED, self.k_tilt * abs(COG_y))), 2)
""" Set the target position ahead or behind the current position """
try:
current_tilt = self.joint_state.position[self.joint_state.name.index(self.head_tilt_joint)]
except:
return
if COG_y < 0:
self.tilt_position = max(self.min_tilt, current_tilt - self.lead_target_angle)
else:
self.tilt_position = min(self.max_tilt, current_tilt + self.lead_target_angle)
else:
self.tilt_speed = ZERO_SPEED
def getCameraInfo(self, msg):
self.image_width = msg.width
self.image_height = msg.height
def shutdown(self):
rospy.loginfo("Shutting down frame tracking node...")
self.center_head_servos()
# Relax all servos to give them a rest.
for servo in self.dynamixels:
self.torque_enable[servo](False)
def trunc(f, n):
'''Truncates/pads a float f to n decimal places without rounding'''
slen = len('%.*f' % (n, f))
return float(str(f)[:slen])
if __name__ == '__main__':
try:
tracker = tfTracker()
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo("TF tracking node is shut down.")
| gpl-3.0 | 2,277,179,604,030,267,000 | 43.29932 | 124 | 0.575937 | false |
kwameboame/newsdex | news/utils/twitter_utils.py | 1 | 5325 | # coding=utf-8
import json
import logging
import time
from datetime import datetime, timedelta
from django.utils import timezone
from tweepy import StreamListener, OAuthHandler, Stream, API
from news.models import TwitterUser, Tweet, TwitterAPISetting
from news.models.twitter import FilterKeyword, FilterLocation, TwitterStream
from news.utils.common import chunks
__author__ = 'ilov3'
logger = logging.getLogger(__name__)
def authenticate(api_settings=None):
if not api_settings:
try:
api_settings = TwitterAPISetting.objects.get()
except TwitterAPISetting.MultipleObjectsReturned:
logger.error('You have more than one twitter API settings! Go to admin page, and fix the problem.')
raise Exception()
except TwitterAPISetting.DoesNotExist:
logger.error('You haven\'t got any twitter API settings! Go to admin page, and add one.')
raise Exception()
auth = OAuthHandler(api_settings.consumer_key, api_settings.consumer_secret)
auth.set_access_token(api_settings.access_token, api_settings.access_token_secret)
return auth
class SaveListener(StreamListener):
def __init__(self, stream, api=None):
self.stream = stream
super().__init__(api)
def save_tweet(self, tweet):
dt_format = '%a %b %d %X %z %Y'
data = {
'text': tweet['text'],
'created_time': datetime.strptime(tweet['created_at'], dt_format).strftime('%Y-%m-%d %H:%M:%S'),
'tweet_id': tweet['id_str'],
'coordinates': tweet.get('coordinates', None),
'retweet_count': tweet.get('retweet_count', None),
'user': TwitterUser.objects.get(user_id=tweet['user']['id_str']),
'stream': self.stream,
}
Tweet.objects.get_or_create(tweet_id=data['tweet_id'], defaults=data)
@staticmethod
def save_twitter_user(user):
data = {
'user_id': user['id_str'],
'name': user['name'],
'location': user.get('location'),
'description': user.get('description'),
}
TwitterUser.objects.get_or_create(user_id=data['user_id'], defaults=data)
@staticmethod
def is_retweet(tweet):
if 'retweeted_status' in tweet:
logger.debug('Retweet found: %s' % tweet['text'])
return True
return False
def process_retweet(self, retweet):
logger.debug('Getting original tweet from retweet')
original_tweet = retweet['retweeted_status']
self.save_twitter_user(original_tweet['user'])
self.save_tweet(original_tweet)
def on_data(self, data):
try:
tweet = json.loads(data)
logger.debug('%s %s:%s' % (tweet['created_at'], tweet['user']['name'], tweet['text']))
if not self.is_retweet(tweet):
self.save_twitter_user(tweet['user'])
self.save_tweet(tweet)
else:
self.process_retweet(tweet)
return True
except Exception as e:
logger.error(e)
time.sleep(2)
def on_error(self, status):
logger.error('Error: status code %s' % status)
def subscribe_on_stream(task_id, api_settings=None, keyword=None, location=None):
logger.debug('Starting to parse twitter stream on keyword/location: "%s"' % (keyword or location))
assert not (keyword and location), logger.error('Error: can\'t fetch by keyword and location at the same time!')
assert keyword or location, logger.error('Neither keyword nor location param is given')
auth = authenticate(api_settings)
if keyword:
filter_keyword, created = FilterKeyword.objects.get_or_create(keyword=keyword)
stream_obj = TwitterStream.objects.create(filter_keyword=filter_keyword, celery_task_id=task_id)
l = SaveListener(stream=stream_obj)
stream = Stream(auth, l)
stream.filter(track=[keyword])
if location:
filter_location, created = FilterLocation.objects.get_or_create(west_limit=location[0], south_limit=location[1], east_limit=location[2], north_limit=location[3])
stream_obj = TwitterStream.objects.create(filter_location=filter_location, celery_task_id=task_id)
l = SaveListener(stream=stream_obj)
stream = Stream(auth, l)
stream.filter(locations=location)
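# Illustrative usage sketch (hypothetical task id and bounding box; assumes a
# TwitterAPISetting record has been configured in the admin):
#   subscribe_on_stream('manual-run', keyword='python')
#   subscribe_on_stream('manual-run', location=[-74.3, 40.5, -73.7, 40.9])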
def count_retweets():
auth = authenticate()
api = API(auth)
week_ago = timezone.now() - timedelta(days=7)
tweets_ids = Tweet.objects.filter(created_time__gt=week_ago).values_list('tweet_id', flat=True)
logger.debug('Count retweets for %s tweets from %s' % (tweets_ids.count(), week_ago))
try:
for chunk in chunks(tweets_ids, 100):
for tweet in api.statuses_lookup(chunk):
try:
tweet_obj = Tweet.objects.get(tweet_id=tweet.id_str)
logger.debug('Tweet %s::before - %s retweets, after - %s retweets' % (tweet_obj.tweet_id, tweet_obj.retweet_count, tweet.retweet_count))
tweet_obj.retweet_count = tweet.retweet_count
tweet_obj.save()
except Exception as e:
logger.error(e)
except Exception as e:
logger.error(e)
logger.debug('Finish count retweets!')
| bsd-2-clause | -7,017,367,773,884,610,000 | 40.601563 | 169 | 0.627793 | false |
rangermeier/flaskberry | flaskberry/models/disk.py | 1 | 3422 | # -*- coding: utf-8 -*-
import subprocess
import re
import os
import psutil
MOUNTPOINTS = ["/home/media/disks/usb%s" % i for i in range(8)]
class Disk(dict):
def __init__(self, **args):
self.mounted = False
if args.has_key("uuid"):
self.uuid = args["uuid"]
if self.uuid_exists():
self.get_device()
self.get_id()
if args.has_key("dev"):
self.dev = args["dev"]
self.get_id()
if args.has_key("partition"):
self.set_partition_info(args["partition"])
self.get_id();
def set_partition_info(self, info):
self.dev = info.device
self.mountpoint = info.mountpoint
self.type = info.fstype
self.options = info.opts
self.mounted = True
def get_usage(self):
if not self.is_mounted():
return
self.usage = psutil.disk_usage(self.mountpoint)
def get_id(self):
blkid = subprocess.check_output(["sudo", "blkid", "-p", self.dev])
#/dev/sdb1: LABEL="Kingston" UUID="1C86-3319" VERSION="FAT32" TYPE="vfat"
fields = ["label", "uuid", "version", "type"]
for field in fields:
regexp = '%s="(.+?)"' % field.upper()
parts = re.search(regexp, blkid)
if parts:
self[field] = parts.groups()[0]
def get_device(self):
if not self.has_key("dev"):
self.dev = subprocess.check_output(["sudo", "blkid", "-U", self.uuid]).rstrip()
return self.dev
def is_mounted(self):
if not self.has_key("mounted"):
df = subprocess.check_output(["df", "-hT", self.dev]).splitlines()[1]
if re.search("/dev$", df):
self.mounted = False
else:
self.mounted = True
return self.mounted
def is_mountable(self):
mountable = False;
if self.has_key("uuid") and self.has_key("type"):
if not self["type"].startswith("crypto_"):
if self["type"] != "swap":
mountable = True
return mountable
def uuid_exists(self):
return os.path.exists("/dev/disk/by-uuid/%s" % self.uuid)
def find_mountpoint(self):
# look for fstab entries
with open("/etc/fstab") as fstab:
regexp = re.compile("UUID=%s\s+?(/.*?)\s" % self.uuid)
for line in fstab.readlines():
match = regexp.match(line)
if match:
return match.groups()[0]
# try empty media directories
mi = iter(MOUNTPOINTS)
mountpoint = mi.next()
while os.path.exists(mountpoint) and os.listdir(mountpoint):
    try:
        mountpoint = mi.next()
    except StopIteration:
        return
if not os.path.exists(mountpoint):
return None
return mountpoint
def mount(self):
mountpoint = self.find_mountpoint()
if mountpoint and not self.is_mounted() and self.uuid_exists():
subprocess.call(["sudo", "/bin/mount",
"/dev/disk/by-uuid/%s" % self.uuid, mountpoint])
self.mounted = True
return True
return False
def unmount(self):
if self.uuid_exists():
return subprocess.call(["sudo", "/bin/umount", "/dev/disk/by-uuid/%s" % self.uuid])
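# Illustrative usage sketch (hypothetical UUID; assumes the disk is attached):
#   disk = Disk(uuid="1C86-3319")
#   if disk.is_mountable() and not disk.is_mounted():
#       disk.mount()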
| mit | 7,954,179,522,211,220,000 | 32.54902 | 95 | 0.527177 | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_1_0_6/models/imagingobjectselection.py | 1 | 9601 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/ImagingObjectSelection) on 2016-06-23.
# 2016, SMART Health IT.
from . import domainresource
class ImagingObjectSelection(domainresource.DomainResource):
""" Key Object Selection.
A manifest of a set of DICOM Service-Object Pair Instances (SOP Instances).
The referenced SOP Instances (images or other content) are for a single
patient, and may be from one or more studies. The referenced SOP Instances
have been selected for a purpose, such as quality assurance, conference, or
consult. Reflecting that range of purposes, typical ImagingObjectSelection
resources may include all SOP Instances in a study (perhaps for sharing
through a Health Information Exchange); key images from multiple studies
(for reference by a referring or treating physician); a multi-frame
ultrasound instance ("cine" video clip) and a set of measurements taken
from that instance (for inclusion in a teaching file); and so on.
"""
resource_name = "ImagingObjectSelection"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.author = None
""" Author (human or machine).
Type `FHIRReference` referencing `Practitioner, Device, Organization, Patient, RelatedPerson` (represented as `dict` in JSON). """
self.authoringTime = None
""" Authoring time of the selection.
Type `FHIRDate` (represented as `str` in JSON). """
self.description = None
""" Description text.
Type `str`. """
self.patient = None
""" Patient of the selected objects.
Type `FHIRReference` referencing `Patient` (represented as `dict` in JSON). """
self.study = None
""" Study identity of the selected instances.
List of `ImagingObjectSelectionStudy` items (represented as `dict` in JSON). """
self.title = None
""" Reason for selection.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.uid = None
""" Instance UID.
Type `str`. """
super(ImagingObjectSelection, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImagingObjectSelection, self).elementProperties()
js.extend([
("author", "author", fhirreference.FHIRReference, False, None, False),
("authoringTime", "authoringTime", fhirdate.FHIRDate, False, None, False),
("description", "description", str, False, None, False),
("patient", "patient", fhirreference.FHIRReference, False, None, True),
("study", "study", ImagingObjectSelectionStudy, True, None, True),
("title", "title", codeableconcept.CodeableConcept, False, None, True),
("uid", "uid", str, False, None, True),
])
return js
from . import backboneelement
class ImagingObjectSelectionStudy(backboneelement.BackboneElement):
""" Study identity of the selected instances.
Study identity and locating information of the DICOM SOP instances in the
selection.
"""
resource_name = "ImagingObjectSelectionStudy"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.imagingStudy = None
""" Reference to ImagingStudy.
Type `FHIRReference` referencing `ImagingStudy` (represented as `dict` in JSON). """
self.series = None
""" Series identity of the selected instances.
List of `ImagingObjectSelectionStudySeries` items (represented as `dict` in JSON). """
self.uid = None
""" Study instance UID.
Type `str`. """
self.url = None
""" Retrieve study URL.
Type `str`. """
super(ImagingObjectSelectionStudy, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImagingObjectSelectionStudy, self).elementProperties()
js.extend([
("imagingStudy", "imagingStudy", fhirreference.FHIRReference, False, None, False),
("series", "series", ImagingObjectSelectionStudySeries, True, None, True),
("uid", "uid", str, False, None, True),
("url", "url", str, False, None, False),
])
return js
class ImagingObjectSelectionStudySeries(backboneelement.BackboneElement):
""" Series identity of the selected instances.
Series identity and locating information of the DICOM SOP instances in the
selection.
"""
resource_name = "ImagingObjectSelectionStudySeries"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.instance = None
""" The selected instance.
List of `ImagingObjectSelectionStudySeriesInstance` items (represented as `dict` in JSON). """
self.uid = None
""" Series instance UID.
Type `str`. """
self.url = None
""" Retrieve series URL.
Type `str`. """
super(ImagingObjectSelectionStudySeries, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImagingObjectSelectionStudySeries, self).elementProperties()
js.extend([
("instance", "instance", ImagingObjectSelectionStudySeriesInstance, True, None, True),
("uid", "uid", str, False, None, False),
("url", "url", str, False, None, False),
])
return js
class ImagingObjectSelectionStudySeriesInstance(backboneelement.BackboneElement):
""" The selected instance.
Identity and locating information of the selected DICOM SOP instances.
"""
resource_name = "ImagingObjectSelectionStudySeriesInstance"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.frames = None
""" The frame set.
List of `ImagingObjectSelectionStudySeriesInstanceFrames` items (represented as `dict` in JSON). """
self.sopClass = None
""" SOP class UID of instance.
Type `str`. """
self.uid = None
""" Selected instance UID.
Type `str`. """
self.url = None
""" Retrieve instance URL.
Type `str`. """
super(ImagingObjectSelectionStudySeriesInstance, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImagingObjectSelectionStudySeriesInstance, self).elementProperties()
js.extend([
("frames", "frames", ImagingObjectSelectionStudySeriesInstanceFrames, True, None, False),
("sopClass", "sopClass", str, False, None, True),
("uid", "uid", str, False, None, True),
("url", "url", str, False, None, True),
])
return js
class ImagingObjectSelectionStudySeriesInstanceFrames(backboneelement.BackboneElement):
""" The frame set.
Identity and location information of the frames in the selected instance.
"""
resource_name = "ImagingObjectSelectionStudySeriesInstanceFrames"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.frameNumbers = None
""" Frame numbers.
List of `int` items. """
self.url = None
""" Retrieve frame URL.
Type `str`. """
super(ImagingObjectSelectionStudySeriesInstanceFrames, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImagingObjectSelectionStudySeriesInstanceFrames, self).elementProperties()
js.extend([
("frameNumbers", "frameNumbers", int, True, None, True),
("url", "url", str, False, None, True),
])
return js
from . import codeableconcept
from . import fhirdate
from . import fhirreference
| bsd-3-clause | 9,194,539,053,608,248,000 | 37.404 | 138 | 0.629726 | false |
tochikuji/pyPyrTools | pyrtools/blurDn.py | 1 | 2593 | import numpy
from .namedFilter import namedFilter
from .corrDn import corrDn
def blurDn(*args):
''' RES = blurDn(IM, LEVELS, FILT)
Blur and downsample an image. The blurring is done with filter
kernel specified by FILT (default = 'binom5'), which can be a string
(to be passed to namedFilter), a vector (applied separably as a 1D
convolution kernel in X and Y), or a matrix (applied as a 2D
convolution kernel). The downsampling is always by 2 in each
direction.
The procedure is applied recursively LEVELS times (default=1).
Eero Simoncelli, 3/97. Ported to python by Rob Young 4/14
function res = blurDn(im, nlevs, filt) '''
if len(args) == 0:
print("Error: image input parameter required.")
return
im = numpy.array(args[0])
# optional args
if len(args) > 1:
nlevs = args[1]
else:
nlevs = 1
if len(args) > 2:
filt = args[2]
if isinstance(filt, str):
filt = namedFilter(filt)
else:
filt = namedFilter('binom5')
if filt.shape[0] == 1 or filt.shape[1] == 1:
filt = [x / sum(filt) for x in filt]
else:
filt = [x / sum(sum(filt)) for x in filt]
filt = numpy.array(filt)
if nlevs > 1:
im = blurDn(im, nlevs - 1, filt)
if nlevs >= 1:
if len(im.shape) == 1 or im.shape[0] == 1 or im.shape[1] == 1:
# 1D image
if len(filt.shape) > 1 and (filt.shape[0] != 1 and filt.shape[1] != 1):
# >1D filter
print('Error: Cannot apply 2D filter to 1D signal')
return
# orient filter and image correctly
if im.shape[0] == 1:
if len(filt.shape) == 1 or filt.shape[1] == 1:
filt = filt.T
else:
if filt.shape[0] == 1:
filt = filt.T
res = corrDn(image=im, filt=filt, step=(2, 2))
if len(im.shape) == 1 or im.shape[1] == 1:
res = numpy.reshape(res, (int(numpy.ceil(im.shape[0] / 2.0)), 1))
else:
res = numpy.reshape(res, (1, int(numpy.ceil(im.shape[1] / 2.0))))
elif len(filt.shape) == 1 or filt.shape[0] == 1 or filt.shape[1] == 1:
# 2D image and 1D filter
res = corrDn(image=im, filt=filt.T, step=(2, 1))
res = corrDn(image=res, filt=filt, step=(1, 2))
else: # 2D image and 2D filter
res = corrDn(image=im, filt=filt, step=(2, 2))
else:
res = im
return res
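# Usage sketch (assumes `im` is a 2D numpy array; 'binom5' is the default
# kernel resolved through namedFilter):
#   quarter = blurDn(im, 2)        # blur and downsample twice -> 1/4 size
#   half = blurDn(im, 1, 'qmf9')   # one level with another named filter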
| mit | 7,257,363,821,098,295,000 | 32.675325 | 83 | 0.528731 | false |
D4N/FSM_exercise_class | sheet_3/2d_plot.py | 1 | 2451 | #-*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
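# Expected input: one whitespace-separated line per timestep of the form
#   t E x0 y0 z0 ? ? ? x1 y1 z1 ? ? ? ...
# i.e. time, total energy, then six columns per object, of which only the
# first three (x, y, z) are read here; the remaining three are presumably
# velocity components.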
class simulation_output(object):
def __init__(self, filename):
self.__filename = str(filename)
self.get_data_from_file()
def get_data_from_file(self):
self.__data_count = sum(1 for line in open(self.__filename))
self.__time = np.zeros((self.__data_count))
self.__energy = np.zeros((self.__data_count))
with open(self.__filename, 'r') as data:
first_line = data.readline()
tmp = first_line.split(' ')
self.__object_count = int((len(tmp) - 2)/6)
self.__x = np.zeros((self.__object_count, self.__data_count))
self.__y = np.zeros((self.__object_count, self.__data_count))
self.__z = np.zeros((self.__object_count, self.__data_count))
with open(self.__filename, 'r') as data:
j = 0
for line in data:
tmp = line.split(' ')
self.__time[j] = float(tmp[0])
self.__energy[j] = float(tmp[1])
for i in xrange(self.__object_count):
self.__x[i,j] = float(tmp[2+6*i])
self.__y[i,j] = float(tmp[3+6*i])
self.__z[i,j] = float(tmp[4+6*i])
j += 1
def plot_data(self, plot_type = "xy"):
if not plot_type in ["xy", "yz", "xz", "xyz", "energy"]:
raise ValueError("Possible values for the plot_type are: xy, yz, xz, xyz and energy")
self.fig = plt.figure()
if plot_type == "xyz":
self.ax = self.fig.add_subplot(111, projection='3d')
else:
self.ax = self.fig.add_subplot(111)
if plot_type == "xy":
for i in xrange(self.__object_count):
self.ax.plot(self.__x[i], self.__y[i])
elif plot_type == "yz":
for i in xrange(self.__object_count):
self.ax.plot(self.__y[i], self.__z[i])
elif plot_type == "xz":
for i in xrange(self.__object_count):
self.ax.plot(self.__x[i], self.__z[i])
elif plot_type == "xyz":
for i in xrange(self.__object_count):
self.ax.plot(self.__x[i], self.__y[i], self.__z[i])
elif plot_type == "energy":
self.ax.plot(self.__time, self.__energy)
self.ax.set_xlabel(plot_type[0])
self.ax.set_ylabel(plot_type[1])
if plot_type == "xyz":
self.ax.set_zlabel("z")
elif plot_type == "energy":
self.ax.set_xlabel("t")
self.ax.set_ylabel(r"$E_{tot}$")
if not plot_type == "xyz":
plt.grid()
plt.show()
plt.close()
if __name__ == '__main__':
import sys
S = simulation_output(sys.argv[1])
S.plot_data(sys.argv[2])
| gpl-3.0 | -4,800,749,468,022,277,000 | 22.796117 | 88 | 0.598939 | false |
twilio/twilio-python | tests/integration/insights/v1/call/test_event.py | 1 | 7025 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class EventTestCase(IntegrationTestCase):
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.insights.v1.calls("CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.events.list()
self.holodeck.assert_has_request(Request(
'get',
'https://insights.twilio.com/v1/Voice/CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Events',
))
def test_read_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Events?PageSize=50&Page=0",
"previous_page_url": null,
"next_page_url": null,
"key": "events",
"url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Events?PageSize=50&Page=0"
},
"events": [
{
"timestamp": "2019-09-19T22:15:23Z",
"call_sid": "CA03a02b156c6faa96c86906f7e9ad0f38",
"account_sid": "AC998c10b68cbfda9f67277f7d8f4439c9",
"edge": "sdk_edge",
"group": "connection",
"name": "error",
"level": "ERROR",
"sdk_edge": {
"error": {
"code": 31600
},
"metadata": {
"client_name": "GTI9300323095d271b890c91568931321395",
"location": {
"lat": 37.4192,
"lon": -122.0574
},
"city": "Mountain View",
"country_code": "US",
"country_subdivision": "California",
"ip_address": "108.177.7.83",
"sdk": {
"type": "twilio-voice-android",
"version": "4.5.1",
"platform": "android",
"selected_region": "gll",
"os": {
"name": "android",
"version": "4.3"
},
"device": {
"model": "GT-I9300",
"type": "GT-I9300",
"vendor": "samsung",
"arch": "armeabi-v7a"
}
}
}
},
"client_edge": null,
"carrier_edge": null,
"sip_edge": null
}
]
}
'''
))
actual = self.client.insights.v1.calls("CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.events.list()
self.assertIsNotNone(actual)
def test_read_deep_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 10,
"page_size": 5,
"first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Events?PageSize=5&Page=0",
"previous_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Events?PageSize=5&Page=9&PageToken=DP10",
"next_page_url": null,
"key": "events",
"url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Events?PageSize=5&Page=10"
},
"events": [
{
"timestamp": "2019-09-19T22:15:23Z",
"call_sid": "CA03a02b156c6faa96c86906f7e9ad0f38",
"account_sid": "AC998c10b68cbfda9f67277f7d8f4439c9",
"edge": "sdk_edge",
"group": "connection",
"name": "error",
"level": "ERROR",
"sdk_edge": {
"error": {
"code": 31600
},
"metadata": {
"client_name": "GTI9300323095d271b890c91568931321395",
"location": {
"lat": 37.4192,
"lon": -122.0574
},
"city": "Mountain View",
"country_code": "US",
"country_subdivision": "California",
"ip_address": "108.177.7.83",
"sdk": {
"type": "twilio-voice-android",
"version": "4.5.1",
"platform": "android",
"selected_region": "gll",
"os": {
"name": "android",
"version": "4.3"
},
"device": {
"model": "GT-I9300",
"type": "GT-I9300",
"vendor": "samsung",
"arch": "armeabi-v7a"
}
}
}
},
"client_edge": null,
"carrier_edge": null,
"sip_edge": null
}
]
}
'''
))
actual = self.client.insights.v1.calls("CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.events.list()
self.assertIsNotNone(actual)
| mit | -8,465,989,922,263,330,000 | 41.575758 | 155 | 0.34306 | false |
openfisca/legislation-ipp-to-code | ipp_tax_benefit_tables_to_openfisca_parameters.py | 1 | 24949 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Extract parameters from IPP's tax benefit tables.
Note: Currently this script requires an XLS version of the tables. XLSX files must be converted to XLS before use.
IPP = Institut des politiques publiques
http://www.ipp.eu/en/tools/ipp-tax-and-benefit-tables/
http://www.ipp.eu/fr/outils/baremes-ipp/
"""
import argparse
import collections
import datetime
import itertools
import logging
import os
import re
import sys
import textwrap
from biryani import baseconv, custom_conv, datetimeconv, states
from biryani import strings
import xlrd
app_name = os.path.splitext(os.path.basename(__file__))[0]
baremes = [
# u'Chomage',
# u'Impot Revenu',
# u'Marche du travail',
u'prelevements sociaux',
# u'Prestations',
# u'Taxation indirecte',
# u'Taxation du capital',
# u'Taxes locales',
]
conv = custom_conv(baseconv, datetimeconv, states)
forbiden_sheets = {
# u'Impot Revenu': (u'Barème IGR',),
u'prelevements sociaux': (
u'ASSIETTE PU',
u'AUBRYI',
# u'AUBRYII',
u'CNRACL',
u'FILLON',
),
# u'Taxation indirecte': (u'TVA par produit',),
}
french_date_re = re.compile(ur'(?P<day>0?[1-9]|[12]\d|3[01])/(?P<month>0?[1-9]|1[0-2])/(?P<year>[12]\d{3})$')
log = logging.getLogger(app_name)
N_ = lambda message: message
parameters = []
year_re = re.compile(ur'[12]\d{3}$')
def input_to_french_date(value, state = None):
if value is None:
return None, None
if state is None:
state = conv.default_state
match = french_date_re.match(value)
if match is None:
return value, state._(u'Invalid french date')
return datetime.date(int(match.group('year')), int(match.group('month')), int(match.group('day'))), None
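# Example: input_to_french_date(u'14/7/1998') -> (datetime.date(1998, 7, 14), None)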
cell_to_date = conv.condition(
conv.test_isinstance(int),
conv.pipe(
conv.test_between(1914, 2020),
conv.function(lambda year: datetime.date(year, 1, 1)),
),
conv.pipe(
conv.test_isinstance(basestring),
conv.first_match(
conv.pipe(
conv.test(lambda date: year_re.match(date), error = 'Not a valid year'),
conv.function(lambda year: datetime.date(year, 1, 1)),
),
input_to_french_date,
conv.iso8601_input_to_date,
),
),
)
# currency_converter = conv.first_match(
# conv.pipe(
# conv.test_isinstance(basestring),
# conv.cleanup_line,
# conv.test_none(),
# ),
# conv.pipe(
# conv.test_isinstance(tuple),
# conv.test(lambda couple: len(couple) == 2, error = N_(u"Invalid couple length")),
# conv.struct(
# (
# conv.pipe(
# conv.test_isinstance((float, int)),
# conv.not_none,
# ),
# conv.pipe(
# conv.test_isinstance(basestring),
# conv.test_in([
# u'%',
# u'EUR',
# u'FRF',
# ]),
# ),
# ),
# ),
# ),
# )
currency_or_number_converter = conv.first_match(
conv.test_isinstance(float),
conv.test_isinstance(int),
conv.pipe(
conv.test_isinstance(basestring),
conv.cleanup_line,
conv.test_none(),
),
conv.pipe(
conv.test_isinstance(tuple),
conv.test(lambda couple: len(couple) == 2, error = N_(u"Invalid couple length")),
conv.struct(
(
conv.pipe(
conv.test_isinstance((float, int)),
conv.not_none,
),
conv.pipe(
conv.test_isinstance(basestring),
conv.test_in([
u'%',
u'EUR',
u'FRF',
]),
),
),
),
),
)
def rename_keys(new_key_by_old_key):
def rename_keys_converter(value, state = None):
if value is None:
return value, None
renamed_value = value.__class__()
for item_key, item_value in value.iteritems():
renamed_value[new_key_by_old_key.get(item_key, item_key)] = item_value
return renamed_value, None
return rename_keys_converter
values_row_converter = conv.pipe(
rename_keys({
u"Date d'effet": u"Date d'entrée en vigueur",
u"Note": u"Notes",
u"Publication au JO": u"Parution au JO",
u"Publication JO": u"Parution au JO",
u"Publication JO": u"Parution au JO",
u"Référence": u"Références législatives",
u"Référence législative": u"Références législatives",
u"Références législatives (taux d'appel)": u"Références législatives",
u"Références législatives (taux de cotisation)": u"Références législatives",
u"Références législatives ou BOI": u"Références législatives",
u"Remarques": u"Notes",
}),
conv.struct(
collections.OrderedDict((
(u"Date d'entrée en vigueur", conv.pipe(
conv.test_isinstance(basestring),
conv.iso8601_input_to_date,
conv.not_none,
)),
(u"Références législatives", conv.pipe(
conv.test_isinstance(basestring),
conv.cleanup_line,
)),
(u"Parution au JO", conv.pipe(
conv.test_isinstance(basestring),
conv.iso8601_input_to_date,
conv.date_to_iso8601_str,
)),
(u"Notes", conv.pipe(
conv.test_isinstance(basestring),
conv.cleanup_line,
)),
(None, conv.pipe(
conv.test_isinstance(basestring),
conv.cleanup_line,
conv.test_none(),
)),
)),
default = currency_or_number_converter,
),
)
def escape_xml(value):
if value is None:
return value
if isinstance(value, str):
return value.decode('utf-8')
if not isinstance(value, unicode):
value = unicode(value)
return value.replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"')
def get_hyperlink(sheet, row_index, column_index):
return sheet.hyperlink_map.get((row_index, column_index))
def get_unmerged_cell_coordinates(row_index, column_index, merged_cells_tree):
unmerged_cell_coordinates = merged_cells_tree.get(row_index, {}).get(column_index)
if unmerged_cell_coordinates is None:
return row_index, column_index
return unmerged_cell_coordinates
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dir', default = 'Baremes_IPP_2015', help = 'path of IPP XLS directory')
parser.add_argument('-v', '--verbose', action = 'store_true', default = False, help = "increase output verbosity")
args = parser.parse_args()
# args.dir = path
logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)
root_node = dict(
children = [],
name = "root",
text = textwrap.dedent(u"""\
Ce document présente l'ensemble de la législation permettant le calcul des contributions sociales, taxes sur
les salaires et cotisations sociales. Il s'agit des barèmes bruts de la législation utilisés dans le
micro-simulateur de l'IPP, TAXIPP. Les sources législatives (texte de loi, numéro du décret ou arrêté) ainsi
que la date de publication au Journal Officiel de la République française (JORF) sont systématiquement
indiquées. La première ligne du fichier (masquée) indique le nom des paramètres dans TAXIPP.
Citer cette source :
Barèmes IPP: prélèvements sociaux, Institut des politiques publiques, avril 2014.
Auteurs :
Antoine Bozio, Julien Grenet, Malka Guillot, Laura Khoury et Marianne Tenand
Contacts :
[email protected]; [email protected]; [email protected]
Licence :
Licence ouverte / Open Licence
""").split(u'\n'),
title = u"Barème IPP",
type = u'NODE',
)
for bareme in baremes:
xls_path = os.path.join(args.dir.decode('utf-8'), u"Baremes IPP - {0}.xls".format(bareme))
if not os.path.exists(xls_path):
log.warning("Skipping file {} that doesn't exist: {}".format(bareme, xls_path))
continue
log.info(u'Parsing file {}'.format(bareme))
book = xlrd.open_workbook(filename = xls_path, formatting_info = True)
sheet_names = [
sheet_name
for sheet_name in book.sheet_names()
if not sheet_name.startswith((u'Abréviations', u'Outline')) and sheet_name not in forbiden_sheets.get(
bareme, [])
]
sheet_title_by_name = {}
for sheet_name in sheet_names:
log.info(u' Parsing sheet {}'.format(sheet_name))
sheet = book.sheet_by_name(sheet_name)
# Extract coordinates of merged cells.
merged_cells_tree = {}
for row_low, row_high, column_low, column_high in sheet.merged_cells:
for row_index in range(row_low, row_high):
cell_coordinates_by_merged_column_index = merged_cells_tree.setdefault(
row_index, {})
for column_index in range(column_low, column_high):
cell_coordinates_by_merged_column_index[column_index] = (row_low, column_low)
if sheet_name.startswith(u'Sommaire'):
# Associate the titles of the sheets to their Excel names.
for row_index in range(sheet.nrows):
linked_sheet_number = transform_xls_cell_to_json(book, sheet, merged_cells_tree, row_index, 2)
if isinstance(linked_sheet_number, int):
linked_sheet_title = transform_xls_cell_to_str(book, sheet, merged_cells_tree, row_index, 3)
if linked_sheet_title is not None:
hyperlink = get_hyperlink(sheet, row_index, 3)
if hyperlink is not None and hyperlink.type == u'workbook':
linked_sheet_name = hyperlink.textmark.split(u'!', 1)[0].strip(u'"').strip(u"'")
sheet_title_by_name[linked_sheet_name] = linked_sheet_title
continue
descriptions_rows = []
labels_rows = []
notes_rows = []
state = 'taxipp_names'
taxipp_names_row = None
values_rows = []
for row_index in range(sheet.nrows):
columns_count = len(sheet.row_values(row_index))
if state == 'taxipp_names':
taxipp_names_row = [
taxipp_name
for taxipp_name in (
transform_xls_cell_to_str(book, sheet, merged_cells_tree, row_index, column_index)
for column_index in range(columns_count)
)
]
state = 'labels'
continue
if state == 'labels':
first_cell_value = transform_xls_cell_to_json(book, sheet, merged_cells_tree, row_index, 0)
date_or_year, error = conv.pipe(
conv.test_isinstance((int, basestring)),
cell_to_date,
conv.not_none,
)(first_cell_value, state = conv.default_state)
if error is not None:
# First cell of row is not a date => Assume it is a label.
labels_rows.append([
transform_xls_cell_to_str(book, sheet, merged_cells_tree, row_index, column_index)
for column_index in range(columns_count)
])
continue
state = 'values'
if state == 'values':
first_cell_value = transform_xls_cell_to_json(book, sheet, merged_cells_tree, row_index, 0)
if first_cell_value is None or isinstance(first_cell_value, (int, basestring)):
date_or_year, error = cell_to_date(first_cell_value, state = conv.default_state)
if error is None:
# First cell of row is a valid date or year.
values_row = [
transform_xls_cell_to_json(book, sheet, merged_cells_tree, row_index, column_index)
for column_index in range(columns_count)
]
if date_or_year is not None:
assert date_or_year.year < 2601, 'Invalid date {} in {} at row {}'.format(date_or_year,
sheet_name, row_index + 1)
values_rows.append(values_row)
continue
if all(value in (None, u'') for value in values_row):
# If first cell is empty and all other cells in line are also empty, ignore this line.
continue
# First cell has no date and other cells in row are not empty => Assume it is a note.
state = 'notes'
if state == 'notes':
first_cell_value = transform_xls_cell_to_json(book, sheet, merged_cells_tree, row_index, 0)
if isinstance(first_cell_value, basestring) and first_cell_value.strip().lower() == 'notes':
notes_rows.append([
transform_xls_cell_to_str(book, sheet, merged_cells_tree, row_index, column_index)
for column_index in range(columns_count)
])
continue
state = 'description'
assert state == 'description'
descriptions_rows.append([
transform_xls_cell_to_str(book, sheet, merged_cells_tree, row_index, column_index)
for column_index in range(columns_count)
])
text_lines = []
for row in notes_rows:
text_lines.append(u' | '.join(
cell for cell in row
if cell
))
if text_lines:
text_lines.append(None)
for row in descriptions_rows:
text_lines.append(u' | '.join(
cell for cell in row
if cell
))
sheet_title = sheet_title_by_name.get(sheet_name)
if sheet_title is None:
log.warning(u"Missing title for sheet {} in summary".format(sheet_name))
continue
labels = []
for labels_row in labels_rows:
for column_index, label in enumerate(labels_row):
if not label:
continue
while column_index >= len(labels):
labels.append([])
labels_column = labels[column_index]
if not labels_column or labels_column[-1] != label:
labels_column.append(label)
labels = [
tuple(labels_column1) if len(labels_column1) > 1 else labels_column1[0]
for labels_column1 in labels
]
cell_by_label_rows = []
for value_row in values_rows:
cell_by_label = collections.OrderedDict(itertools.izip(labels, value_row))
cell_by_label, errors = values_row_converter(cell_by_label, state = conv.default_state)
assert errors is None, "Errors in {}:\n{}".format(cell_by_label, errors)
cell_by_label_rows.append(cell_by_label)
sheet_node = dict(
children = [],
name = strings.slugify(sheet_name, separator = u'_'),
text = text_lines,
title = sheet_title,
type = u'NODE',
)
root_node['children'].append(sheet_node)
for taxipp_name, labels_column in zip(taxipp_names_row, labels):
if not taxipp_name or taxipp_name in (u'date',):
continue
variable_node = dict(
children = [],
name = strings.slugify(taxipp_name, separator = u'_'),
title = u' - '.join(labels_column) if isinstance(labels_column, tuple) else labels_column,
type = u'CODE',
)
sheet_node['children'].append(variable_node)
for cell_by_label in cell_by_label_rows:
amount_and_unit = cell_by_label[labels_column]
variable_node['children'].append(dict(
law_reference = cell_by_label[u'Références législatives'],
notes = cell_by_label[u'Notes'],
publication_date = cell_by_label[u"Parution au JO"],
start_date = cell_by_label[u"Date d'entrée en vigueur"],
type = u'VALUE',
unit = amount_and_unit[1] if isinstance(amount_and_unit, tuple) else None,
value = amount_and_unit[0] if isinstance(amount_and_unit, tuple) else amount_and_unit,
))
# dates = [
# conv.check(cell_to_date)(
# row[1] if bareme == u'Impot Revenu' else row[0],
# state = conv.default_state,
# )
# for row in values_rows
# ]
# for column_index, taxipp_name in enumerate(taxipp_names_row):
# if taxipp_name and strings.slugify(taxipp_name) not in (
# 'date',
# 'date-ir',
# 'date-rev',
# 'note',
# 'notes',
# 'ref-leg',
# ):
# vector = [
# transform_cell_value(date, row[column_index])
# for date, row in zip(dates, values_rows)
# ]
# vector = [
# cell if not isinstance(cell, basestring) or cell == u'nc' else '-'
# for cell in vector
# ]
# # vector_by_taxipp_name[taxipp_name] = pd.Series(vector, index = dates)
# vector_by_taxipp_name[taxipp_name] = vector
#
print_node(root_node)
return 0
def print_node(node, indent = 0):
attributes = node.copy()
children = attributes.pop('children', None)
text = attributes.pop('text', None)
if text:
while text and not (text[0] and text[0].strip()):
del text[0]
while text and not (text[-1] and text[-1].strip()):
del text[-1]
type = attributes.pop('type')
print u'{}<{}{}{}>'.format(
u' ' * indent,
type,
u''.join(
u' {}="{}"'.format(name, escape_xml(value))
for name, value in sorted(attributes.iteritems())
if value is not None
),
u'' if children or text else u'/',
).encode('utf-8')
if text:
for line in text:
if line and line.strip():
print u'{}{}'.format(u' ' * (indent + 1), escape_xml(line)).encode('utf-8')
else:
print
if children or text:
for child in children:
print_node(child, indent = indent + 1)
print u'{}</{}>'.format(u' ' * indent, type).encode('utf-8')
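# print_node emits an indented pseudo-XML dump, roughly:
#   <NODE name="root" title="Barème IPP">
#    <NODE name="some_sheet" title="...">
#     <CODE name="some_taxipp_name" title="...">
#      <VALUE start_date="..." value="..."/>
#     </CODE>
#    </NODE>
#   </NODE>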
def transform_cell_value(date, cell_value):
if isinstance(cell_value, tuple):
value, currency = cell_value
if currency == u'FRF':
if date < datetime.date(1960, 1, 1):
return round(value / (100 * 6.55957), 2)
return round(value / 6.55957, 2)
return value
return cell_value
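# Example: a cell holding (100.0, u'FRF') with a 1995 date converts to
# round(100.0 / 6.55957, 2) == 15.24 (euros); dates before 1960 are old
# francs and are divided by a further factor of 100.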
def transform_xls_cell_to_json(book, sheet, merged_cells_tree, row_index, column_index):
"""Convert an XLS cell (type & value) to an unicode string.
Code taken from http://code.activestate.com/recipes/546518-simple-conversion-of-excel-files-into-csv-and-yaml/
Type Codes:
EMPTY 0
TEXT 1 a Unicode string
NUMBER 2 float
DATE 3 float
BOOLEAN 4 int; 1 means TRUE, 0 means FALSE
ERROR 5
"""
unmerged_cell_coordinates = merged_cells_tree.get(row_index, {}).get(column_index)
if unmerged_cell_coordinates is None:
unmerged_row_index = row_index
unmerged_column_index = column_index
else:
unmerged_row_index, unmerged_column_index = unmerged_cell_coordinates
type = sheet.row_types(unmerged_row_index)[unmerged_column_index]
value = sheet.row_values(unmerged_row_index)[unmerged_column_index]
if type == 0:
value = None
elif type == 1:
if not value:
value = None
elif type == 2:
# NUMBER
value_int = int(value)
if value_int == value:
value = value_int
xf_index = sheet.cell_xf_index(row_index, column_index)
xf = book.xf_list[xf_index] # Get an XF object.
format_key = xf.format_key
format = book.format_map[format_key] # Get a Format object.
format_str = format.format_str # This is the "number format string".
if format_str in (
u'0',
u'General',
u'GENERAL',
u'_-* #,##0\ _€_-;\-* #,##0\ _€_-;_-* \-??\ _€_-;_-@_-',
) or format_str.endswith(u'0.00'):
return value
if u'€' in format_str:
return (value, u'EUR')
if u'FRF' in format_str or ur'\F\R\F' in format_str:
return (value, u'FRF')
assert format_str.endswith(u'%'), 'Unexpected format "{}" for value: {}'.format(format_str, value)
return (value, u'%')
elif type == 3:
# DATE
y, m, d, hh, mm, ss = xlrd.xldate_as_tuple(value, book.datemode)
date = u'{0:04d}-{1:02d}-{2:02d}'.format(y, m, d) if any(n != 0 for n in (y, m, d)) else None
value = u'T'.join(
fragment
for fragment in (
date,
(u'{0:02d}:{1:02d}:{2:02d}'.format(hh, mm, ss)
if any(n != 0 for n in (hh, mm, ss)) or date is None
else None),
)
if fragment is not None
)
elif type == 4:
value = bool(value)
elif type == 5:
# ERROR
value = xlrd.error_text_from_code[value]
# elif type == 6:
# TODO
# else:
# assert False, str((type, value))
return value
def transform_xls_cell_to_str(book, sheet, merged_cells_tree, row_index, column_index):
cell = transform_xls_cell_to_json(book, sheet, merged_cells_tree, row_index, column_index)
assert cell is None or isinstance(cell, basestring), u'Expected a string. Got: {}'.format(cell).encode('utf-8')
return cell
if __name__ == "__main__":
sys.exit(main())
| agpl-3.0 | 6,170,964,241,538,797,000 | 38.871795 | 120 | 0.521463 | false |
vecnet/om | website/apps/ts_om/views/ScenarioListView.py | 1 | 1416 | # -*- coding: utf-8 -*-
#
# This file is part of the VecNet OpenMalaria Portal.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/vecnet/om
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic import ListView
from website.apps.ts_om.models import Scenario as ScenarioModel
class ScenarioListView(ListView):
template_name = 'ts_om/list.html'
paginate_by = 10
model = ScenarioModel
# ensure_csrf_cookie is to send CSRF cookie with this view - to ensure that DeleteView is working properly
@method_decorator(ensure_csrf_cookie)
def dispatch(self, request, *args, **kwargs):
return super(ScenarioListView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
scenarios = ScenarioModel.objects.filter(user=self.request.user, deleted=False).order_by('-last_modified')
return scenarios
def get_context_data(self, **kwargs):
context = super(ScenarioListView, self).get_context_data(**kwargs)
return context
| mpl-2.0 | 7,419,997,059,402,621,000 | 38.333333 | 114 | 0.731638 | false |
FilipeMaia/h5proxy | h5proxy/serializer.py | 1 | 5488 | import numpy
import h5py
import cPickle as pickle
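# Serializer turns h5py objects, links and numpy arrays into picklable
# descriptors for transport over a ZeroMQ socket; when built without a
# socket it dispatches the same RPC payloads to an in-process Server.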
class Serializer(object):
def __init__(self, parent, socket = None):
self._parent = parent
self._socket = socket
if(socket):
import threading
self.lock = threading.Lock()
else:
# Use an internal server is there's no socket
self._server = Server(None)
def call(self, data):
if(self._socket):
with self.lock:
self.send(data)
return self.recv()
else:
if(data['func'] == 'attrs'):
ret, _ = self._serialize(self._server.handleRPC(data),[],data['fileName'],data['path'])
return self._deserialize(ret)
else:
ret, _ = self._serialize(self._server.handleRPC(data),[],None,None)
return self._deserialize(ret)
def recv(self):
data = pickle.loads(self._socket.recv())
ret = self._deserialize(data)
return ret
def _deserialize(self, data):
if(isinstance(data, dict)):
if('className' in data):
if(data['className'] == "Dataset"):
data = Dataset(self._parent, data['fileName'], data['path'])
elif(data['className'] == "Group"):
data = Group(self._parent, data['fileName'], data['path'])
elif(data['className'] == "Attributes"):
data = Attributes(self._parent, data['fileName'], data['path'])
elif(data['className'] == "SoftLink"):
data = h5py.SoftLink(data['path'])
elif(data['className'] == "ExternalLink"):
data = h5py.ExternalLink(data['fileName'],data['path'])
elif(data['className'] == "exception"):
exc_type = data['exc_type']
exc_value = data['exc_value']
raise exc_type(exc_value)
elif(data['className'] == "ndarray" and self._socket):
d = self._socket.recv()
data = numpy.frombuffer(buffer(d), dtype=data['dtype']).reshape(data['shape'])
elif(data['className'] == "File"):
pass
else:
raise RuntimeError('Unknown class: %s' % data['className'])
else:
# We need to sort to be able to receive any possible arrays
# in the correct order
for k in sorted(data.keys()):
data[k] = self._deserialize(data[k])
elif isinstance(data, list) or isinstance(data, tuple):
ldata = [None]*len(data)
for i in range(len(data)):
ldata[i] = self._deserialize(data[i])
data = type(data)(ldata)
return data
def send(self,data, fileName = None, path = None):
data, arrays = self._serialize(data, [], fileName, path)
flags = 0
if(len(arrays)):
import zmq
flags = zmq.SNDMORE
self._socket.send(pickle.dumps(data), flags)
for i in range(len(arrays)):
# When sending the last array change the flag back
if(i == len(arrays) -1):
flags = 0
self._socket.send(arrays[i], flags)
def _serialize(self, data, arrays, fileName, path):
if type(data) is h5py.Dataset:
data = dict(
className = "Dataset",
fileName = data.file.filename,
path = data.name
)
elif type(data) is h5py.Group:
data = dict(
className = "Group",
fileName = data.file.filename,
path = data.name
)
elif type(data) is h5py.AttributeManager:
data = dict(
className = "Attributes",
fileName = fileName,
path = path,
)
elif type(data) is h5py.File:
data = dict(
className = "File",
fileName = data.file.filename,
path = ''
)
elif type(data) is h5proxy.ExternalLink:
data = dict(
className = "ExternalLink",
fileName = data.filename,
path = data.path
)
elif type(data) is h5proxy.SoftLink:
data = dict(
className = "SoftLink",
path = data.path
)
elif isinstance(data, numpy.ndarray) and self._socket:
arrays.append(data)
data = dict(
className = "ndarray",
dtype = data.dtype,
shape = data.shape
)
elif isinstance(data, dict):
# We need to sort to be able to receive any possible arrays
# in the correct order
for k in sorted(data.keys()):
data[k], arrays = self._serialize(data[k], arrays, fileName, path)
elif isinstance(data, list) or isinstance(data, tuple):
ldata = [None]*len(data)
for i in range(len(data)):
ldata[i], arrays = self._serialize(data[i], arrays, fileName, path)
data = type(data)(ldata)
return data, arrays
from .h5proxy import Dataset,Group,File,Attributes, SoftLink, ExternalLink
import h5proxy
from .server import Server
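# These imports sit at the bottom of the module, seemingly to sidestep the
# circular dependency between serializer.py, h5proxy.py and server.py.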
| bsd-2-clause | -8,297,233,515,295,991,000 | 36.589041 | 103 | 0.493258 | false |
rssalessio/PythonVRFT | test/test_vrft.py | 1 | 3518 | # test_vrft.py - Unittest for VRFT
#
# Code author: [Alessio Russo - [email protected]]
# Last update: 10th January 2021, by [email protected]
#
# Copyright (c) [2017-2021] Alessio Russo [[email protected]]. All rights reserved.
# This file is part of PythonVRFT.
# PythonVRFT is free software: you can redistribute it and/or modify
# it under the terms of the MIT License. You should have received a copy of
# the MIT License along with PythonVRFT.
# If not, see <https://opensource.org/licenses/MIT>.
#
from unittest import TestCase
import numpy as np
import scipy.signal as scipysig
from vrft.iddata import *
from vrft.vrft_algo import *
from vrft.extended_tf import ExtendedTF
class TestVRFT(TestCase):
def test_vrft(self):
t_start = 0
t_step = 1e-2
t_ends = [10, 10 + t_step]
expected_theta = np.array([1.93220784, -1.05808206, 1.26623764, 0.0088772])
expected_loss = 0.00064687904235295
for t_end in t_ends:
t = np.arange(t_start, t_end, t_step)
u = np.ones(len(t)).tolist()
num = [0.1]
den = [1, -0.9]
sys = scipysig.TransferFunction(num, den, dt=t_step)
t, y = scipysig.dlsim(sys, u, t)
y = y[:,0]
data = iddata(y,u,t_step,[0])
refModel = ExtendedTF([0.2], [1, -0.8], dt=t_step)
prefilter = refModel * (1-refModel)
control = [ExtendedTF([1], [1,0], dt=t_step),
ExtendedTF([1], [1,0,0], dt=t_step),
ExtendedTF([1], [1,0,0,0], dt=t_step),
ExtendedTF([1, 0], [1,1], dt=t_step)]
theta1, _, loss1, _ = compute_vrft(data, refModel, control, prefilter)
theta2, _, loss2, _ = compute_vrft([data], refModel, control, prefilter)
theta3, _, loss3, _ = compute_vrft([data, data], refModel, control, prefilter)
self.assertTrue(np.isclose(loss1, loss2))
self.assertTrue(np.isclose(loss1, loss3))
self.assertTrue(np.linalg.norm(theta1-theta2)<1e-15)
self.assertTrue(np.linalg.norm(theta1-theta3)<1e-15)
self.assertTrue(np.linalg.norm(theta1-expected_theta, np.infty) < 1e-5)
self.assertTrue(abs(expected_loss - loss1) < 1e-5)
def test_iv(self):
t_start = 0
t_step = 1e-2
t_ends = [10, 10 + t_step]
for t_end in t_ends:
t = np.arange(t_start, t_end, t_step)
u = np.ones(len(t)).tolist()
num = [0.1]
den = [1, -0.9]
sys = scipysig.TransferFunction(num, den, dt=t_step)
_, y = scipysig.dlsim(sys, u, t)
y = y.flatten() + 1e-2 * np.random.normal(size=t.size)
data1 = iddata(y,u,t_step,[0])
_, y = scipysig.dlsim(sys, u, t)
y = y.flatten() + 1e-2 * np.random.normal(size=t.size)
data2 = iddata(y,u,t_step,[0])
refModel = ExtendedTF([0.2], [1, -0.8], dt=t_step)
prefilter = refModel * (1-refModel)
control = [ExtendedTF([1], [1,0], dt=t_step),
ExtendedTF([1], [1,0,0], dt=t_step),
ExtendedTF([1], [1,0,0,0], dt=t_step),
ExtendedTF([1, 0], [1,1], dt=t_step)]
with self.assertRaises(ValueError):
compute_vrft(data1, refModel, control, prefilter, iv=True)
compute_vrft([data1, data2], refModel, control, prefilter, iv=True)
| gpl-3.0 | -6,444,991,884,711,760,000 | 36.425532 | 90 | 0.545765 | false |
vmonaco/single-hashing | single_hash.py | 1 | 2647 | '''
Created on Nov 20, 2012
@author: vinnie
'''
from utils import *
def in1d_running(q, A):
'''
j where q[k] in A for 0 <= k <= j
This is the maximum index j where q[0:j] is in A
'''
j = 0
while j < len(q) and q[j] in A:
j += 1
return j
def s_A(Q, A):
'''
s(A) = {(i,j) | q[i,k] in A for 0 <= k <= j}
The set of all coordinates where Q[i,0:k] is in A for 0 <= k <= j,
where j is defined by the ind1d_running function above
'''
return [(i, k) for i in A for k in range(in1d_running(Q[i], A))]
def P(Q, A, m):
'''
Given the single hashing scheme defined by matrix Q,
compute the probability that the first |A| occupied slots are exactly
the slots in A
'''
if len(A) == 0:
return 0
elif len(A) == 1:
return 1.0 / m
else:
return (1.0 / m) * sum([P(Q, tuple(a for a in A if a != Q[i][j]), m)
for (i, j) in s_A(Q, A)])
def P_map(Q):
'''
Compute P(A) for each n-combination in [0,1,2...m) for 0 <= n < m
Also compute P( [0,1,2...m) ), i.e. the full table. Only one such combination is needed; this should
always be equal to 1.0
'''
m = len(Q)
m_range = range(m)
p = {A: P(Q, A, m) for A in generate_A(m_range)}
return p
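# Sanity-check sketch (hypothetical 3-slot scheme): probing the full slot
# set should always have probability 1.0, as noted above.
#   Q = [[0, 1, 2], [1, 2, 0], [2, 0, 1]]
#   assert abs(P(Q, (0, 1, 2), 3) - 1.0) < 1e-9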
def delta_prime(Q):
'''
The average number of spaces probed for each insertion by the time
the table is full. This is the best measure for the efficiency of a
single hashing scheme
'''
m = len(Q)
m_range = [row[0] for row in Q]
set_A = generate_A(m_range)
return (1.0 / (m ** 2)) * sum(P(Q, A, m) * len(s_A(Q, A)) for A in set_A)
def d_prime(Q, n):
'''
The average number of probes needed to insert the nth element
into a table with single hashing scheme Q
'''
m = len(Q)
m_range = [row[0] for row in Q]
assert n <= m
set_A = [A for A in generate_A(m_range) if len(A) == n - 1]
return (1.0 / m) * sum(P(Q, A, m) * len(s_A(Q, A)) for A in set_A)
def search_random(m, N):
from operator import itemgetter
import matplotlib.pyplot as plt
import random
random.seed(1234)
score_Q = [(delta_prime(Q), Q) for Q in [random_Q(m) for _ in range(N)]]
min_score, min_Q = min(score_Q, key=itemgetter(0))
max_score, max_Q = max(score_Q, key=itemgetter(0))
print('Best score:', min_score, min_Q)
print('Worst score:', max_score, max_Q)
plt.hist(list(zip(*score_Q))[0], bins=100, normed=True)
plt.xlabel('Probes per insertion')
plt.ylabel('Density')
plt.savefig('m%d_scores.png' % m)
return
if __name__ == '__main__':
search_random(5, 10000)
| mit | -4,057,426,931,684,197,400 | 24.451923 | 79 | 0.553457 | false |
nudomarinero/mltier1 | test/test_extinction.py | 1 | 3308 | """
Test the extinction module
"""
from __future__ import print_function
import sys
import os
import unittest
import numpy as np
import requests.exceptions
from astropy import units as u
import numpy.testing as npt
# Append the module to test
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
from extinction import (query, f99_extinction, get_filter_extinction,
FILTER_URIS)
# Expected data
response_gal_0_0 = {'EBV_SFD': 99.69757}
response_equ_0_0 = {'EBV_SFD': 0.03182}
response_equ_array = {'EBV_SFD': [0.03182, 0.03301]}
class TestQueryExtinction(unittest.TestCase):
""" Test the query of extiction data
"""
def test_query_position_gal_0_0(self):
self.assertEqual(query(0, 0, coordsys="gal"), response_gal_0_0)
def test_query_position_equ_0_0(self):
self.assertEqual(query(0, 0), response_equ_0_0)
def test_query_equ_array(self):
self.assertEqual(query([0, 1], [0, 1]), response_equ_array)
def test_query_equ_out_limits(self):
with self.assertRaises(requests.exceptions.HTTPError):
query(100, 380, verbose=False)
def test_query_out_of_size(self):
#with self.assertRaises(requests.exceptions.HTTPError):
#print(query(list(np.zeros(50000)), list(np.zeros(50000))))
pass
class TestExtinctionCurve(unittest.TestCase):
""" Test the computing of the extinction curve from Fitzpatrick 99
"""
def test_fir_wavelenghts(self):
self.assertEqual(f99_extinction(500*u.micron), [0.0010772042713472958])
def test_normal_wavelenghts(self):
self.assertEqual(f99_extinction(1*u.micron), [1.16611075588672])
def test_normal_wavelenghts_change_units(self):
npt.assert_array_max_ulp(f99_extinction(10000*u.Angstrom), np.array(1.16611075588672), dtype="float32")
def test_normal_wavelenghts_array(self):
npt.assert_array_max_ulp(f99_extinction([1, 1]*u.micron), np.array([1.16611075588672, 1.16611075588672]), dtype="float32")
class TestFilterExtinction(unittest.TestCase):
""" Test the retrieval and computing of the extinction associated to
the main filters used.
"""
def test_PanSTARRS_g(self):
self.assertEqual(get_filter_extinction(FILTER_URIS["g"]), 3.6121011749827514)
def test_PanSTARRS_r(self):
self.assertEqual(get_filter_extinction(FILTER_URIS["r"]), 2.5687511251039137)
def test_PanSTARRS_i(self):
self.assertEqual(get_filter_extinction(FILTER_URIS["i"]), 1.897167710862949)
def test_PanSTARRS_z(self):
self.assertEqual(get_filter_extinction(FILTER_URIS["z"]), 1.4948335405125801)
def test_PanSTARRS_y(self):
self.assertEqual(get_filter_extinction(FILTER_URIS["y"]), 1.2478667172854474)
def test_WISE_W1(self):
self.assertEqual(get_filter_extinction(FILTER_URIS["W1"]), 0.19562893570345422)
def test_WISE_W2(self):
self.assertEqual(get_filter_extinction(FILTER_URIS["W2"]), 0.13438419437135862)
def test_WISE_W3(self):
self.assertEqual(get_filter_extinction(FILTER_URIS["W3"]), 0.046003159224496736)
def test_WISE_W4(self):
self.assertEqual(get_filter_extinction(FILTER_URIS["W4"]), 0.024851094687942197)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -3,343,834,670,868,604,000 | 33.458333 | 130 | 0.682285 | false |
sehughes/django-treebeard | treebeard/tests.py | 1 | 66184 | # -*- coding: utf-8 -*-
"""
treebeard.tests
---------------
Unit tests.
:copyright: 2008 by Gustavo Picon
:license: Apache License 2.0
"""
import functools
import os
from django.test import TestCase
from django.db import models, transaction
from django.contrib.auth.models import User, AnonymousUser
from django.db.models import Q
from django.conf import settings
from treebeard import numconv
from treebeard.exceptions import InvalidPosition, InvalidMoveToDescendant, \
PathOverflow, MissingNodeOrderBy
from treebeard.mp_tree import MP_Node
from treebeard.al_tree import AL_Node
from treebeard.ns_tree import NS_Node
# ghetto app detection, there is probably some introspection method,
# but meh, this works
HAS_DJANGO_AUTH = 'django.contrib.auth' in settings.INSTALLED_APPS
BASE_DATA = [
{'data':{'desc':'1'}},
{'data':{'desc':'2'}, 'children':[
{'data':{'desc':'21'}},
{'data':{'desc':'22'}},
{'data':{'desc':'23'}, 'children':[
{'data':{'desc':'231'}},
]},
{'data':{'desc':'24'}},
]},
{'data':{'desc':'3'}},
{'data':{'desc':'4'}, 'children':[
{'data':{'desc':'41'}},
]},
]
class MP_TestNode(MP_Node):
steplen = 3
desc = models.CharField(max_length=255)
class MP_TestNodeSomeDep(models.Model):
node = models.ForeignKey(MP_TestNode)
class NS_TestNode(NS_Node):
desc = models.CharField(max_length=255)
class NS_TestNodeSomeDep(models.Model):
node = models.ForeignKey(NS_TestNode)
class AL_TestNode(AL_Node):
parent = models.ForeignKey('self',
related_name='children_set',
null=True,
db_index=True)
sib_order = models.PositiveIntegerField()
desc = models.CharField(max_length=255)
class AL_TestNodeSomeDep(models.Model):
node = models.ForeignKey(AL_TestNode)
class MP_TestNodeSorted(MP_Node):
steplen = 1
node_order_by = ['val1', 'val2', 'desc']
val1 = models.IntegerField()
val2 = models.IntegerField()
desc = models.CharField(max_length=255)
class NS_TestNodeSorted(NS_Node):
node_order_by = ['val1', 'val2', 'desc']
val1 = models.IntegerField()
val2 = models.IntegerField()
desc = models.CharField(max_length=255)
class AL_TestNodeSorted(AL_Node):
parent = models.ForeignKey('self',
related_name='children_set',
null=True,
db_index=True)
node_order_by = ['val1', 'val2', 'desc']
val1 = models.IntegerField()
val2 = models.IntegerField()
desc = models.CharField(max_length=255)
class MP_TestNodeAlphabet(MP_Node):
steplen = 2
numval = models.IntegerField()
class MP_TestNodeSmallStep(MP_Node):
steplen = 1
alphabet = '0123456789'
class MP_TestNodeSortedAutoNow(MP_Node):
desc = models.CharField(max_length=255)
created = models.DateTimeField(auto_now_add=True)
node_order_by = ['created']
class MP_TestNodeShortPath(MP_Node):
steplen = 1
alphabet = '01234'
desc = models.CharField(max_length=255)
# This is how you change the default fields defined in a Django abstract class
# (in this case, MP_Node), since Django doesn't allow overriding fields, only
# methods and attributes
MP_TestNodeShortPath._meta.get_field('path').max_length = 4
class MP_TestSortedNodeShortPath(MP_Node):
steplen = 1
alphabet = '01234'
desc = models.CharField(max_length=255)
node_order_by = ['desc']
MP_TestSortedNodeShortPath._meta.get_field('path').max_length = 4
if HAS_DJANGO_AUTH:
class MP_TestIssue14(MP_Node):
name = models.CharField(max_length=255)
users = models.ManyToManyField(User)
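# Runs the decorated test once per tree implementation (materialized path,
# adjacency list, nested sets), rolling back the transaction between runs
# so every pass starts from the same fixture data.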
def multi_test():
def decorator(f):
@functools.wraps(f)
def _multi_test(self):
try:
try:
self.set_MP() ; f(self)
finally:
transaction.rollback()
try:
self.set_AL() ; f(self)
finally:
transaction.rollback()
try:
self.set_NS() ; f(self)
finally:
transaction.rollback()
finally:
self.model = None
self.sorted_model = None
self.dep_model = None
return _multi_test
return decorator
class TestTreeBase(TestCase):
def setUp(self):
self.set_MP()
self.unchanged = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
def set_MP(self):
self.model = MP_TestNode
self.sorted_model = MP_TestNodeSorted
self.dep_model = MP_TestNodeSomeDep
def set_NS(self):
self.model = NS_TestNode
self.sorted_model = NS_TestNodeSorted
self.dep_model = NS_TestNodeSomeDep
def set_AL(self):
self.model = AL_TestNode
self.sorted_model = AL_TestNodeSorted
self.dep_model = AL_TestNodeSomeDep
def got(self):
if self.model == NS_TestNode:
# this slows down nested sets tests quite a bit, but it has the
# advantage that we'll check the node edges are correct
d = {}
for tree_id, lft, rgt in self.model.objects.values_list('tree_id',
'lft',
'rgt'):
d.setdefault(tree_id, []).extend([lft, rgt])
for tree_id, got_edges in d.items():
self.assertEqual(len(got_edges), max(got_edges))
good_edges = range(1, len(got_edges)+1)
self.assertEqual(sorted(got_edges), good_edges)
return [(o.desc, o.get_depth(), o.get_children_count())
for o in self.model.get_tree()]
class TestEmptyTree(TestTreeBase):
@multi_test()
def test_load_bulk_empty(self):
ids = self.model.load_bulk(BASE_DATA)
got_descs = [obj.desc
for obj in self.model.objects.filter(id__in=ids)]
expected_descs = [x[0] for x in self.unchanged]
self.assertEqual(sorted(got_descs), sorted(expected_descs))
self.assertEqual(self.got(), self.unchanged)
@multi_test()
def test_dump_bulk_empty(self):
self.assertEqual(self.model.dump_bulk(), [])
@multi_test()
def test_add_root_empty(self):
obj = self.model.add_root(desc='1')
expected = [(u'1', 1, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_get_root_nodes_empty(self):
got = self.model.get_root_nodes()
expected = []
self.assertEqual([node.desc for node in got], expected)
@multi_test()
def test_get_first_root_node_empty(self):
got = self.model.get_first_root_node()
self.assertEqual(got, None)
@multi_test()
def test_get_last_root_node_empty(self):
got = self.model.get_last_root_node()
self.assertEqual(got, None)
@multi_test()
def test_get_tree(self):
got = list(self.model.get_tree())
self.assertEqual(got, [])
class TestNonEmptyTree(TestTreeBase):
def setUp(self):
super(TestNonEmptyTree, self).setUp()
MP_TestNode.load_bulk(BASE_DATA)
AL_TestNode.load_bulk(BASE_DATA)
NS_TestNode.load_bulk(BASE_DATA)
class TestClassMethods(TestNonEmptyTree):
def setUp(self):
super(TestClassMethods, self).setUp()
@multi_test()
def test_load_bulk_existing(self):
# inserting on an existing node
node = self.model.objects.get(desc=u'231')
ids = self.model.load_bulk(BASE_DATA, node)
expected = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 4),
(u'1', 4, 0),
(u'2', 4, 4),
(u'21', 5, 0),
(u'22', 5, 0),
(u'23', 5, 1),
(u'231', 6, 0),
(u'24', 5, 0),
(u'3', 4, 0),
(u'4', 4, 1),
(u'41', 5, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
expected_descs = [u'1', u'2', u'21', u'22', u'23', u'231', u'24',
u'3', u'4', u'41']
got_descs = [obj.desc
for obj in self.model.objects.filter(id__in=ids)]
self.assertEqual(sorted(got_descs), sorted(expected_descs))
self.assertEqual(self.got(), expected)
@multi_test()
def test_get_tree_all(self):
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in self.model.get_tree()]
self.assertEqual(got, self.unchanged)
@multi_test()
def test_dump_bulk_all(self):
self.assertEqual(self.model.dump_bulk(keep_ids=False), BASE_DATA)
@multi_test()
def test_get_tree_node(self):
node = self.model.objects.get(desc=u'231')
self.model.load_bulk(BASE_DATA, node)
# this is ONLY needed by the nested set tree model, the rgt value of
# the node was updated in a raw query, and it must be updated in
# django's object
if self.model == NS_TestNode:
node = self.model.objects.get(pk=node.id)
got = [(o.desc, o.get_depth(), o.get_children_count())
for o in self.model.get_tree(node)]
expected = [(u'231', 3, 4),
(u'1', 4, 0),
(u'2', 4, 4),
(u'21', 5, 0),
(u'22', 5, 0),
(u'23', 5, 1),
(u'231', 6, 0),
(u'24', 5, 0),
(u'3', 4, 0),
(u'4', 4, 1),
(u'41', 5, 0)]
self.assertEqual(got, expected)
@multi_test()
def test_dump_bulk_node(self):
node = self.model.objects.get(desc=u'231')
self.model.load_bulk(BASE_DATA, node)
# this is ONLY needed by the nested set tree model, the rgt value of
# the node was updated in a raw query, and it must be updated in
# django's object
if self.model == NS_TestNode:
node = self.model.objects.get(pk=node.id)
got = self.model.dump_bulk(node, False)
expected = [{'data':{'desc':u'231'}, 'children':BASE_DATA}]
self.assertEqual(got, expected)
@multi_test()
def test_load_and_dump_bulk_keeping_ids(self):
exp = self.model.dump_bulk(keep_ids=True)
self.model.objects.all().delete()
self.model.load_bulk(exp, None, True)
got = self.model.dump_bulk(keep_ids=True)
self.assertEqual(got, exp)
@multi_test()
def test_get_root_nodes(self):
got = self.model.get_root_nodes()
expected = ['1', '2', '3', '4']
self.assertEqual([node.desc for node in got], expected)
@multi_test()
def test_get_first_root_node(self):
got = self.model.get_first_root_node()
self.assertEqual(got.desc, '1')
@multi_test()
def test_get_last_root_node(self):
got = self.model.get_last_root_node()
self.assertEqual(got.desc, '4')
@multi_test()
def test_add_root(self):
obj = self.model.add_root(desc='5')
self.assertEqual(obj.get_depth(), 1)
self.assertEqual(self.model.get_last_root_node().desc, '5')
class TestSimpleNodeMethods(TestNonEmptyTree):
@multi_test()
def test_is_root(self):
data = [
('2', True),
('1', True),
('4', True),
('21', False),
('24', False),
('22', False),
('231', False),
]
for desc, expected in data:
got = self.model.objects.get(desc=desc).is_root()
self.assertEqual(got, expected)
@multi_test()
def test_is_leaf(self):
data = [
('2', False),
('23', False),
('231', True),
]
for desc, expected in data:
got = self.model.objects.get(desc=desc).is_leaf()
self.assertEqual(got, expected)
@multi_test()
def test_get_root(self):
data = [
('2', '2'),
('1', '1'),
('4', '4'),
('21', '2'),
('24', '2'),
('22', '2'),
('231', '2'),
]
for desc, expected in data:
node = self.model.objects.get(desc=desc).get_root()
self.assertEqual(node.desc, expected)
@multi_test()
def test_get_parent(self):
data = [
('2', None),
('1', None),
('4', None),
('21', '2'),
('24', '2'),
('22', '2'),
('231', '23'),
]
data = dict(data)
objs = {}
for desc, expected in data.items():
node = self.model.objects.get(desc=desc)
parent = node.get_parent()
if expected:
self.assertEqual(parent.desc, expected)
else:
self.assertEqual(parent, None)
objs[desc] = node
# corrupt the objects' parent cache
node._parent_obj = 'CORRUPTED!!!'
for desc, expected in data.items():
node = objs[desc]
# asking get_parent to not use the parent cache (since we
# corrupted it in the previous loop)
parent = node.get_parent(True)
if expected:
self.assertEqual(parent.desc, expected)
else:
self.assertEqual(parent, None)
@multi_test()
def test_get_children(self):
data = [
('2', ['21', '22', '23', '24']),
('23', ['231']),
('231', []),
]
for desc, expected in data:
children = self.model.objects.get(desc=desc).get_children()
self.assertEqual([node.desc for node in children], expected)
@multi_test()
def test_get_children_count(self):
data = [
('2', 4),
('23', 1),
('231', 0),
]
for desc, expected in data:
got = self.model.objects.get(desc=desc).get_children_count()
self.assertEqual(got, expected)
@multi_test()
def test_get_siblings(self):
data = [
('2', ['1', '2', '3', '4']),
('21', ['21', '22', '23', '24']),
('231', ['231']),
]
for desc, expected in data:
siblings = self.model.objects.get(desc=desc).get_siblings()
self.assertEqual([node.desc for node in siblings], expected)
@multi_test()
def test_get_first_sibling(self):
data = [
('2', '1'),
('1', '1'),
('4', '1'),
('21', '21'),
('24', '21'),
('22', '21'),
('231', '231'),
]
for desc, expected in data:
node = self.model.objects.get(desc=desc).get_first_sibling()
self.assertEqual(node.desc, expected)
@multi_test()
def test_get_prev_sibling(self):
data = [
('2', '1'),
('1', None),
('4', '3'),
('21', None),
('24', '23'),
('22', '21'),
('231', None),
]
for desc, expected in data:
node = self.model.objects.get(desc=desc).get_prev_sibling()
if expected is None:
self.assertEqual(node, None)
else:
self.assertEqual(node.desc, expected)
@multi_test()
def test_get_next_sibling(self):
data = [
('2', '3'),
('1', '2'),
('4', None),
('21', '22'),
('24', None),
('22', '23'),
('231', None),
]
for desc, expected in data:
node = self.model.objects.get(desc=desc).get_next_sibling()
if expected is None:
self.assertEqual(node, None)
else:
self.assertEqual(node.desc, expected)
@multi_test()
def test_get_last_sibling(self):
data = [
('2', '4'),
('1', '4'),
('4', '4'),
('21', '24'),
('24', '24'),
('22', '24'),
('231', '231'),
]
for desc, expected in data:
node = self.model.objects.get(desc=desc).get_last_sibling()
self.assertEqual(node.desc, expected)
@multi_test()
def test_get_first_child(self):
data = [
('2', '21'),
('21', None),
('23', '231'),
('231', None),
]
for desc, expected in data:
node = self.model.objects.get(desc=desc).get_first_child()
if expected is None:
self.assertEqual(node, None)
else:
self.assertEqual(node.desc, expected)
@multi_test()
def test_get_last_child(self):
data = [
('2', '24'),
('21', None),
('23', '231'),
('231', None),
]
for desc, expected in data:
node = self.model.objects.get(desc=desc).get_last_child()
if expected is None:
self.assertEqual(node, None)
else:
self.assertEqual(node.desc, expected)
@multi_test()
def test_get_ancestors(self):
data = [
('2', []),
('21', ['2']),
('231', ['2', '23']),
]
for desc, expected in data:
nodes = self.model.objects.get(desc=desc).get_ancestors()
self.assertEqual([node.desc for node in nodes], expected)
@multi_test()
def test_get_descendants(self):
data = [
('2', ['21', '22', '23', '231', '24']),
('23', ['231']),
('231', []),
('1', []),
('4', ['41']),
]
for desc, expected in data:
nodes = self.model.objects.get(desc=desc).get_descendants()
self.assertEqual([node.desc for node in nodes], expected)
@multi_test()
def test_get_descendant_count(self):
data = [
('2', 5),
('23', 1),
('231', 0),
('1', 0),
('4', 1),
]
for desc, expected in data:
got = self.model.objects.get(desc=desc).get_descendant_count()
self.assertEqual(got, expected)
@multi_test()
def test_is_sibling_of(self):
data = [
('2', '2', True),
('2', '1', True),
('21', '2', False),
('231', '2', False),
('22', '23', True),
('231', '23', False),
('231', '231', True),
]
for desc1, desc2, expected in data:
node1 = self.model.objects.get(desc=desc1)
node2 = self.model.objects.get(desc=desc2)
self.assertEqual(node1.is_sibling_of(node2), expected)
@multi_test()
def test_is_child_of(self):
data = [
('2', '2', False),
('2', '1', False),
('21', '2', True),
('231', '2', False),
('231', '23', True),
('231', '231', False),
]
for desc1, desc2, expected in data:
node1 = self.model.objects.get(desc=desc1)
node2 = self.model.objects.get(desc=desc2)
self.assertEqual(node1.is_child_of(node2), expected)
@multi_test()
def test_is_descendant_of(self):
data = [
('2', '2', False),
('2', '1', False),
('21', '2', True),
('231', '2', True),
('231', '23', True),
('231', '231', False),
]
for desc1, desc2, expected in data:
node1 = self.model.objects.get(desc=desc1)
node2 = self.model.objects.get(desc=desc2)
self.assertEqual(node1.is_descendant_of(node2), expected)
class TestAddChild(TestNonEmptyTree):
@multi_test()
def test_add_child_to_leaf(self):
obj = self.model.objects.get(desc=u'231').add_child(desc='2311')
expected = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 1),
(u'2311', 4, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_add_child_to_node(self):
obj = self.model.objects.get(desc=u'2').add_child(desc='25')
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'25', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
class TestAddSibling(TestNonEmptyTree):
@multi_test()
def test_add_sibling_invalid_pos(self):
method = self.model.objects.get(desc=u'231').add_sibling
self.assertRaises(InvalidPosition, method, 'invalid_pos')
@multi_test()
def test_add_sibling_missing_nodeorderby(self):
node_wchildren = self.model.objects.get(desc=u'2')
method = node_wchildren.add_sibling
self.assertRaises(MissingNodeOrderBy, method, 'sorted-sibling',
desc='aaa')
@multi_test()
def test_add_sibling_last_root(self):
node_wchildren = self.model.objects.get(desc=u'2')
obj = node_wchildren.add_sibling('last-sibling', desc='5')
self.assertEqual(obj.get_depth(), 1)
self.assertEqual(node_wchildren.get_last_sibling().desc, u'5')
@multi_test()
def test_add_sibling_last(self):
node = self.model.objects.get(desc=u'231')
obj = node.add_sibling('last-sibling', desc='232')
self.assertEqual(obj.get_depth(), 3)
self.assertEqual(node.get_last_sibling().desc, u'232')
@multi_test()
def test_add_sibling_first_root(self):
node_wchildren = self.model.objects.get(desc=u'2')
obj = node_wchildren.add_sibling('first-sibling', desc='new')
self.assertEqual(obj.get_depth(), 1)
expected = [(u'new', 1, 0),
(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_add_sibling_first(self):
node_wchildren = self.model.objects.get(desc=u'23')
obj = node_wchildren.add_sibling('first-sibling', desc='new')
self.assertEqual(obj.get_depth(), 2)
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'new', 2, 0),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_add_sibling_left_root(self):
node_wchildren = self.model.objects.get(desc=u'2')
obj = node_wchildren.add_sibling('left', desc='new')
self.assertEqual(obj.get_depth(), 1)
expected = [(u'1', 1, 0),
(u'new', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_add_sibling_left(self):
node_wchildren = self.model.objects.get(desc=u'23')
obj = node_wchildren.add_sibling('left', desc='new')
self.assertEqual(obj.get_depth(), 2)
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'21', 2, 0),
(u'22', 2, 0),
(u'new', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_add_sibling_left_noleft_root(self):
node = self.model.objects.get(desc=u'1')
obj = node.add_sibling('left', desc='new')
self.assertEqual(obj.get_depth(), 1)
expected = [(u'new', 1, 0),
(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_add_sibling_left_noleft(self):
node = self.model.objects.get(desc=u'231')
obj = node.add_sibling('left', desc='new')
self.assertEqual(obj.get_depth(), 3)
expected = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 2),
(u'new', 3, 0),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_add_sibling_right_root(self):
node_wchildren = self.model.objects.get(desc=u'2')
obj = node_wchildren.add_sibling('right', desc='new')
self.assertEqual(obj.get_depth(), 1)
expected = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'new', 1, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_add_sibling_right(self):
node_wchildren = self.model.objects.get(desc=u'23')
obj = node_wchildren.add_sibling('right', desc='new')
self.assertEqual(obj.get_depth(), 2)
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'new', 2, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_add_sibling_right_noright_root(self):
node = self.model.objects.get(desc=u'4')
obj = node.add_sibling('right', desc='new')
self.assertEqual(obj.get_depth(), 1)
expected = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0),
(u'new', 1, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_add_sibling_right_noright(self):
node = self.model.objects.get(desc=u'231')
obj = node.add_sibling('right', desc='new')
self.assertEqual(obj.get_depth(), 3)
expected = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 2),
(u'231', 3, 0),
(u'new', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
class TestDelete(TestNonEmptyTree):
def setUp(self):
super(TestDelete, self).setUp()
for node in self.model.objects.all():
self.dep_model(node=node).save()
@multi_test()
def test_delete_leaf(self):
self.model.objects.get(desc=u'231').delete()
expected = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_delete_node(self):
self.model.objects.get(desc=u'23').delete()
expected = [(u'1', 1, 0),
(u'2', 1, 3),
(u'21', 2, 0),
(u'22', 2, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_delete_root(self):
self.model.objects.get(desc=u'2').delete()
expected = [(u'1', 1, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_delete_filter_root_nodes(self):
self.model.objects.filter(desc__in=('2', '3')).delete()
expected = [(u'1', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_delete_filter_children(self):
self.model.objects.filter(
desc__in=('2', '23', '231')).delete()
expected = [(u'1', 1, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
    def test_delete_nonexistent_nodes(self):
self.model.objects.filter(desc__in=('ZZZ', 'XXX')).delete()
self.assertEqual(self.got(), self.unchanged)
@multi_test()
def test_delete_same_node_twice(self):
self.model.objects.filter(
desc__in=('2', '2')).delete()
expected = [(u'1', 1, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_delete_all_root_nodes(self):
self.model.get_root_nodes().delete()
count = self.model.objects.count()
self.assertEqual(count, 0)
@multi_test()
def test_delete_all_nodes(self):
self.model.objects.all().delete()
count = self.model.objects.count()
self.assertEqual(count, 0)
class TestMoveErrors(TestNonEmptyTree):
@multi_test()
def test_move_invalid_pos(self):
node = self.model.objects.get(desc=u'231')
self.assertRaises(InvalidPosition, node.move, node, 'invalid_pos')
@multi_test()
def test_move_to_descendant(self):
node = self.model.objects.get(desc=u'2')
target = self.model.objects.get(desc=u'231')
self.assertRaises(InvalidMoveToDescendant, node.move, target,
'first-sibling')
@multi_test()
def test_nonsorted_move_in_sorted(self):
node = self.sorted_model.add_root(val1=3, val2=3, desc='zxy')
self.assertRaises(InvalidPosition, node.move, node, 'left')
@multi_test()
def test_move_missing_nodeorderby(self):
node = self.model.objects.get(desc=u'231')
self.assertRaises(MissingNodeOrderBy, node.move, node,
'sorted-child')
self.assertRaises(MissingNodeOrderBy, node.move, node,
'sorted-sibling')
class TestMoveLeafRoot(TestNonEmptyTree):
@multi_test()
def test_move_leaf_last_sibling_root(self):
self.model.objects.get(desc=u'231').move(
self.model.objects.get(desc=u'2'), 'last-sibling')
expected = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0),
(u'231', 1, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_leaf_first_sibling_root(self):
self.model.objects.get(desc=u'231').move(
self.model.objects.get(desc=u'2'), 'first-sibling')
expected = [(u'231', 1, 0),
(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_leaf_left_sibling_root(self):
self.model.objects.get(desc=u'231').move(
self.model.objects.get(desc=u'2'), 'left')
expected = [(u'1', 1, 0),
(u'231', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_leaf_right_sibling_root(self):
self.model.objects.get(desc=u'231').move(
self.model.objects.get(desc=u'2'), 'right')
expected = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 0),
(u'24', 2, 0),
(u'231', 1, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_leaf_last_child_root(self):
self.model.objects.get(desc=u'231').move(
self.model.objects.get(desc=u'2'), 'last-child')
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 0),
(u'24', 2, 0),
(u'231', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_leaf_first_child_root(self):
self.model.objects.get(desc=u'231').move(
self.model.objects.get(desc=u'2'), 'first-child')
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'231', 2, 0),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
class TestMoveLeaf(TestNonEmptyTree):
@multi_test()
def test_move_leaf_last_sibling(self):
self.model.objects.get(desc=u'231').move(
self.model.objects.get(desc=u'22'), 'last-sibling')
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 0),
(u'24', 2, 0),
(u'231', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_leaf_first_sibling(self):
self.model.objects.get(desc=u'231').move(
self.model.objects.get(desc=u'22'), 'first-sibling')
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'231', 2, 0),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_leaf_left_sibling(self):
self.model.objects.get(desc=u'231').move(
self.model.objects.get(desc=u'22'), 'left')
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'21', 2, 0),
(u'231', 2, 0),
(u'22', 2, 0),
(u'23', 2, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_leaf_right_sibling(self):
self.model.objects.get(desc=u'231').move(
self.model.objects.get(desc=u'22'), 'right')
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'21', 2, 0),
(u'22', 2, 0),
(u'231', 2, 0),
(u'23', 2, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_leaf_left_sibling_itself(self):
self.model.objects.get(desc=u'231').move(
self.model.objects.get(desc=u'231'), 'left')
self.assertEqual(self.got(), self.unchanged)
@multi_test()
def test_move_leaf_last_child(self):
self.model.objects.get(desc=u'231').move(
self.model.objects.get(desc=u'22'), 'last-child')
expected = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 1),
(u'231', 3, 0),
(u'23', 2, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_leaf_first_child(self):
self.model.objects.get(desc=u'231').move(
self.model.objects.get(desc=u'22'), 'first-child')
expected = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 1),
(u'231', 3, 0),
(u'23', 2, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
class TestMoveBranchRoot(TestNonEmptyTree):
@multi_test()
def test_move_branch_first_sibling_root(self):
self.model.objects.get(desc='4').move(
self.model.objects.get(desc='2'), 'first-sibling')
expected = [(u'4', 1, 1),
(u'41', 2, 0),
(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_branch_last_sibling_root(self):
self.model.objects.get(desc='4').move(
self.model.objects.get(desc='2'), 'last-sibling')
expected = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_branch_left_sibling_root(self):
self.model.objects.get(desc='4').move(
self.model.objects.get(desc='2'), 'left')
expected = [(u'1', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_branch_right_sibling_root(self):
self.model.objects.get(desc='4').move(
self.model.objects.get(desc='2'), 'right')
expected = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'4', 1, 1),
(u'41', 2, 0),
(u'3', 1, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_branch_left_noleft_sibling_root(self):
self.model.objects.get(desc='4').move(
self.model.objects.get(desc='2').get_first_sibling(), 'left')
expected = [(u'4', 1, 1),
(u'41', 2, 0),
(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_branch_right_noright_sibling_root(self):
self.model.objects.get(desc='4').move(
self.model.objects.get(desc='2').get_last_sibling(), 'right')
expected = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0),
(u'4', 1, 1),
(u'41', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_branch_first_child_root(self):
self.model.objects.get(desc='4').move(
self.model.objects.get(desc='2'), 'first-child')
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'4', 2, 1),
(u'41', 3, 0),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_branch_last_child_root(self):
self.model.objects.get(desc='4').move(
self.model.objects.get(desc='2'), 'last-child')
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'4', 2, 1),
(u'41', 3, 0),
(u'3', 1, 0)]
self.assertEqual(self.got(), expected)
class TestMoveBranch(TestNonEmptyTree):
@multi_test()
def test_move_branch_first_sibling(self):
self.model.objects.get(desc='4').move(
self.model.objects.get(desc='23'), 'first-sibling')
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'4', 2, 1),
(u'41', 3, 0),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_branch_last_sibling(self):
self.model.objects.get(desc='4').move(
self.model.objects.get(desc='23'), 'last-sibling')
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'4', 2, 1),
(u'41', 3, 0),
(u'3', 1, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_branch_left_sibling(self):
self.model.objects.get(desc='4').move(
self.model.objects.get(desc='23'), 'left')
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'21', 2, 0),
(u'22', 2, 0),
(u'4', 2, 1),
(u'41', 3, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_branch_right_sibling(self):
self.model.objects.get(desc='4').move(
self.model.objects.get(desc='23'), 'right')
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'4', 2, 1),
(u'41', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_branch_left_noleft_sibling(self):
self.model.objects.get(desc='4').move(
self.model.objects.get(desc='23').get_first_sibling(), 'left')
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'4', 2, 1),
(u'41', 3, 0),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_branch_right_noright_sibling(self):
self.model.objects.get(desc='4').move(
self.model.objects.get(desc='23').get_last_sibling(), 'right')
expected = [(u'1', 1, 0),
(u'2', 1, 5),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 1),
(u'231', 3, 0),
(u'24', 2, 0),
(u'4', 2, 1),
(u'41', 3, 0),
(u'3', 1, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_branch_left_itself_sibling(self):
self.model.objects.get(desc='4').move(
self.model.objects.get(desc='4'), 'left')
self.assertEqual(self.got(), self.unchanged)
@multi_test()
def test_move_branch_first_child(self):
self.model.objects.get(desc='4').move(
self.model.objects.get(desc='23'), 'first-child')
expected = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 2),
(u'4', 3, 1),
(u'41', 4, 0),
(u'231', 3, 0),
(u'24', 2, 0),
(u'3', 1, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_branch_last_child(self):
self.model.objects.get(desc='4').move(
self.model.objects.get(desc='23'), 'last-child')
expected = [(u'1', 1, 0),
(u'2', 1, 4),
(u'21', 2, 0),
(u'22', 2, 0),
(u'23', 2, 2),
(u'231', 3, 0),
(u'4', 3, 1),
(u'41', 4, 0),
(u'24', 2, 0),
(u'3', 1, 0)]
self.assertEqual(self.got(), expected)
class TestTreeSorted(TestTreeBase):
def got(self):
return [(o.val1, o.val2, o.desc, o.get_depth(), o.get_children_count())
for o in self.sorted_model.get_tree()]
@multi_test()
def test_add_root_sorted(self):
self.sorted_model.add_root(val1=3, val2=3, desc='zxy')
self.sorted_model.add_root(val1=1, val2=4, desc='bcd')
self.sorted_model.add_root(val1=2, val2=5, desc='zxy')
self.sorted_model.add_root(val1=3, val2=3, desc='abc')
self.sorted_model.add_root(val1=4, val2=1, desc='fgh')
self.sorted_model.add_root(val1=3, val2=3, desc='abc')
self.sorted_model.add_root(val1=2, val2=2, desc='qwe')
self.sorted_model.add_root(val1=3, val2=2, desc='vcx')
expected = [(1, 4, u'bcd', 1, 0),
(2, 2, u'qwe', 1, 0),
(2, 5, u'zxy', 1, 0),
(3, 2, u'vcx', 1, 0),
(3, 3, u'abc', 1, 0),
(3, 3, u'abc', 1, 0),
(3, 3, u'zxy', 1, 0),
(4, 1, u'fgh', 1, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_add_child_sorted(self):
root = self.sorted_model.add_root(val1=0, val2=0, desc='aaa')
root.add_child(val1=3, val2=3, desc='zxy')
root.add_child(val1=1, val2=4, desc='bcd')
root.add_child(val1=2, val2=5, desc='zxy')
root.add_child(val1=3, val2=3, desc='abc')
root.add_child(val1=4, val2=1, desc='fgh')
root.add_child(val1=3, val2=3, desc='abc')
root.add_child(val1=2, val2=2, desc='qwe')
root.add_child(val1=3, val2=2, desc='vcx')
expected = [(0, 0, u'aaa', 1, 8),
(1, 4, u'bcd', 2, 0),
(2, 2, u'qwe', 2, 0),
(2, 5, u'zxy', 2, 0),
(3, 2, u'vcx', 2, 0),
(3, 3, u'abc', 2, 0),
(3, 3, u'abc', 2, 0),
(3, 3, u'zxy', 2, 0),
(4, 1, u'fgh', 2, 0)]
self.assertEqual(self.got(), expected)
@multi_test()
def test_move_sorted(self):
self.sorted_model.add_root(val1=3, val2=3, desc='zxy')
self.sorted_model.add_root(val1=1, val2=4, desc='bcd')
self.sorted_model.add_root(val1=2, val2=5, desc='zxy')
self.sorted_model.add_root(val1=3, val2=3, desc='abc')
self.sorted_model.add_root(val1=4, val2=1, desc='fgh')
self.sorted_model.add_root(val1=3, val2=3, desc='abc')
self.sorted_model.add_root(val1=2, val2=2, desc='qwe')
self.sorted_model.add_root(val1=3, val2=2, desc='vcx')
root_nodes = self.sorted_model.get_root_nodes()
target = root_nodes[0]
for node in root_nodes[1:]:
# because raw queries don't update django objects
if self.sorted_model == NS_TestNodeSorted:
node = self.sorted_model.objects.get(pk=node.id)
target = self.sorted_model.objects.get(pk=target.id)
node.move(target, 'sorted-child')
expected = [(1, 4, u'bcd', 1, 7),
(2, 2, u'qwe', 2, 0),
(2, 5, u'zxy', 2, 0),
(3, 2, u'vcx', 2, 0),
(3, 3, u'abc', 2, 0),
(3, 3, u'abc', 2, 0),
(3, 3, u'zxy', 2, 0),
(4, 1, u'fgh', 2, 0)]
self.assertEqual(self.got(), expected)
class TestMP_TreeAlphabet(TestCase):
def test_alphabet(self):
if not os.getenv('TREEBEARD_TEST_ALPHABET', False):
            # run this test only if the environment variable is set
return
basealpha = numconv.BASE85
got_err = False
last_good = None
for alphabetlen in range(35, len(basealpha)+1):
alphabet = basealpha[0:alphabetlen]
expected = [alphabet[0]+char for char in alphabet[1:]]
expected.extend([alphabet[1]+char for char in alphabet])
expected.append(alphabet[2]+alphabet[0])
# remove all nodes
MP_TestNodeAlphabet.objects.all().delete()
# change the model's alphabet
MP_TestNodeAlphabet.alphabet = alphabet
# insert root nodes
for pos in range(len(alphabet)*2):
try:
MP_TestNodeAlphabet.add_root(numval=pos)
                except Exception:
got_err = True
break
if got_err:
break
got = [obj.path for obj in MP_TestNodeAlphabet.objects.all()]
if got != expected:
got_err = True
last_good = alphabet
        print('\nThe best BASE85 based alphabet for your setup is: %s'
              % (last_good, ))
class TestHelpers(TestTreeBase):
def setUp(self):
for model in (MP_TestNode, AL_TestNode, NS_TestNode):
model.load_bulk(BASE_DATA)
for node in model.get_root_nodes():
model.load_bulk(BASE_DATA, node)
model.add_root(desc='5')
@multi_test()
def test_descendants_group_count_root(self):
expected = [(o.desc, o.get_descendant_count())
for o in self.model.get_root_nodes()]
got = [(o.desc, o.descendants_count)
for o in self.model.get_descendants_group_count()]
self.assertEqual(got, expected)
@multi_test()
def test_descendants_group_count_node(self):
parent = self.model.get_root_nodes().get(desc='2')
expected = [(o.desc, o.get_descendant_count())
for o in parent.get_children()]
got = [(o.desc, o.descendants_count)
for o in self.model.get_descendants_group_count(parent)]
self.assertEqual(got, expected)
class TestMP_TreeSortedAutoNow(TestCase):
"""
    The sorting mechanism used by treebeard when adding a node can fail if
    the ordering uses an "auto_now" field.
"""
def test_sorted_by_autonow_workaround(self):
"""
        Workaround: pass an explicit ``created`` value so the sort key
        exists before the save.
"""
import datetime
for i in range(1, 5):
MP_TestNodeSortedAutoNow.add_root(desc='node%d' % (i, ),
created=datetime.datetime.now())
def test_sorted_by_autonow_FAIL(self):
"""
        This test documents a known bug: without an explicit value for the
        auto_now sort field, adding a second root raises ValueError.
        TODO: fix this, somehow.
"""
MP_TestNodeSortedAutoNow.add_root(desc='node1')
self.assertRaises(ValueError, MP_TestNodeSortedAutoNow.add_root,
desc='node2')
class TestMP_TreeStepOverflow(TestCase):
def test_add_root(self):
method = MP_TestNodeSmallStep.add_root
for i in range(1, 10):
method()
self.assertRaises(PathOverflow, method)
def test_add_child(self):
root = MP_TestNodeSmallStep.add_root()
method = root.add_child
for i in range(1, 10):
method()
self.assertRaises(PathOverflow, method)
def test_add_sibling(self):
root = MP_TestNodeSmallStep.add_root()
for i in range(1, 10):
root.add_child()
method = root.get_last_child().add_sibling
positions = ('first-sibling', 'left', 'right', 'last-sibling')
for pos in positions:
self.assertRaises(PathOverflow, method, pos)
def test_move(self):
root = MP_TestNodeSmallStep.add_root()
for i in range(1, 10):
root.add_child()
newroot = MP_TestNodeSmallStep.add_root()
targets = [(root, ['first-child', 'last-child']),
(root.get_first_child(), ['first-sibling',
'left',
'right',
'last-sibling'])]
for target, positions in targets:
for pos in positions:
self.assertRaises(PathOverflow, newroot.move, target, pos)
class TestMP_TreeShortPath(TestCase):
"""
Here we test a tree with a very small path field (max_length=4) and a
steplen of 1
"""
def test_short_path(self):
obj = MP_TestNodeShortPath.add_root()
obj = obj.add_child().add_child().add_child()
self.assertRaises(PathOverflow, obj.add_child)
class TestMP_TreeFindProblems(TestTreeBase):
def test_find_problems(self):
model = MP_TestNodeAlphabet
model.alphabet = '01234'
model(path='01', depth=1, numchild=0, numval=0).save()
model(path='1', depth=1, numchild=0, numval=0).save()
model(path='111', depth=1, numchild=0, numval=0).save()
model(path='abcd', depth=1, numchild=0, numval=0).save()
model(path='qa#$%!', depth=1, numchild=0, numval=0).save()
model(path='0201', depth=2, numchild=0, numval=0).save()
model(path='020201', depth=3, numchild=0, numval=0).save()
model(path='03', depth=1, numchild=2, numval=0).save()
model(path='0301', depth=2, numchild=0, numval=0).save()
model(path='030102', depth=3, numchild=10, numval=0).save()
model(path='04', depth=10, numchild=1, numval=0).save()
model(path='0401', depth=20, numchild=0, numval=0).save()
evil_chars, bad_steplen, orphans, wrong_depth, wrong_numchild = \
model.find_problems()
self.assertEqual(['abcd', 'qa#$%!'],
[o.path for o in model.objects.filter(id__in=evil_chars)])
self.assertEqual(['1', '111'],
[o.path for o in model.objects.filter(id__in=bad_steplen)])
self.assertEqual(['0201', '020201'],
[o.path for o in model.objects.filter(id__in=orphans)])
self.assertEqual(['03', '0301', '030102'],
[o.path for o in model.objects.filter(id__in=wrong_numchild)])
self.assertEqual(['04', '0401'],
[o.path for o in model.objects.filter(id__in=wrong_depth)])
class TestMP_TreeFix(TestTreeBase):
def setUp(self):
super(TestMP_TreeFix, self).setUp()
self.expected_no_holes = {
MP_TestNodeShortPath: [
(u'1', u'b', 1, 2),
(u'11', u'u', 2, 1),
(u'111', u'i', 3, 1),
(u'1111', u'e', 4, 0),
(u'12', u'o', 2, 0),
(u'2', u'd', 1, 0),
(u'3', u'g', 1, 0),
(u'4', u'a', 1, 4),
(u'41', u'a', 2, 0),
(u'42', u'a', 2, 0),
(u'43', u'u', 2, 1),
(u'431', u'i', 3, 1),
(u'4311', u'e', 4, 0),
(u'44', u'o', 2, 0)],
MP_TestSortedNodeShortPath: [
(u'1', u'a', 1, 4),
(u'11', u'a', 2, 0),
(u'12', u'a', 2, 0),
(u'13', u'o', 2, 0),
(u'14', u'u', 2, 1),
(u'141', u'i', 3, 1),
(u'1411', u'e', 4, 0),
(u'2', u'b', 1, 2),
(u'21', u'o', 2, 0),
(u'22', u'u', 2, 1),
(u'221', u'i', 3, 1),
(u'2211', u'e', 4, 0),
(u'3', u'd', 1, 0),
(u'4', u'g', 1, 0)]}
self.expected_with_holes = {
MP_TestNodeShortPath: [
(u'1', u'b', 1L, 2L),
(u'13', u'u', 2L, 1L),
(u'134', u'i', 3L, 1L),
(u'1343', u'e', 4L, 0L),
(u'14', u'o', 2L, 0L),
(u'2', u'd', 1L, 0L),
(u'3', u'g', 1L, 0L),
(u'4', u'a', 1L, 4L),
(u'41', u'a', 2L, 0L),
(u'42', u'a', 2L, 0L),
(u'43', u'u', 2L, 1L),
(u'434', u'i', 3L, 1L),
(u'4343', u'e', 4L, 0L),
(u'44', u'o', 2L, 0L)],
MP_TestSortedNodeShortPath: [
(u'1', u'b', 1L, 2L),
(u'13', u'u', 2L, 1L),
(u'134', u'i', 3L, 1L),
(u'1343', u'e', 4L, 0L),
(u'14', u'o', 2L, 0L),
(u'2', u'd', 1L, 0L),
(u'3', u'g', 1L, 0L),
(u'4', u'a', 1L, 4L),
(u'41', u'a', 2L, 0L),
(u'42', u'a', 2L, 0L),
(u'43', u'u', 2L, 1L),
(u'434', u'i', 3L, 1L),
(u'4343', u'e', 4L, 0L),
(u'44', u'o', 2L, 0L)]}
def got(self, model):
return [(o.path, o.desc, o.get_depth(), o.get_children_count())
for o in model.get_tree()]
def add_broken_test_data(self, model):
model(path='4', depth=2, numchild=2, desc='a').save()
model(path='13', depth=1000, numchild=0, desc='u').save()
model(path='14', depth=4, numchild=500, desc='o').save()
model(path='134', depth=321, numchild=543, desc='i').save()
model(path='1343', depth=321, numchild=543, desc='e').save()
model(path='42', depth=1, numchild=1, desc='a').save()
model(path='43', depth=1000, numchild=0, desc='u').save()
model(path='44', depth=4, numchild=500, desc='o').save()
model(path='434', depth=321, numchild=543, desc='i').save()
model(path='4343', depth=321, numchild=543, desc='e').save()
model(path='41', depth=1, numchild=1, desc='a').save()
model(path='3', depth=221, numchild=322, desc='g').save()
model(path='1', depth=10, numchild=3, desc='b').save()
model(path='2', depth=10, numchild=3, desc='d').save()
def test_fix_tree_non_destructive(self):
for model in (MP_TestNodeShortPath, MP_TestSortedNodeShortPath):
self.add_broken_test_data(model)
model.fix_tree(destructive=False)
self.assertEqual(self.got(model), self.expected_with_holes[model])
model.find_problems()
def test_fix_tree_destructive(self):
for model in (MP_TestNodeShortPath, MP_TestSortedNodeShortPath):
self.add_broken_test_data(model)
model.fix_tree(destructive=True)
self.assertEqual(self.got(model), self.expected_no_holes[model])
model.find_problems()
class TestIssue14(TestCase):
"test for http://code.google.com/p/django-treebeard/issues/detail?id=14"
def test_issue_14(self):
if not HAS_DJANGO_AUTH: # pragma: no cover
self.fail('this test needs django.contrib.auth in INSTALLED_APPS')
# Using AnonymousUser() in the querysets will expose non-treebeard
# related problems in Django 1.0
#
# Postgres:
# ProgrammingError: can't adapt
# SQLite:
# InterfaceError: Error binding parameter 4 - probably unsupported
# type.
# MySQL compared a string to an integer field:
# `treebeard_mp_testissue14_users`.`user_id` = 'AnonymousUser'
#
# Using a None field instead works (will be translated to IS NULL).
#
# anonuserobj = AnonymousUser()
anonuserobj = None
def qs_check(qs, expected):
self.assertEqual(
[o.name for o in qs],
expected)
user = User.objects.create_user('test_user', '[email protected]',
'testpasswd')
user.save()
root = MP_TestIssue14.add_root(name="the root node")
first = root.add_child(name="first")
second = root.add_child(name="second")
qs_check(root.get_children(), ['first', 'second'])
qs_check(root.get_children().filter(Q(name="first")), ['first'])
qs_check(root.get_children().filter(Q(users=user)), [])
qs_check(
root.get_children().filter(Q(name="first") | Q(users=user)),
['first'])
user = anonuserobj
qs_check(
root.get_children().filter(Q(name="first") | Q(users=user)),
['first', 'second'])
user = User.objects.get(username="test_user")
second.users.add(user)
qs_check(
root.get_children().filter(Q(name="first") | Q(users=user)),
['first', 'second'])
user = anonuserobj
qs_check(
root.get_children().filter(Q(name="first") | Q(users=user)),
['first'])
| apache-2.0 | -8,599,599,687,174,818,000 | 33.097888 | 79 | 0.443612 | false |
futurecolors/gopython3 | gopython3/core/rest.py | 1 | 2372 | from django.db import transaction
from rest_framework import viewsets, routers, status, mixins
from rest_framework.decorators import api_view, action
from rest_framework.generics import RetrieveAPIView, ListAPIView
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework_extensions.mixins import DetailSerializerMixin
from .serializers import JobSerializer, PackageSerializer, JobDetailSerialzier
from .models import Job, Spec, TASK_STATUS
class JobViewSet(DetailSerializerMixin, mixins.CreateModelMixin, viewsets.ReadOnlyModelViewSet):
model = Job
serializer_class = JobSerializer
serializer_detail_class = JobDetailSerialzier
def create(self, request, *args, **kwargs):
try:
with transaction.atomic():
job = Job.objects.create_from_requirements(request.DATA['requirements'])
job.start()
serializer = self.get_serializer(job)
headers = self.get_success_headers(serializer.data)
except Exception as e:
return Response({'requirements': 'Bad requirements. %s' % e},
status=status.HTTP_400_BAD_REQUEST)
else:
return Response(serializer.data, status=status.HTTP_201_CREATED,
headers=headers)
@action()
def restart(self, request, pk=None):
""" Restart existing job """
job = self.get_object()
if job.status in (TASK_STATUS.error, TASK_STATUS.success):
job.start()
return Response({'message': 'Job #%s has been restarted' % pk}, status=status.HTTP_202_ACCEPTED)
else:
return Response({'message': 'Job #%s was not restarted. It is %s.' % (pk, job.status)}, status=status.HTTP_400_BAD_REQUEST)
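# A hedged usage sketch: creating a job presumably means POSTing a pip-style
# requirements string (the exact accepted format is whatever
# Job.objects.create_from_requirements() supports; it is defined elsewhere):
#
#   POST .../jobs/   (wherever the router below is mounted)
#   {"requirements": "Django==1.5.4\nrequests"}
#
# Success returns 201 with the serialized job; a bad payload returns 400
# with {'requirements': 'Bad requirements. ...'}.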
class PackageListView(ListAPIView):
model = Spec
serializer_class = PackageSerializer
class PackageView(RetrieveAPIView):
model = Spec
serializer_class = PackageSerializer
lookup_field = 'code'
@api_view(('GET',))
def api_root(request, format=None):
return Response({
'jobs': reverse('job-list', request=request, format=format),
'packages': reverse('spec-list', request=request, format=format)
})
router = routers.SimpleRouter()
router.include_format_suffixes = False
router.register(r'jobs', JobViewSet)
| mit | -1,590,789,759,737,583,600 | 36.0625 | 135 | 0.676644 | false |
Detailscool/YHSpider | BillboardAnalysis/bill/spiders/billtoprap.py | 1 | 1396 | #!/usr/bin/python
# -*- coding:utf-8 -*-
# billtoprap.py
# Created by HenryLee on 2017/9/14.
# Copyright © 2017. All rights reserved.
# Description : Scrapy spider for the Billboard year-end Hot Rap Songs charts (2006-2016).
from bill.items import BillItem
from scrapy import Spider, Request
from bs4 import BeautifulSoup
class BillSpider(Spider):
name = 'billtoprap_spider'
    allowed_domains = ['billboard.com']
# start_urls = ['http://www.billboard.com/charts/year-end/2014/hot-rap-songs']
start_urls = ['http://www.billboard.com/charts/year-end/' + str(i) + '/hot-rap-songs' for i in range(2006, 2017)]
def parse(self, response):
artist_selectors = response.xpath('//a[@class="ye-chart__item-subtitle-link"]')
year = response.xpath('.//div[@class="ye-chart__year-nav"]/text()').extract()[2].strip('\n')
for selector in artist_selectors:
parent = selector.xpath("ancestor::div[@class='ye-chart__item-text']")[0]
artist = selector.xpath('text()').extract_first()
name = parent.xpath('h1[@class="ye-chart__item-title"]')[0].xpath('text()').extract_first().strip()
ranking = parent.xpath('div[@class="ye-chart__item-rank"]')[0].xpath('text()').extract_first()
item = BillItem()
item['ranking'] = ranking
item['name'] = name
item['artists'] = artist
item['year'] = year
yield item
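# Run sketch, assuming a standard Scrapy project layout:
#   scrapy crawl billtoprap_spider -o hot_rap_songs.json
# Each yielded BillItem carries ranking, name, artists and chart year, one
# record per charted song across the 2006-2016 year-end lists.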
| mit | -5,613,891,393,085,021,000 | 38.8 | 117 | 0.608758 | false |
HewlettPackard/python-hpOneView | hpOneView/oneview_client.py | 1 | 39340 | # -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2018) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
"""
This module implements a common client for the HPE OneView REST API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
import json
import os
from hpOneView.connection import connection
from hpOneView.image_streamer.image_streamer_client import ImageStreamerClient
from hpOneView.resources.security.certificate_authority import CertificateAuthority
from hpOneView.resources.servers.connections import Connections
from hpOneView.resources.networking.fc_networks import FcNetworks
from hpOneView.resources.networking.fcoe_networks import FcoeNetworks
from hpOneView.resources.networking.ethernet_networks import EthernetNetworks
from hpOneView.resources.networking.connection_templates import ConnectionTemplates
from hpOneView.resources.networking.fabrics import Fabrics
from hpOneView.resources.networking.network_sets import NetworkSets
from hpOneView.resources.data_services.metric_streaming import MetricStreaming
from hpOneView.resources.networking.switches import Switches
from hpOneView.resources.networking.switch_types import SwitchTypes
from hpOneView.resources.activity.tasks import Tasks
from hpOneView.resources.settings.restores import Restores
from hpOneView.resources.settings.scopes import Scopes
from hpOneView.resources.settings.licenses import Licenses
from hpOneView.resources.servers.enclosures import Enclosures
from hpOneView.resources.servers.logical_enclosures import LogicalEnclosures
from hpOneView.resources.servers.enclosure_groups import EnclosureGroups
from hpOneView.resources.servers.server_hardware import ServerHardware
from hpOneView.resources.servers.server_hardware_types import ServerHardwareTypes
from hpOneView.resources.servers.id_pools_ranges import IdPoolsRanges
from hpOneView.resources.servers.id_pools_ipv4_ranges import IdPoolsIpv4Ranges
from hpOneView.resources.servers.id_pools_ipv4_subnets import IdPoolsIpv4Subnets
from hpOneView.resources.servers.id_pools import IdPools
from hpOneView.resources.networking.interconnects import Interconnects
from hpOneView.resources.networking.interconnect_types import InterconnectTypes
from hpOneView.resources.networking.interconnect_link_topologies import InterconnectLinkTopologies
from hpOneView.resources.networking.sas_interconnect_types import SasInterconnectTypes
from hpOneView.resources.networking.internal_link_sets import InternalLinkSets
from hpOneView.resources.uncategorized.unmanaged_devices import UnmanagedDevices
from hpOneView.resources.networking.logical_downlinks import LogicalDownlinks
from hpOneView.resources.facilities.power_devices import PowerDevices
from hpOneView.resources.facilities.racks import Racks
from hpOneView.resources.facilities.datacenters import Datacenters
from hpOneView.resources.fc_sans.managed_sans import ManagedSANs
from hpOneView.resources.fc_sans.san_managers import SanManagers
from hpOneView.resources.fc_sans.endpoints import Endpoints
from hpOneView.resources.networking.logical_interconnects import LogicalInterconnects
from hpOneView.resources.networking.logical_interconnect_groups import LogicalInterconnectGroups
from hpOneView.resources.networking.sas_logical_interconnects import SasLogicalInterconnects
from hpOneView.resources.networking.logical_switch_groups import LogicalSwitchGroups
from hpOneView.resources.networking.logical_switches import LogicalSwitches
from hpOneView.resources.networking.sas_interconnects import SasInterconnects
from hpOneView.resources.servers.server_profiles import ServerProfiles
from hpOneView.resources.servers.server_profile_templates import ServerProfileTemplate
from hpOneView.resources.storage.sas_logical_jbods import SasLogicalJbods
from hpOneView.resources.storage.storage_systems import StorageSystems
from hpOneView.resources.storage.storage_pools import StoragePools
from hpOneView.resources.storage.storage_volume_templates import StorageVolumeTemplates
from hpOneView.resources.storage.storage_volume_attachments import StorageVolumeAttachments
from hpOneView.resources.storage.drive_enclosures import DriveEnclosures
from hpOneView.resources.settings.firmware_drivers import FirmwareDrivers
from hpOneView.resources.settings.firmware_bundles import FirmwareBundles
from hpOneView.resources.settings.backups import Backups
from hpOneView.resources.storage.volumes import Volumes
from hpOneView.resources.storage.sas_logical_jbod_attachments import SasLogicalJbodAttachments
from hpOneView.resources.networking.uplink_sets import UplinkSets
from hpOneView.resources.servers.migratable_vc_domains import MigratableVcDomains
from hpOneView.resources.networking.sas_logical_interconnect_groups import SasLogicalInterconnectGroups
from hpOneView.resources.search.index_resources import IndexResources
from hpOneView.resources.search.labels import Labels
from hpOneView.resources.activity.alerts import Alerts
from hpOneView.resources.activity.events import Events
from hpOneView.resources.uncategorized.os_deployment_plans import OsDeploymentPlans
from hpOneView.resources.uncategorized.os_deployment_servers import OsDeploymentServers
from hpOneView.resources.security.certificate_rabbitmq import CertificateRabbitMQ
from hpOneView.resources.security.login_details import LoginDetails
from hpOneView.resources.security.roles import Roles
from hpOneView.resources.security.users import Users
from hpOneView.resources.settings.appliance_device_read_community import ApplianceDeviceReadCommunity
from hpOneView.resources.settings.appliance_device_snmp_v1_trap_destinations import ApplianceDeviceSNMPv1TrapDestinations
from hpOneView.resources.settings.appliance_device_snmp_v3_trap_destinations import ApplianceDeviceSNMPv3TrapDestinations
from hpOneView.resources.settings.appliance_device_snmp_v3_users import ApplianceDeviceSNMPv3Users
from hpOneView.resources.settings.appliance_node_information import ApplianceNodeInformation
from hpOneView.resources.settings.appliance_time_and_locale_configuration import ApplianceTimeAndLocaleConfiguration
from hpOneView.resources.settings.versions import Versions
ONEVIEW_CLIENT_INVALID_PROXY = 'Invalid Proxy format'
class OneViewClient(object):
DEFAULT_API_VERSION = 300
def __init__(self, config):
self.__connection = connection(config["ip"], config.get('api_version', self.DEFAULT_API_VERSION), config.get('ssl_certificate', False),
config.get('timeout'))
self.__image_streamer_ip = config.get("image_streamer_ip")
self.__set_proxy(config)
self.__connection.login(config["credentials"])
self.__certificate_authority = None
self.__connections = None
self.__connection_templates = None
self.__fc_networks = None
self.__fcoe_networks = None
self.__ethernet_networks = None
self.__fabrics = None
self.__network_sets = None
self.__switches = None
self.__switch_types = None
self.__tasks = None
self.__scopes = None
self.__enclosures = None
self.__logical_enclosures = None
self.__enclosure_groups = None
self.__metric_streaming = None
self.__server_hardware = None
self.__server_hardware_types = None
self.__id_pools_vsn_ranges = None
self.__id_pools_vmac_ranges = None
self.__id_pools_vwwn_ranges = None
self.__id_pools_ipv4_ranges = None
self.__id_pools_ipv4_subnets = None
self.__id_pools = None
self.__interconnects = None
self.__interconnect_types = None
self.__interconnect_link_topologies = None
self.__sas_interconnect_types = None
self.__internal_link_sets = None
self.__power_devices = None
self.__unmanaged_devices = None
self.__racks = None
self.__roles = None
self.__datacenters = None
self.__san_managers = None
self.__endpoints = None
self.__logical_interconnects = None
self.__sas_logical_interconnects = None
self.__logical_interconnect_groups = None
self.__logical_switch_groups = None
self.__logical_switches = None
self.__logical_downlinks = None
self.__restores = None
self.__server_profiles = None
self.__server_profile_templates = None
self.__sas_logical_jbods = None
self.__storage_systems = None
self.__storage_pools = None
self.__storage_volume_templates = None
self.__storage_volume_attachments = None
self.__firmware_drivers = None
self.__firmware_bundles = None
self.__uplink_sets = None
self.__volumes = None
self.__sas_logical_jbod_attachments = None
self.__managed_sans = None
self.__migratable_vc_domains = None
self.__sas_interconnects = None
self.__index_resources = None
self.__labels = None
self.__sas_logical_interconnect_groups = None
self.__alerts = None
self.__events = None
self.__drive_enclures = None
self.__os_deployment_plans = None
self.__os_deployment_servers = None
self.__certificate_rabbitmq = None
self.__users = None
self.__appliance_device_read_community = None
self.__appliance_device_snmp_v1_trap_destinations = None
self.__appliance_device_snmp_v3_trap_destinations = None
self.__appliance_device_snmp_v3_users = None
self.__appliance_time_and_locale_configuration = None
self.__appliance_node_information = None
self.__versions = None
self.__backups = None
self.__login_details = None
self.__licenses = None
@classmethod
def from_json_file(cls, file_name):
"""
Construct OneViewClient using a json file.
Args:
file_name: json full path.
Returns:
OneViewClient:
"""
with open(file_name) as json_data:
config = json.load(json_data)
return cls(config)
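    # A minimal sketch of the expected JSON file; keys mirror the config
    # dict consumed by __init__, and all values here are placeholders:
    #
    # {
    #     "ip": "oneview.example.com",
    #     "api_version": 300,
    #     "credentials": {"userName": "administrator", "password": "secret"}
    # }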
@classmethod
def from_environment_variables(cls):
"""
Construct OneViewClient using environment variables.
Allowed variables: ONEVIEWSDK_IP (required), ONEVIEWSDK_USERNAME (required), ONEVIEWSDK_PASSWORD (required),
ONEVIEWSDK_AUTH_LOGIN_DOMAIN, ONEVIEWSDK_API_VERSION, ONEVIEWSDK_IMAGE_STREAMER_IP, ONEVIEWSDK_SESSIONID, ONEVIEWSDK_SSL_CERTIFICATE,
ONEVIEWSDK_CONNECTION_TIMEOUT and ONEVIEWSDK_PROXY.
Returns:
OneViewClient:
"""
ip = os.environ.get('ONEVIEWSDK_IP', '')
image_streamer_ip = os.environ.get('ONEVIEWSDK_IMAGE_STREAMER_IP', '')
api_version = int(os.environ.get('ONEVIEWSDK_API_VERSION', OneViewClient.DEFAULT_API_VERSION))
ssl_certificate = os.environ.get('ONEVIEWSDK_SSL_CERTIFICATE', '')
username = os.environ.get('ONEVIEWSDK_USERNAME', '')
auth_login_domain = os.environ.get('ONEVIEWSDK_AUTH_LOGIN_DOMAIN', '')
password = os.environ.get('ONEVIEWSDK_PASSWORD', '')
proxy = os.environ.get('ONEVIEWSDK_PROXY', '')
sessionID = os.environ.get('ONEVIEWSDK_SESSIONID', '')
timeout = os.environ.get('ONEVIEWSDK_CONNECTION_TIMEOUT')
config = dict(ip=ip,
image_streamer_ip=image_streamer_ip,
api_version=api_version,
ssl_certificate=ssl_certificate,
credentials=dict(userName=username, authLoginDomain=auth_login_domain, password=password, sessionID=sessionID),
proxy=proxy, timeout=timeout)
return cls(config)
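    # Shell sketch -- only the required variables, per the docstring above:
    #   export ONEVIEWSDK_IP=oneview.example.com
    #   export ONEVIEWSDK_USERNAME=administrator
    #   export ONEVIEWSDK_PASSWORD=secret
    # then call OneViewClient.from_environment_variables() from your script.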
def __set_proxy(self, config):
"""
Set proxy if needed
Args:
config: Config dict
"""
if "proxy" in config and config["proxy"]:
proxy = config["proxy"]
splitted = proxy.split(':')
if len(splitted) != 2:
raise ValueError(ONEVIEW_CLIENT_INVALID_PROXY)
proxy_host = splitted[0]
proxy_port = int(splitted[1])
self.__connection.set_proxy(proxy_host, proxy_port)
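    # Example: a config entry like 'proxy': 'proxy.example.com:8080' splits
    # into host 'proxy.example.com' and port 8080; any value without exactly
    # one ':' raises ValueError(ONEVIEW_CLIENT_INVALID_PROXY).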
@property
def api_version(self):
"""
Gets the OneView API Version.
Returns:
int: API Version.
"""
return self.__connection._apiVersion
@property
def connection(self):
"""
Gets the underlying HPE OneView connection used by the OneViewClient.
Returns:
connection:
"""
return self.__connection
def create_image_streamer_client(self):
"""
Create the Image Streamer API Client.
Returns:
ImageStreamerClient:
"""
image_streamer = ImageStreamerClient(self.__image_streamer_ip,
self.__connection.get_session_id(),
self.__connection._apiVersion,
self.__connection._sslBundle)
return image_streamer
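    # Usage sketch (requires "image_streamer_ip" in the client config):
    #   oneview = OneViewClient.from_json_file('config.json')
    #   i3s = oneview.create_image_streamer_client()
    # The returned client reuses this session's ID, API version and SSL bundle.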
@property
def certificate_authority(self):
"""
Gets the Certificate Authority API client.
Returns:
CertificateAuthority:
"""
if not self.__certificate_authority:
self.__certificate_authority = CertificateAuthority(self.__connection)
return self.__certificate_authority
@property
def connections(self):
"""
Gets the Connections API client.
Returns:
Connections:
"""
if not self.__connections:
self.__connections = Connections(
self.__connection)
return self.__connections
@property
def connection_templates(self):
"""
Gets the ConnectionTemplates API client.
Returns:
ConnectionTemplates:
"""
if not self.__connection_templates:
self.__connection_templates = ConnectionTemplates(
self.__connection)
return self.__connection_templates
@property
def fc_networks(self):
"""
Gets the FcNetworks API client.
Returns:
FcNetworks:
"""
if not self.__fc_networks:
self.__fc_networks = FcNetworks(self.__connection)
return self.__fc_networks
@property
def fcoe_networks(self):
"""
Gets the FcoeNetworks API client.
Returns:
FcoeNetworks:
"""
if not self.__fcoe_networks:
self.__fcoe_networks = FcoeNetworks(self.__connection)
return self.__fcoe_networks
@property
def ethernet_networks(self):
"""
Gets the EthernetNetworks API client.
Returns:
EthernetNetworks:
"""
if not self.__ethernet_networks:
self.__ethernet_networks = EthernetNetworks(self.__connection)
return self.__ethernet_networks
@property
def fabrics(self):
"""
Gets the Fabrics API client.
Returns:
Fabrics:
"""
if not self.__fabrics:
self.__fabrics = Fabrics(self.__connection)
return self.__fabrics
@property
def restores(self):
"""
Gets the Restores API client.
Returns:
Restores:
"""
if not self.__restores:
self.__restores = Restores(self.__connection)
return self.__restores
@property
def scopes(self):
"""
Gets the Scopes API client.
Returns:
Scopes:
"""
if not self.__scopes:
self.__scopes = Scopes(self.__connection)
return self.__scopes
@property
def datacenters(self):
"""
Gets the Datacenters API client.
Returns:
Datacenters:
"""
if not self.__datacenters:
self.__datacenters = Datacenters(self.__connection)
return self.__datacenters
@property
def network_sets(self):
"""
Gets the NetworkSets API client.
Returns:
NetworkSets:
"""
if not self.__network_sets:
self.__network_sets = NetworkSets(self.__connection)
return self.__network_sets
@property
def server_hardware(self):
"""
Gets the ServerHardware API client.
Returns:
ServerHardware:
"""
if not self.__server_hardware:
self.__server_hardware = ServerHardware(self.__connection)
return self.__server_hardware
@property
def server_hardware_types(self):
"""
Gets the ServerHardwareTypes API client.
Returns:
ServerHardwareTypes:
"""
if not self.__server_hardware_types:
self.__server_hardware_types = ServerHardwareTypes(
self.__connection)
return self.__server_hardware_types
@property
def id_pools_vsn_ranges(self):
"""
Gets the IdPoolsRanges API Client for VSN Ranges.
Returns:
IdPoolsRanges:
"""
if not self.__id_pools_vsn_ranges:
self.__id_pools_vsn_ranges = IdPoolsRanges('vsn', self.__connection)
return self.__id_pools_vsn_ranges
@property
def id_pools_vmac_ranges(self):
"""
Gets the IdPoolsRanges API Client for VMAC Ranges.
Returns:
IdPoolsRanges:
"""
if not self.__id_pools_vmac_ranges:
self.__id_pools_vmac_ranges = IdPoolsRanges('vmac', self.__connection)
return self.__id_pools_vmac_ranges
@property
def id_pools_vwwn_ranges(self):
"""
Gets the IdPoolsRanges API Client for VWWN Ranges.
Returns:
IdPoolsRanges:
"""
if not self.__id_pools_vwwn_ranges:
self.__id_pools_vwwn_ranges = IdPoolsRanges('vwwn', self.__connection)
return self.__id_pools_vwwn_ranges
@property
def id_pools_ipv4_ranges(self):
"""
Gets the IdPoolsIpv4Ranges API client.
Returns:
IdPoolsIpv4Ranges:
"""
if not self.__id_pools_ipv4_ranges:
self.__id_pools_ipv4_ranges = IdPoolsIpv4Ranges(self.__connection)
return self.__id_pools_ipv4_ranges
@property
def id_pools_ipv4_subnets(self):
"""
Gets the IdPoolsIpv4Subnets API client.
Returns:
IdPoolsIpv4Subnets:
"""
if not self.__id_pools_ipv4_subnets:
self.__id_pools_ipv4_subnets = IdPoolsIpv4Subnets(self.__connection)
return self.__id_pools_ipv4_subnets
@property
def id_pools(self):
"""
Gets the IdPools API client.
Returns:
IdPools:
"""
if not self.__id_pools:
self.__id_pools = IdPools(self.__connection)
return self.__id_pools
@property
def switches(self):
"""
Gets the Switches API client.
Returns:
Switches:
"""
if not self.__switches:
self.__switches = Switches(self.__connection)
return self.__switches
@property
def roles(self):
"""
Gets the Roles API client.
Returns:
Roles:
"""
if not self.__roles:
self.__roles = Roles(self.__connection)
return self.__roles
@property
def switch_types(self):
"""
Gets the SwitchTypes API client.
Returns:
SwitchTypes:
"""
if not self.__switch_types:
self.__switch_types = SwitchTypes(self.__connection)
return self.__switch_types
@property
def logical_switch_groups(self):
"""
Gets the LogicalSwitchGroups API client.
Returns:
LogicalSwitchGroups:
"""
if not self.__logical_switch_groups:
self.__logical_switch_groups = LogicalSwitchGroups(self.__connection)
return self.__logical_switch_groups
@property
def logical_switches(self):
"""
Gets the LogicalSwitches API client.
Returns:
LogicalSwitches:
"""
if not self.__logical_switches:
self.__logical_switches = LogicalSwitches(self.__connection)
return self.__logical_switches
@property
def tasks(self):
"""
Gets the Tasks API client.
Returns:
Tasks:
"""
if not self.__tasks:
self.__tasks = Tasks(self.__connection)
return self.__tasks
@property
def enclosure_groups(self):
"""
Gets the EnclosureGroups API client.
Returns:
EnclosureGroups:
"""
if not self.__enclosure_groups:
self.__enclosure_groups = EnclosureGroups(self.__connection)
return self.__enclosure_groups
@property
def enclosures(self):
"""
Gets the Enclosures API client.
Returns:
Enclosures:
"""
if not self.__enclosures:
self.__enclosures = Enclosures(self.__connection)
return self.__enclosures
@property
def logical_enclosures(self):
"""
Gets the LogicalEnclosures API client.
Returns:
LogicalEnclosures:
"""
if not self.__logical_enclosures:
self.__logical_enclosures = LogicalEnclosures(self.__connection)
return self.__logical_enclosures
@property
def metric_streaming(self):
"""
Gets the MetricStreaming API client.
Returns:
MetricStreaming:
"""
if not self.__metric_streaming:
self.__metric_streaming = MetricStreaming(self.__connection)
return self.__metric_streaming
@property
def interconnects(self):
"""
Gets the Interconnects API client.
Returns:
Interconnects:
"""
if not self.__interconnects:
self.__interconnects = Interconnects(self.__connection)
return self.__interconnects
@property
def interconnect_types(self):
"""
Gets the InterconnectTypes API client.
Returns:
InterconnectTypes:
"""
if not self.__interconnect_types:
self.__interconnect_types = InterconnectTypes(self.__connection)
return self.__interconnect_types
@property
def interconnect_link_topologies(self):
"""
Gets the InterconnectLinkTopologies API client.
Returns:
InterconnectLinkTopologies:
"""
if not self.__interconnect_link_topologies:
self.__interconnect_link_topologies = InterconnectLinkTopologies(self.__connection)
return self.__interconnect_link_topologies
@property
def sas_interconnect_types(self):
"""
Gets the SasInterconnectTypes API client.
Returns:
SasInterconnectTypes:
"""
if not self.__sas_interconnect_types:
self.__sas_interconnect_types = SasInterconnectTypes(self.__connection)
return self.__sas_interconnect_types
@property
def internal_link_sets(self):
"""
Gets the InternalLinkSets API client.
Returns:
InternalLinkSets:
"""
if not self.__internal_link_sets:
self.__internal_link_sets = InternalLinkSets(self.__connection)
return self.__internal_link_sets
@property
def logical_interconnect_groups(self):
"""
Gets the LogicalInterconnectGroups API client.
Returns:
LogicalInterconnectGroups:
"""
if not self.__logical_interconnect_groups:
self.__logical_interconnect_groups = LogicalInterconnectGroups(
self.__connection)
return self.__logical_interconnect_groups
@property
def logical_interconnects(self):
"""
Gets the LogicalInterconnects API client.
Returns:
LogicalInterconnects:
"""
if not self.__logical_interconnects:
self.__logical_interconnects = LogicalInterconnects(
self.__connection)
return self.__logical_interconnects
@property
def sas_logical_interconnects(self):
"""
Gets the SasLogicalInterconnects API client.
Returns:
SasLogicalInterconnects:
"""
if not self.__sas_logical_interconnects:
self.__sas_logical_interconnects = SasLogicalInterconnects(self.__connection)
return self.__sas_logical_interconnects
@property
def logical_downlinks(self):
"""
Gets the LogicalDownlinks API client.
Returns:
LogicalDownlinks:
"""
if not self.__logical_downlinks:
self.__logical_downlinks = LogicalDownlinks(
self.__connection)
return self.__logical_downlinks
@property
def power_devices(self):
"""
Gets the PowerDevices API client.
Returns:
PowerDevices:
"""
if not self.__power_devices:
self.__power_devices = PowerDevices(self.__connection)
return self.__power_devices
@property
def unmanaged_devices(self):
"""
Gets the Unmanaged Devices API client.
Returns:
UnmanagedDevices:
"""
if not self.__unmanaged_devices:
self.__unmanaged_devices = UnmanagedDevices(self.__connection)
return self.__unmanaged_devices
@property
def racks(self):
"""
Gets the Racks API client.
Returns:
Racks:
"""
if not self.__racks:
self.__racks = Racks(self.__connection)
return self.__racks
@property
def san_managers(self):
"""
Gets the SanManagers API client.
Returns:
SanManagers:
"""
if not self.__san_managers:
self.__san_managers = SanManagers(self.__connection)
return self.__san_managers
@property
def endpoints(self):
"""
Gets the Endpoints API client.
Returns:
Endpoints:
"""
if not self.__endpoints:
self.__endpoints = Endpoints(self.__connection)
return self.__endpoints
@property
def server_profiles(self):
"""
Gets the ServerProfiles API client.
Returns:
ServerProfiles:
"""
if not self.__server_profiles:
self.__server_profiles = ServerProfiles(self.__connection)
return self.__server_profiles
@property
def server_profile_templates(self):
"""
Gets the ServerProfileTemplate API client.
Returns:
ServerProfileTemplate:
"""
if not self.__server_profile_templates:
self.__server_profile_templates = ServerProfileTemplate(self.__connection)
return self.__server_profile_templates
@property
def storage_systems(self):
"""
Gets the StorageSystems API client.
Returns:
StorageSystems:
"""
if not self.__storage_systems:
self.__storage_systems = StorageSystems(self.__connection)
return self.__storage_systems
@property
def storage_pools(self):
"""
Gets the StoragePools API client.
Returns:
StoragePools:
"""
if not self.__storage_pools:
self.__storage_pools = StoragePools(self.__connection)
return self.__storage_pools
@property
def storage_volume_templates(self):
"""
Gets the StorageVolumeTemplates API client.
Returns:
StorageVolumeTemplates:
"""
if not self.__storage_volume_templates:
self.__storage_volume_templates = StorageVolumeTemplates(self.__connection)
return self.__storage_volume_templates
@property
def storage_volume_attachments(self):
"""
Gets the StorageVolumeAttachments API client.
Returns:
StorageVolumeAttachments:
"""
if not self.__storage_volume_attachments:
self.__storage_volume_attachments = StorageVolumeAttachments(self.__connection)
return self.__storage_volume_attachments
@property
def firmware_drivers(self):
"""
Gets the FirmwareDrivers API client.
Returns:
FirmwareDrivers:
"""
if not self.__firmware_drivers:
self.__firmware_drivers = FirmwareDrivers(self.__connection)
return self.__firmware_drivers
@property
def firmware_bundles(self):
"""
Gets the FirmwareBundles API client.
Returns:
FirmwareBundles:
"""
if not self.__firmware_bundles:
self.__firmware_bundles = FirmwareBundles(self.__connection)
return self.__firmware_bundles
@property
def uplink_sets(self):
"""
Gets the UplinkSets API client.
Returns:
UplinkSets:
"""
if not self.__uplink_sets:
self.__uplink_sets = UplinkSets(self.__connection)
return self.__uplink_sets
@property
def volumes(self):
"""
Gets the Volumes API client.
Returns:
Volumes:
"""
if not self.__volumes:
self.__volumes = Volumes(self.__connection)
return self.__volumes
@property
def sas_logical_jbod_attachments(self):
"""
Gets the SAS Logical JBOD Attachments client.
Returns:
SasLogicalJbodAttachments:
"""
if not self.__sas_logical_jbod_attachments:
self.__sas_logical_jbod_attachments = SasLogicalJbodAttachments(self.__connection)
return self.__sas_logical_jbod_attachments
@property
def managed_sans(self):
"""
Gets the Managed SANs API client.
Returns:
ManagedSANs:
"""
if not self.__managed_sans:
self.__managed_sans = ManagedSANs(self.__connection)
return self.__managed_sans
@property
def migratable_vc_domains(self):
"""
Gets the VC Migration Manager API client.
Returns:
MigratableVcDomains:
"""
if not self.__migratable_vc_domains:
self.__migratable_vc_domains = MigratableVcDomains(self.__connection)
return self.__migratable_vc_domains
@property
def sas_interconnects(self):
"""
Gets the SAS Interconnects API client.
Returns:
SasInterconnects:
"""
if not self.__sas_interconnects:
self.__sas_interconnects = SasInterconnects(self.__connection)
return self.__sas_interconnects
@property
def sas_logical_interconnect_groups(self):
"""
Gets the SasLogicalInterconnectGroups API client.
Returns:
SasLogicalInterconnectGroups:
"""
if not self.__sas_logical_interconnect_groups:
self.__sas_logical_interconnect_groups = SasLogicalInterconnectGroups(self.__connection)
return self.__sas_logical_interconnect_groups
@property
def drive_enclosures(self):
"""
Gets the Drive Enclosures API client.
Returns:
DriveEnclosures:
"""
if not self.__drive_enclures:
self.__drive_enclures = DriveEnclosures(self.__connection)
return self.__drive_enclures
@property
def sas_logical_jbods(self):
"""
Gets the SAS Logical JBODs API client.
Returns:
SasLogicalJbod:
"""
if not self.__sas_logical_jbods:
self.__sas_logical_jbods = SasLogicalJbods(self.__connection)
return self.__sas_logical_jbods
@property
def labels(self):
"""
Gets the Labels API client.
Returns:
Labels:
"""
if not self.__labels:
self.__labels = Labels(self.__connection)
return self.__labels
@property
def index_resources(self):
"""
Gets the Index Resources API client.
Returns:
IndexResources:
"""
if not self.__index_resources:
self.__index_resources = IndexResources(self.__connection)
return self.__index_resources
@property
def alerts(self):
"""
Gets the Alerts API client.
Returns:
Alerts:
"""
if not self.__alerts:
self.__alerts = Alerts(self.__connection)
return self.__alerts
@property
def events(self):
"""
Gets the Events API client.
Returns:
Events:
"""
if not self.__events:
self.__events = Events(self.__connection)
return self.__events
@property
def os_deployment_plans(self):
"""
Gets the Os Deployment Plans API client.
Returns:
OsDeploymentPlans:
"""
if not self.__os_deployment_plans:
self.__os_deployment_plans = OsDeploymentPlans(self.__connection)
return self.__os_deployment_plans
@property
def os_deployment_servers(self):
"""
Gets the Os Deployment Servers API client.
Returns:
OsDeploymentServers:
"""
if not self.__os_deployment_servers:
self.__os_deployment_servers = OsDeploymentServers(self.__connection)
return self.__os_deployment_servers
@property
def certificate_rabbitmq(self):
"""
Gets the Certificate RabbitMQ API client.
Returns:
CertificateRabbitMQ:
"""
if not self.__certificate_rabbitmq:
self.__certificate_rabbitmq = CertificateRabbitMQ(self.__connection)
return self.__certificate_rabbitmq
@property
def users(self):
"""
Gets the Users API client.
Returns:
Users:
"""
if not self.__users:
self.__users = Users(self.__connection)
return self.__users
@property
def appliance_device_read_community(self):
"""
Gets the ApplianceDeviceReadCommunity API client.
Returns:
ApplianceDeviceReadCommunity:
"""
if not self.__appliance_device_read_community:
self.__appliance_device_read_community = ApplianceDeviceReadCommunity(self.__connection)
return self.__appliance_device_read_community
@property
def appliance_device_snmp_v1_trap_destinations(self):
"""
Gets the ApplianceDeviceSNMPv1TrapDestinations API client.
Returns:
ApplianceDeviceSNMPv1TrapDestinations:
"""
if not self.__appliance_device_snmp_v1_trap_destinations:
self.__appliance_device_snmp_v1_trap_destinations = ApplianceDeviceSNMPv1TrapDestinations(self.__connection)
return self.__appliance_device_snmp_v1_trap_destinations
@property
def appliance_device_snmp_v3_trap_destinations(self):
"""
Gets the ApplianceDeviceSNMPv3TrapDestinations API client.
Returns:
ApplianceDeviceSNMPv3TrapDestinations:
"""
if not self.__appliance_device_snmp_v3_trap_destinations:
self.__appliance_device_snmp_v3_trap_destinations = ApplianceDeviceSNMPv3TrapDestinations(self.__connection)
return self.__appliance_device_snmp_v3_trap_destinations
@property
def appliance_device_snmp_v3_users(self):
"""
Gets the ApplianceDeviceSNMPv3Users API client.
Returns:
ApplianceDeviceSNMPv3Users:
"""
if not self.__appliance_device_snmp_v3_users:
self.__appliance_device_snmp_v3_users = ApplianceDeviceSNMPv3Users(self.__connection)
return self.__appliance_device_snmp_v3_users
@property
def appliance_node_information(self):
"""
Gets the ApplianceNodeInformation API client.
Returns:
ApplianceNodeInformation:
"""
if not self.__appliance_node_information:
self.__appliance_node_information = ApplianceNodeInformation(self.__connection)
return self.__appliance_node_information
@property
def appliance_time_and_locale_configuration(self):
"""
Gets the ApplianceTimeAndLocaleConfiguration API client.
Returns:
ApplianceTimeAndLocaleConfiguration:
"""
if not self.__appliance_time_and_locale_configuration:
self.__appliance_time_and_locale_configuration = ApplianceTimeAndLocaleConfiguration(self.__connection)
return self.__appliance_time_and_locale_configuration
@property
def versions(self):
"""
Gets the Version API client.
Returns:
Version:
"""
if not self.__versions:
self.__versions = Versions(self.__connection)
return self.__versions
@property
def backups(self):
"""
Gets the Backup API client.
Returns:
Backups:
"""
if not self.__backups:
self.__backups = Backups(self.__connection)
return self.__backups
@property
def login_details(self):
"""
Gets the login details
Returns:
List of login details
"""
if not self.__login_details:
self.__login_details = LoginDetails(self.__connection)
return self.__login_details
@property
def licenses(self):
"""
Gets all the licenses
Returns:
List of licenses
"""
if not self.__licenses:
self.__licenses = Licenses(self.__connection)
return self.__licenses
| mit | 2,875,821,582,522,807,000 | 30.598394 | 143 | 0.627173 | false |
RuthAngus/chronometer | chronometer/fit_dispersion.py | 1 | 2000 | import numpy as np
from action_age_evolution import calc_dispersion
import emcee
import corner
import matplotlib.pyplot as plt
plotpar = {'axes.labelsize': 18,
'font.size': 10,
'legend.fontsize': 15,
'xtick.labelsize': 18,
'ytick.labelsize': 18,
'text.usetex': True}
plt.rcParams.update(plotpar)
def lnprob(pars, x, y, yerr):
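    # Log-posterior: Gaussian log-likelihood of the dispersion model
    # (sz0, t1 and hsz are passed in log space) plus the flat prior below.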
sz0, t1, beta, hsz = pars
model = calc_dispersion([np.exp(sz0), np.exp(t1), beta, np.exp(hsz)], x)
    return np.sum(-.5*((model - y)/yerr)**2) + lnprior(pars)
def lnprior(pars):
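    # Flat (uniform) prior over broad bounds in log space; -inf outside.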
lnsz0, lnt1, beta, lnhsz = pars
if -20 < lnsz0 < 20 and -20 < lnt1 < 20 and -100 < beta < 100 \
and -20 < lnhsz < 20:
return 0.
else:
return -np.inf
if __name__ == "__main__":
time = np.linspace(0, 14, 100)
sz0 = 50.
sr0 = 50.
t1 = .1
tm = 10.
beta = .33
R0 = 1.
Rc = 1.
hsz = 9.
hsr = 9.
solar_radius = 8.
hr = 2.68/solar_radius
# Today
sr = 34.
sz = 25.1
zpar_init = np.array([np.log(sz0), np.log(t1), beta, np.log(hsz)])
rpar_init = np.array([np.log(sr0), np.log(t1), beta, np.log(hsz)])
sigma_z = calc_dispersion([sz0 + 5, t1, beta + .2, hsz], time)
sigma_r = calc_dispersion([sr0 + 5, t1, beta + .2, hsz], time)
print(lnprob(zpar_init, time, sigma_z, sigma_z*.1))
x, y, yerr = time, sigma_z, sigma_z*.1
ndim, nwalkers, nsteps = len(zpar_init), 24, 10000
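    # Start every walker in a tiny ball around the initial parameter guess.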
p0 = [1e-4*np.random.rand(ndim) + zpar_init for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=[x, y, yerr])
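    # 500-step burn-in, then reset the chain before the production run.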
pos, _, _ = sampler.run_mcmc(p0, 500)
sampler.reset()
sampler.run_mcmc(pos, nsteps)
flat = np.reshape(sampler.chain, (nwalkers*nsteps, ndim))
# flat[:, :2] = np.exp(flat[:, :2])
# flat[:, 3:] = np.exp(flat[:, 3:])
labels = ["$\ln \sigma_{z0}$", "$t_1$", "$\\beta$", "$\sigma_{Hz}$"]
fig = corner.corner(flat, labels=labels)
fig.savefig("zcorner")
| mit | 5,217,928,310,203,504,000 | 27.985507 | 78 | 0.5585 | false |
mozilla/universal-search-recommendation | recommendation/mozlog/middleware.py | 1 | 2336 | import json
import re
import time
from flask import current_app, request
IS_PROTOCOL = r'^[^\s]+\:\S'
IS_HOSTNAME = r'^[^\s]+\.\S'
LOG_PATH_BLACKLIST = [
'/favicon.ico',
'/__heartbeat__',
'/__lbheartbeat__',
'/nginx_status',
'/robots.txt',
'/images'
]
def request_timer():
"""
before_request middleware that attaches the processing start time to the
request object, for later performance assessment.
"""
request.start_time = time.time()
def request_summary(response):
"""
after_request middleware that generates and logs a mozlog-formatted log
about the request.
Read more:
https://github.com/mozilla/universal-search/blob/master/docs/metrics.md
https://github.com/mozilla-services/Dockerflow/blob/master/docs/mozlog.md
"""
request.finish_time = time.time()
response.direct_passthrough = False
if request.path in LOG_PATH_BLACKLIST:
return response
log = {}
query = request.args.get('q')
log['agent'] = request.headers.get('User-Agent')
log['errno'] = 0 if response.status_code < 400 else response.status_code
log['lang'] = request.headers.get('Accept-Language')
log['method'] = request.method
log['path'] = request.path
log['t'] = (request.finish_time - request.start_time) * 1000 # in ms
if query:
data = response.get_data(as_text=True)
try:
body = json.loads(data)
except json.decoder.JSONDecodeError:
body = {}
query = query.lower()
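        # Privacy guard: the raw query is only logged further below when it
        # is short and does not look like a protocol URL or a hostname.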
log['predicates.query_length'] = len(query) > 20
log['predicates.is_protocol'] = (re.match(IS_PROTOCOL, query) is not
None)
log['predicates.is_hostname'] = (re.match(IS_HOSTNAME, query) is not
None)
if not any([log['predicates.query_length'],
log['predicates.is_protocol'],
log['predicates.is_hostname']]):
log['query'] = query if query else None
log['status_code'] = response.status_code
classifiers = body.get('enhancements')
log['classifiers'] = (list(classifiers.keys()) if classifiers else
[])
current_app.logger.info('', extra=log)
return response
| mpl-2.0 | 9,072,272,571,820,170,000 | 28.948718 | 78 | 0.587329 | false |
Gargamel1989/Seasoning-old | Seasoning/authentication/views/account_views.py | 1 | 6951 | from django.contrib.auth.decorators import login_required
from django.contrib.auth import get_user_model
from authentication.forms import AccountSettingsForm, DeleteAccountForm,\
CheckActiveAuthenticationForm
from authentication.models import NewEmail, User
from django.contrib import messages
from django.contrib.sites.models import RequestSite
from django.shortcuts import render, redirect
from django.http.response import Http404
from django.views.decorators.debug import sensitive_post_parameters
from django.contrib.auth.views import login as django_login, logout
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.forms import PasswordChangeForm, SetPasswordForm
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.db.models.aggregates import Avg, Count
def login(request):
return django_login(request, template_name='authentication/login.html',
authentication_form=CheckActiveAuthenticationForm)
@login_required
def account_settings(request, user_id=None):
viewing_self = False
try:
if user_id is None or user_id == request.user.id:
user = get_user_model().objects.prefetch_related('recipes').get(id=request.user.id)
viewing_self = True
else:
user = get_user_model().objects.prefetch_related('recipes').get(id=user_id)
except get_user_model().DoesNotExist:
raise Http404
recipes_list = user.recipes.all().order_by('-rating')
try:
averages = user.recipes.all().aggregate(Avg('footprint'), Avg('rating'))
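        # The veganism value that occurs most often among this user's recipes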
most_used_veganism = max(user.recipes.values('veganism').annotate(dcount=Count('veganism')), key=lambda i: i['dcount'])['veganism']
except ValueError:
averages = {'footprint__avg': None,
'rating__avg': None}
most_used_veganism = None
# Split the result by 9
paginator = Paginator(recipes_list, 9)
page = request.GET.get('page')
try:
recipes = paginator.page(page)
except PageNotAnInteger:
recipes = paginator.page(1)
except EmptyPage:
recipes = paginator.page(paginator.num_pages)
if request.is_ajax():
return render(request, 'includes/recipe_summaries.html', {'recipes': recipes})
return render(request, 'authentication/account_settings.html', {'viewed_user': user,
'viewing_other': not viewing_self,
'recipes': recipes,
'average_fp': 4*averages['footprint__avg'],
'average_rating': averages['rating__avg'],
'most_used_veganism': most_used_veganism})
@login_required
def account_settings_profile(request):
"""
Allow a user to change his account settings
If the user has changed his email address, an activation email will be sent to this new
address. The new address will not be activated until the link in this email has been
clicked.
If the user has an alternate email that should be activated, this will also be displayed
on this page.
"""
context = {}
user = get_user_model().objects.get(id=request.user.id)
if request.method == "POST":
form = AccountSettingsForm(request.POST, request.FILES, instance=user)
if form.is_valid():
if form.new_email is not None:
# Send an activation email to the new email
NewEmail.objects.create_inactive_email(user, form.new_email, RequestSite(request))
messages.add_message(request, messages.INFO, _('An email has been sent to the new email address provided by you. Please follow the instructions '
'in this email to complete the changing of your email address.'))
# New email address has been replaced by old email address in the form, so it will not be saved until activated
form.save()
user = get_user_model().objects.get(id=request.user.id)
else:
form = AccountSettingsForm(instance=user)
try:
new_email = NewEmail.objects.get(user=request.user)
context['new_email'] = new_email.email
except NewEmail.DoesNotExist:
pass
context['form'] = form
context['user'] = user
return render(request, 'authentication/account_settings_profile.html', context)
@login_required
def account_settings_social(request):
return render(request, 'authentication/account_settings_social.html')
@login_required
def account_settings_privacy(request):
return render(request, 'authentication/account_settings_privacy.html')
@login_required
def change_email(request, activation_key):
"""
    This checks if the given activation key belongs to the current user's new,
    inactive email address. If so, the new email address is activated and
    the user's old email address is deleted.
"""
activated = NewEmail.objects.activate_email(request.user, activation_key)
if activated:
messages.add_message(request, messages.INFO, _('Your email address has been successfully changed.'))
return redirect(account_settings)
raise Http404
@sensitive_post_parameters()
@login_required
def change_password(request,
template_name='authentication/password_change_form.html',
password_change_form=PasswordChangeForm):
"""
Provides a form where the users password can be changed.
"""
if request.user.password == '!':
password_change_form = SetPasswordForm
if request.method == "POST":
form = password_change_form(user=request.user, data=request.POST)
if form.is_valid():
form.save()
messages.add_message(request, messages.INFO, _('Your password has been successfully changed.'))
return redirect(account_settings)
form = password_change_form(user=request.user)
return render(request, template_name, {'form': form})
@login_required
def account_delete(request):
"""
Provides a method for deleting the users account
"""
if request.method == 'POST':
form = DeleteAccountForm(request.POST)
if form.is_valid():
user = User.objects.get(pk=request.user.id)
logout(request)
user.delete()
return redirect('/')
else:
form = DeleteAccountForm()
return render(request, 'authentication/account_delete.html', {'form': form})
| gpl-3.0 | 177,147,796,472,188,670 | 40.907407 | 161 | 0.625522 | false |
pythoneasyway/python-class | class8.py | 1 | 1656 | #!/usr/bin/python
#: Title : class8.py
#: Date :
#: Author : [email protected]
#: Description : Class number 8
#: - exercises with lists
#: - adding steps into circle() to change the shape
#: Version : 1.0
# define 2 lists
even_list = list()
odd_list = list()
# we'll use the numbers from 1 to 1000
for i in range(1,1001):
    # % is the modulo operator: it gives the remainder when dividing the number by 2
if i % 2 == 0:
# add the even number to the list
even_list.append(i)
else:
# add the odd number to the list
odd_list.append(i)
print "the odd numbers are ", odd_list
print "the even numbers are ", even_list
# import everything from the file colors_lib
from colors_lib import *
# print out the color_list defined in the previous imported module
print color_list
# total of colors
print "the total of colors is", len(color_list)
import turtle as t
t.showturtle()
total = len(color_list)
index = 1
t.up()
t.goto(0,-350)
t.down()
for i in color_list:
t.color(i)
if index <100:
# create first triangle
t.circle(index, steps = 3)
elif index<200:
# create the square
t.circle(index, steps = 4)
elif index<250:
        # create the pentagon
t.circle(index, steps = 5)
elif index<300:
# create the hexagon
t.circle(index, steps = 6)
elif index<350:
# last circle
t.circle(index)
else:
# change the background
t.bgcolor(i)
# print in the title the color's name and the number of the color.
t.title(i+" "+str(index))
t.speed(0)
index = index +1
# finish
t.done()
| mit | 3,465,932,965,838,154,000 | 23 | 77 | 0.618961 | false |
gdsfactory/gdsfactory | pp/components/extend_ports_list.py | 1 | 1197 | from typing import Any, Dict, List, Optional
from pp.cell import cell
from pp.component import Component
from pp.components.straight_heater import straight_with_heater
from pp.port import Port, auto_rename_ports
from pp.types import ComponentOrFactory
@cell
def extend_ports_list(
ports: List[Port],
extension_factory: ComponentOrFactory = straight_with_heater,
extension_settings: Optional[Dict[str, Any]] = None,
extension_port_name: str = "W0",
) -> Component:
"""Returns a component with extension to list of ports."""
c = Component()
extension_settings = extension_settings or {}
extension = (
extension_factory(**extension_settings)
if callable(extension_factory)
else extension_factory
)
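    # Attach one copy of the extension to every port and re-expose the
    # extension's ports under a unique per-port prefix.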
for i, port in enumerate(ports):
extension_ref = c << extension
extension_ref.connect(extension_port_name, port)
for port_name, port in extension_ref.ports.items():
c.add_port(f"{i}_{port_name}", port=port)
auto_rename_ports(c)
return c
if __name__ == "__main__":
import pp
c = pp.c.mmi1x2()
cr = extend_ports_list(ports=c.get_ports_list())
c.add_ref(cr)
c.show()
| mit | 2,180,702,165,455,955,500 | 26.204545 | 65 | 0.663325 | false |
tksn/phoneauto | phoneauto/scriptgenerator/scriptgenerator_ui.py | 1 | 26121 | # -*- coding: utf-8 -*-
"""scriptgenerator GUI
:copyright: (c) 2015 by tksn
:license: MIT
"""
# pylint: disable=invalid-name
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-few-public-methods
from __future__ import unicode_literals, print_function
import contextlib
import logging
import math
import platform
import tkinter
import tkinter.font
from tkinter import ttk
import time
from PIL import Image, ImageTk, ImageDraw, ImageFont
from phoneauto.scriptgenerator.exception import (
UiInconsitencyError, UiObjectNotFound)
from phoneauto.scriptgenerator.screenrecord import Screenrecord
def get_filedialog(): # pragma: no cover
"""Returns filedialog module object
Returns appropriate filedialog module depending on sys.version.
The reason doing this is because python.future's tkinter.filedialog
is alias to FileDialog, not to tkFileDialog.
"""
import sys
if sys.version_info.major >= 3:
import tkinter.filedialog
return tkinter.filedialog
else:
import tkFileDialog
return tkFileDialog
@contextlib.contextmanager
def display_wait(root_window):
"""Displays wait icon while context is alive"""
root_window.config(cursor='wait')
root_window.update()
yield
root_window.config(cursor='')
class ScriptGeneratorUI(object):
"""Automation script generator UI"""
_SCR_REFRESH_INTERVAL = 100
_HVIEW_REFRESH_INTERVAL = 3
_HVIEW_REFRESH_INTERVAL_AFTER_SCR_REFRESH = 1
_MOUSE_MOVE_THRESH = 20
_CLICKCIRCLE_RADIUS = 5
def __init__(self,
screen_size=(480, 800),
platform_sys=None,
timeouts=None):
"""Initialization
Args:
scale (float):
magnification scale which is used when screenshot
is displayed in this UI
"""
self.logger = logging.getLogger(__name__)
self.logger.info('initialization start')
self._controller = None
self._scale = None
self._screenshot = None
self._mouse_action = None
self.hierarchy_view_timestamp = 0
timeouts = timeouts or {}
self._wait_timeouts = {}
default_timeouts = {
'idle': 5000, 'update': 1000, 'exists': 5000, 'gone': 5000}
for name, default_value in default_timeouts.items():
self._wait_timeouts[name] = timeouts.get(name, default_value)
self._hold_timer_id = None
self._root = None
self._platform = platform_sys or platform.system()
self._screenrecord = Screenrecord(
width=screen_size[0], height=screen_size[1])
self._build_ui()
self.logger.info('initialization end')
def run(self, controller):
"""Launches UI and enter the event loop
Args:
controller (object):
scriptgenerator object
"""
self._controller = controller
self._enable_ui()
try:
self._root.mainloop()
finally:
if self._screenrecord:
self._screenrecord.join()
self._screenrecord = None
def _build_ui(self):
"""Creates UI components and builds up application UI"""
from tkinter import N, W, E, S
self._root = tkinter.Tk()
self._root.title('phoneauto-scriptgenerator')
mainframe = ttk.Frame(self._root, name='mainframe')
mainframe.grid(row=0, column=0, sticky=(N, W, E, S))
canvas = self._create_canvas(mainframe)
canvas.grid(row=1, column=0, columnspan=3, sticky=(N, W, E, S))
back_button = ttk.Button(
mainframe, text='Back', name='back_button')
back_button.grid(row=2, column=0, sticky=(N, W, E, S))
home_button = ttk.Button(
mainframe, text='Home', name='home_button')
home_button.grid(row=2, column=1, sticky=(N, W, E, S))
recent_button = ttk.Button(
mainframe, text='Recent Apps', name='recent_button')
recent_button.grid(row=2, column=2, sticky=(N, W, E, S))
sidebar = ttk.Frame(self._root, name='sidebar')
sidebar.grid(row=0, column=1, sticky=(N, W, E, S))
self._build_sidebar(sidebar)
self._root.update()
def _create_canvas(self, parent):
"""Displays placeholder (Initializing message) screen
before actual screenshot is aquired
"""
from tkinter import NW
screencap = self._screenrecord.capture_oneshot()
placeholder_tk = ImageTk.PhotoImage(screencap)
canvas = tkinter.Canvas(parent,
width=screencap.width, height=screencap.height,
name='canvas')
image_id = canvas.create_image(0, 0, anchor=NW, image=placeholder_tk)
text = 'Initializing'
text_x, text_y = screencap.width / 2, screencap.height / 2
text_id = canvas.create_text(
text_x, text_y, text=text, fill='white',
font=('Courier', 32), tag='init_text')
bgrect_id = canvas.create_rectangle(
canvas.bbox(text_id), fill='black', tag='init_text_bg')
canvas.tag_lower(bgrect_id, text_id)
self._screenshot = {'image': placeholder_tk, 'id': image_id,
'size': screencap.size}
return canvas
@staticmethod
def _build_sidebar(sidebar):
"""Constructs side panel"""
def button(master, widget_options, pack_options=None):
"""Creates a button"""
pack_options = pack_options or {'fill': tkinter.X}
btn = ttk.Button(master, **widget_options)
btn.pack(**pack_options)
def label(master, widget_options, pack_options=None):
"""Creates a label"""
pack_options = (pack_options or
{'fill': tkinter.X, 'anchor': tkinter.NW})
btn = ttk.Label(master, **widget_options)
btn.pack(**pack_options)
def separator(master, widget_options, pack_options=None):
"""Creates a separator"""
pack_options = pack_options or {'fill': tkinter.X, 'pady': 5}
sep = ttk.Separator(master, **widget_options)
sep.pack(**pack_options)
button(sidebar, {'name': 'refresh_button', 'text': 'Refresh'})
button(sidebar, {'name': 'screenshot_button', 'text': 'Screenshot'})
separator(sidebar, {'orient': tkinter.HORIZONTAL})
button(sidebar, {'name': 'power_button', 'text': 'Power'})
button(sidebar,
{'name': 'notification_button', 'text': 'Notification'})
button(sidebar,
{'name': 'quicksettings_button', 'text': 'QuickSettings'})
button(sidebar, {'name': 'volume_up_button', 'text': 'Volume Up'})
button(sidebar, {'name': 'volume_down_button', 'text': 'Volume Down'})
label(sidebar, {'text': 'Orientation:'})
frm = ttk.Frame(sidebar, name='orientation_frame')
def orient_button(name, text):
"""Orientation button"""
button(frm, {'name': name, 'text': text, 'width': 2},
{'side': tkinter.LEFT})
orient_button('orientation_natural', 'N')
orient_button('orientation_left', 'L')
orient_button('orientation_right', 'R')
orient_button('orientation_upsidedown', 'U')
orient_button('orientation_unfreeze', 'Z')
frm.pack()
separator(sidebar, {'orient': tkinter.HORIZONTAL})
label(sidebar, {'text': 'Insert line to script:'})
button(sidebar,
{'name': 'ins_screenshot_cap',
'text': 'screenshot capture'})
button(sidebar,
{'name': 'ins_wait_idle', 'text': 'wait.idle'})
button(sidebar,
{'name': 'ins_wait_update', 'text': 'wait.update'})
separator(sidebar, {'orient': tkinter.HORIZONTAL})
text = tkinter.Text(sidebar, width=30, name='infotext')
text.pack(padx=3, pady=2)
def _enable_ui(self):
"""2nd phase initialization - activate UI"""
self._bind_commands_to_widgets()
self._acquire_hierarchy_view()
self._set_screen_scale()
self._screenrecord.start()
self._kick_video_update()
self._refresh_screen()
canvas = self._root.nametowidget('mainframe.canvas')
canvas.delete('init_text')
canvas.delete('init_text_bg')
def _bind_commands_to_widgets(self):
"""Initialization after controller became available"""
def bind_custom_command(widget_name, command):
self._root.nametowidget(widget_name).config(command=command)
def bind_command(widget_name, command_name, **command_kwargs):
bind_custom_command(widget_name,
self.__get_command_wrap(command_name,
**command_kwargs))
bind_command('mainframe.back_button', 'press_key',
key_name='BACK')
bind_command('mainframe.home_button', 'press_key',
key_name='HOME')
bind_command('mainframe.recent_button', 'press_key',
key_name='APP_SWITCH')
bind_custom_command('sidebar.refresh_button',
lambda _: self._acquire_hierarchy_view())
bind_custom_command('sidebar.screenshot_button',
self._take_screenshot)
bind_command('sidebar.power_button', 'press_key',
key_name='POWER')
bind_command('sidebar.notification_button',
'open_notification')
bind_command('sidebar.quicksettings_button',
'open_quick_settings')
bind_command('sidebar.volume_up_button', 'press_key',
key_name='VOLUME_UP')
bind_command('sidebar.volume_down_button', 'press_key',
key_name='VOLUME_DOWN')
bind_command('sidebar.orientation_frame.orientation_natural',
'set_orientation', orientation='natural')
bind_command('sidebar.orientation_frame.orientation_left',
'set_orientation', orientation='left')
bind_command('sidebar.orientation_frame.orientation_right',
'set_orientation', orientation='right')
bind_command(
'sidebar.orientation_frame.orientation_upsidedown',
'set_orientation', orientation='upsidedown')
bind_command('sidebar.orientation_frame.orientation_unfreeze',
'set_orientation', orientation='unfreeze')
bind_command('sidebar.ins_screenshot_cap',
'insert_screenshot_capture')
bind_command('sidebar.ins_wait_idle', 'insert_wait',
for_what='idle', timeout=self._wait_timeouts['idle'])
bind_command('sidebar.ins_wait_update', 'insert_wait',
for_what='update',
timeout=self._wait_timeouts['update'])
canvas = self._root.nametowidget('mainframe.canvas')
canvas.bind('<Motion>', self._on_mouse_motion)
canvas.bind('<Leave>', self._on_mouse_leave)
canvas.bind('<Button-1>', self._on_mouse_left_down)
canvas.bind('<ButtonRelease-1>', self._on_mouse_left_up)
canvas.bind('<B1-Motion>', self._on_mouse_b1motion)
rbutton_events = (
('<Button-2>', '<ButtonRelease-2>', '<B2-Motion>')
if self._platform == 'Darwin'
else ('<Button-3>', '<ButtonRelease-3>', '<B3-Motion>'))
canvas.bind(rbutton_events[0], self._on_mouse_right_down)
canvas.bind(rbutton_events[1], self._on_mouse_right_up)
canvas.bind(rbutton_events[2], self._on_mouse_b1motion)
def _kick_video_update(self):
"""Workaround: Some movements on the device's screen are needed
in order to pull up first few frames from the device..
"""
self._screenrecord.kick()
def _refresh_hierarchy_view(self, screen_refreshed):
if self._controller is None:
return
interval = (self._HVIEW_REFRESH_INTERVAL_AFTER_SCR_REFRESH
if screen_refreshed else self._HVIEW_REFRESH_INTERVAL)
hierarchy_view_age = time.time() - self.hierarchy_view_timestamp
if hierarchy_view_age > interval:
self._acquire_hierarchy_view()
def _refresh_screen(self):
from tkinter import NW
frame = None
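        # Drain the frame queue, keeping only the most recent frame.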
while not self._screenrecord.queue.empty():
frame = self._screenrecord.queue.get_nowait()
hierarchy_view_age = time.time() - self.hierarchy_view_timestamp
if frame:
disp_frame = ImageTk.PhotoImage(frame)
canvas = self._root.nametowidget('mainframe.canvas')
canvas.delete(self._screenshot['id'])
canvas.config(width=self._screenrecord.width,
height=self._screenrecord.height)
all_other_items = canvas.find_all()
image_id = canvas.create_image(0, 0, anchor=NW, image=disp_frame)
if all_other_items:
canvas.tag_lower(image_id, all_other_items[0])
self._screenshot = {'image': disp_frame, 'id': image_id}
self._refresh_hierarchy_view(frame)
self._root.after(self._SCR_REFRESH_INTERVAL, self._refresh_screen)
def _acquire_hierarchy_view(self):
"""Acquires screenshot from the device, and place it on the UI's canvas
Returns:
Tkinter.Canvas: canvas object
"""
self._controller.execute('update_view_dump')
self.hierarchy_view_timestamp = time.time()
def _set_screen_scale(self):
"""Sets screen scale information"""
self._scale = self._screenrecord.get_scale()
def _descale(self, coord):
"""Converts a coordinate from canvas-coordinats to
device-screen-coorinates
Args:
coord (tuple): coordinats (x, y)
"""
return int(coord[0] / self._scale[0]), int(coord[1] / self._scale[1])
def _on_mouse_leave(self, event):
"""Callback for mouse leave event
Args:
event (object): event information which is passed by Tk framework
"""
canvas = self._root.nametowidget('mainframe.canvas')
canvas.delete('object_rect')
def _on_mouse_motion(self, event):
"""Callback for mouse motion event
Args:
event (object): event information which is passed by Tk framework
"""
canvas = self._root.nametowidget('mainframe.canvas')
canvas.delete('object_rect')
text = self._root.nametowidget('sidebar.infotext')
text.delete(1.0, tkinter.END)
command_args = {'start': self._descale((event.x, event.y))}
obj_info = self._controller.execute(
'get_hierarchy_view_object_info', command_args)
if obj_info:
bounds = obj_info['visibleBounds']
def scale(coord):
"""Scale coordinates from actual screen -> view"""
return (
int(coord[0] * self._scale[0]),
int(coord[1] * self._scale[1]))
xy0 = scale((bounds['left'], bounds['top']))
xy1 = scale((bounds['right'], bounds['bottom']))
canvas.create_rectangle(
xy0[0], xy0[1], xy1[0], xy1[1],
outline='red', width=2, tag='object_rect')
for k, v in obj_info.items():
v = v or '-'
text.insert(tkinter.END, '{0}: {1}\n'.format(k, v))
def _on_mouse_b1motion(self, event):
"""Callback for left-button motion event
Args:
event (object): event information which is passed by Tk framework
"""
self._mouse_action['current'] = event.x, event.y
self._draw_mouse_action()
def _mouse_moved(self):
"""Queries if mouse is moved"""
xS, yS = self._mouse_action['start']
xC, yC = self._mouse_action['current']
return math.hypot(xC - xS, yC - yS) > self._MOUSE_MOVE_THRESH
def _draw_mouse_action(self, erase=False):
"""Draws mouse action (swipe, drag, etc) on the screen"""
canvas = self._root.nametowidget('mainframe.canvas')
canvas.delete('down_point')
canvas.delete('move_line')
if erase:
return
xS, yS = self._mouse_action['start']
xC, yC = self._mouse_action['current']
color = ('blue' if self._mouse_action['left_or_right'] == 'left'
else 'yellow')
fill = color
canvas.create_line(xS, yS, xC, yC,
fill=color, width=2, tag='move_line')
def oval_coords(radius):
"""Returns oval coordinates"""
tl = tuple(p - radius for p in (xS, yS))
br = tuple(p + radius for p in (xS, yS))
return (tl[0], tl[1], br[0], br[1])
canvas.create_oval(*oval_coords(self._CLICKCIRCLE_RADIUS),
outline=color, fill=fill, tag='down_point')
def _on_mouse_left_down(self, event):
"""Callback for mouse left-button-down event
Args:
event (object): event information which is passed by Tk framework
"""
x, y = event.x, event.y
self._mouse_action = {
'start': (x, y),
'current': (x, y),
'left_or_right': 'left'
}
self._draw_mouse_action()
def _on_mouse_left_up(self, event):
"""Callback for left-button-up event
Args:
event (object): Event information which is passed by Tk framework
"""
cur = event.x, event.y
self._mouse_action['current'] = cur
if self._mouse_moved():
self._left_2point_action_menu(cur)
else:
self._left_1point_action_menu(cur)
self._draw_mouse_action(erase=True)
def _on_mouse_right_down(self, event):
"""Callback for mouse right-button-down event
Args:
event (object): event information which is passed by Tk framework
"""
x, y = event.x, event.y
self._mouse_action = {
'start': (x, y),
'current': (x, y),
'left_or_right': 'right'
}
self._draw_mouse_action()
def _on_mouse_right_up(self, event):
"""Callback for right-button-up event
Args:
event (object): Event information which is passed by Tk framework
"""
cur = event.x, event.y
self._mouse_action['current'] = cur
if self._mouse_moved():
self._right_2point_action_menu(cur)
else:
self._right_1point_action_menu(cur)
self._draw_mouse_action(erase=True)
def __get_command_wrap(self, command_name, **aditional_args):
"""Returns wrapped controller command"""
command_args = dict(aditional_args)
if self._mouse_action:
command_args['start'] = self._descale(self._mouse_action['start'])
command_args['end'] = self._descale(self._mouse_action['current'])
def command_wrap():
"""controller command execution"""
try:
with display_wait(self._root):
retval = self._controller.execute(
command_name, command_args)
return retval
except (UiObjectNotFound, UiInconsitencyError):
self._acquire_hierarchy_view()
return command_wrap
def _left_1point_action_menu(self, position):
"""Displays 1-point left-click menu"""
menu = tkinter.Menu(self._root, name='menu')
menu.add_command(
label='Click(xy)',
command=self.__get_command_wrap('click_xy'))
menu.add_command(
label='Long click(xy)',
command=self.__get_command_wrap('long_click_xy'))
menu.post(*position)
def _left_2point_action_menu(self, position):
"""Displays 2-points left-click menu"""
menu = tkinter.Menu(self._root, name='menu')
menu.add_command(
label='Swipe(xy -> xy)',
command=self.__get_command_wrap('swipe_xy_to_xy',
options={'steps': 10}))
menu.add_command(
label='Drag(xy -> xy)',
command=self.__get_command_wrap('drag_xy_to_xy'))
menu.add_command(
label='Drag(object -> xy)',
command=self.__get_command_wrap('drag_object_to_xy'))
menu.add_command(
label='Fling',
command=self.__get_command_wrap('fling'))
menu.add_command(
label='Scroll',
command=self.__get_command_wrap('scroll'))
menu.post(*position)
def _right_1point_action_menu(self, position):
"""Displays 1-point right-click menu"""
menu = tkinter.Menu(self._root, name='menu')
menu.add_command(
label='Click(object)',
command=self.__get_command_wrap('click_object'))
menu.add_command(
label='Click(object) and wait',
command=self.__get_command_wrap(
'click_object', wait=self._wait_timeouts['update']))
menu.add_command(
label='Long click(object)',
command=self.__get_command_wrap('long_click_object'))
menu.add_command(
label='Clear text',
command=self.__get_command_wrap('clear_text'))
menu.add_command(
label='Enter text',
command=lambda: self._text_action(
'enter_text', lambda text: {'text': text}))
menu.add_command(label='Pinch in', command=lambda: self._pinch('In'))
menu.add_command(label='Pinch out', command=lambda: self._pinch('Out'))
menu.add_separator()
menu.add_command(
label='Insert wait-exists',
command=self.__get_command_wrap(
'insert_wait_object',
for_what='exists', timeout=self._wait_timeouts['exists']))
menu.add_command(
label='Insert wait-gone',
command=self.__get_command_wrap(
'insert_wait_object',
for_what='gone', timeout=self._wait_timeouts['gone']))
menu.post(*position)
def _right_2point_action_menu(self, position):
"""Displays 2-points right-click menu"""
menu = tkinter.Menu(self._root, name='menu')
menu.add_command(
label='Swipe(object + direction)',
command=self.__get_command_wrap('swipe_object_with_direction'))
menu.add_command(
label='Drag(object -> object)',
command=self.__get_command_wrap('drag_object_to_object'))
menu.add_command(
label='Fling to end',
command=self.__get_command_wrap('fling_to_end'))
menu.add_command(
label='Scroll to end',
command=self.__get_command_wrap('scroll_to_end'))
menu.add_command(
label='Scroll to text',
command=lambda: self._text_action(
'scroll_to', lambda text: {'options': {'text': text}}))
menu.post(*position)
def _text_action(self, command_name, command_kwargs_gen):
"""Callback for Enter text event"""
from tkinter import NW
# Create a dialog on the canvas
canvas = self._root.nametowidget('mainframe.canvas')
top = tkinter.Toplevel(canvas, name='textentrywindow')
# Place a TextEntry on the dialog
entry = ttk.Entry(top, name='textentry')
entry.grid(row=0, column=0, sticky=NW)
def on_ok():
"""Callback for ok-click"""
text = entry.get()
top.destroy()
self._root.after(
0, self.__get_command_wrap(command_name,
**command_kwargs_gen(text)))
# Place a OK button on the dialog
ok_button = ttk.Button(top, text='OK', command=on_ok, name='ok_button')
ok_button.grid(row=0, column=1, sticky=NW)
canvas.wait_window(top)
def _pinch(self, in_or_out):
"""Pinch-in/out event handler implementation"""
from tkinter import NW, SE, StringVar
# Create a dialog on the canvas
canvas = self._root.nametowidget('mainframe.canvas')
top = tkinter.Toplevel(canvas, name='pinchwindow')
# Place a TextEntry on the dialog
pinch_label_text = 'Pinch {0}:'.format(in_or_out)
lebel0 = ttk.Label(top, text=pinch_label_text, name='pinchlabel')
lebel0.grid(row=0, column=0, sticky=NW)
slider = ttk.Scale(top, value=1.0, name='pinchinslider')
slider.grid(row=0, column=1, sticky=NW)
lebel1 = ttk.Label(top, text='Steps:', name='steplabel')
lebel1.grid(row=1, column=0, sticky=NW)
stepsStr = StringVar(value='10')
entry = ttk.Entry(top, textvariable=stepsStr, name='steps')
entry.grid(row=1, column=1, sticky=NW)
def on_ok():
"""Callback for ok-click"""
percent = int(slider.get() * 100)
steps = int(stepsStr.get())
top.destroy()
self._root.after(0, self.__get_command_wrap(
'pinch',
in_or_out=in_or_out,
options={
'percent': percent,
'steps': steps
}))
# Place a OK button on the dialog
ok_button = ttk.Button(top, text='OK', command=on_ok, name='ok_button')
ok_button.grid(row=0, column=2, rowspan=2, sticky=(NW, SE))
canvas.wait_window(top)
def _take_screenshot(self):
"""Callback for Take Screenshot"""
filename = get_filedialog().asksaveasfilename(defaultextension='.png')
if not filename:
return
with display_wait(self._root):
scr = self._controller.execute('get_screenshot')
scr.save(filename)
| mit | -2,263,941,709,933,397,800 | 37.526549 | 79 | 0.567015 | false |
b3orn/mania | mania/compiler.py | 1 | 3056 | # -*- coding: utf-8 -*-
'''
mania.compiler
~~~~~~~~~~~~~~
:copyright: (c) 2014 by Björn Schulz.
:license: MIT, see LICENSE for more details.
'''
from __future__ import absolute_import
import logging
import io
import mania.types
import mania.instructions
logger = logging.getLogger(__name__)
class Placeholder(object):
def __init__(self, instruction):
self.instruction = instruction
class Builder(object):
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
self.constants = [name]
self.instructions = []
@property
def module(self):
return mania.types.Module(
name=self.name,
entry_point=self.entry_point,
constants=self.constants,
instructions=self.instructions
)
def constant(self, value):
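        # Intern the constant: reuse its existing slot if already stored.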
if value in self.constants:
return self.constants.index(value)
self.constants.append(value)
return len(self.constants) - 1
def index(self):
return len(self.instructions)
def add(self, instruction):
index = self.index()
self.instructions.append(instruction)
return index
def replace(self, index, instruction):
self.instructions[index] = instruction
class Compiler(object):
def __init__(self, name=None):
self.name = name or mania.types.Symbol('')
self.builder = Builder(self.name, 0)
def compile(self, code):
        raise NotImplementedError('"compile" needs to be implemented in subclasses')
class SimpleCompiler(Compiler):
def compile(self, module):
for element in module:
self.compile_any(element)
self.builder.add(mania.instructions.Eval())
self.builder.add(mania.instructions.Exit())
return self.builder.module
def compile_any(self, code):
if isinstance(code, mania.types.Pair):
self.compile_pair(code)
elif isinstance(code, mania.types.Quoted):
self.compile_quoted(code)
elif isinstance(code, mania.types.Quasiquoted):
self.compile_quasiquoted(code)
elif isinstance(code, mania.types.Unquoted):
self.compile_unquoted(code)
else:
self.compile_constant(code)
def compile_pair(self, code):
self.compile_any(code.head)
self.compile_any(code.tail)
self.builder.add(mania.instructions.BuildPair())
def compile_quoted(self, code):
self.compile_any(code.value)
self.builder.add(mania.instructions.BuildQuoted())
def compile_quasiquoted(self, code):
self.compile_any(code.value)
self.builder.add(mania.instructions.BuildQuasiquoted())
def compile_unquoted(self, code):
self.compile_any(code.value)
self.builder.add(mania.instructions.BuildUnquoted())
def compile_constant(self, code):
index = self.builder.constant(code)
self.builder.add(mania.instructions.LoadConstant(index))
| mit | -7,002,074,155,497,473,000 | 22.867188 | 81 | 0.629787 | false |
tshi04/machine-learning-codes | GAN-tf-ff/dc_gen.py | 1 | 2717 | import re
import math
import numpy as np
import tensorflow as tf
import tensorflow.contrib as tc
from utils import *
class generator(object):
def __init__(self, data_name='MNIST'):
self.data_name = data_name
def __call__(self, input_data, img_shape, reuse=False, name='generator'):
if self.data_name == 'MNIST':
self.img_shape = img_shape
[fh, fw, fd] = img_shape
[fh2, fw2, fd2] = [int(fh/2), int(fw/2), 64]
[fh4, fw4, fd4] = [int(fh2/2), int(fw2/2), 128]
[batch_size, in_shape] = np.array(input_data.shape, dtype='int').tolist()
with tf.variable_scope(name) as self.gs:
if reuse:
self.gs.reuse_variables()
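                # Project the latent vector with a fully connected layer,
                # reshape it to a small feature map, then upsample with
                # transposed convolutions; tanh maps the output to [-1, 1].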
h_fc1, w_fc1, b_fc1 = linear(input_data, fh4*fw4*fd4, name='dfc1')
dconv1 = tf.reshape(h_fc1, [-1, fh4, fw4, fd4])
dconv1 = leakyrelu(dconv1, name='dconv1')
dconv2, w_dconv2, b_dconv2 = dconv2d(dconv1, [batch_size, fh2, fw2, fd2], name='dconv2')
dconv2 = leakyrelu(dconv2)
dconv3, w_dconv3, b_dconv3 = dconv2d(dconv2, [batch_size, fh, fw, fd], name='dconv3')
dconv3 = tf.nn.tanh(dconv3)
return dconv3
if self.data_name == 'CIFAR-100':
self.img_shape = img_shape
[fh, fw, fd] = img_shape
[fh2, fw2, fd2] = [int(fh/2), int(fw/2), 64]
[fh4, fw4, fd4] = [int(fh2/2), int(fw2/2), fd2*2]
[fh8, fw8, fd8] = [int(fh4/2), int(fw4/2), fd4*2]
[batch_size, in_shape] = np.array(input_data.shape, dtype='int').tolist()
with tf.variable_scope(name) as self.gs:
if reuse:
self.gs.reuse_variables()
h_fc1, w_fc1, b_fc1 = linear(input_data, fh8*fw8*fd8, name='dfc1')
dconv1 = tf.reshape(h_fc1, [-1, fh8, fw8, fd8])
dconv1 = leakyrelu(batch_norm(dconv1, name='dc1'), name='dconv1')
dconv2, w_dconv2, b_dconv2 = dconv2d(dconv1, [batch_size, fh4, fw4, fd4], name='dconv2')
dconv2 = leakyrelu(batch_norm(dconv2, name='dc2'))
dconv3, w_dconv3, b_dconv3 = dconv2d(dconv2, [batch_size, fh2, fw2, fd2], name='dconv3')
dconv3 = leakyrelu(batch_norm(dconv3, name='dc3'))
dconv4, w_dconv4, b_dconv4 = dconv2d(dconv3, [batch_size, fh, fw, fd], name='dconv4')
dconv4 = tf.nn.tanh(dconv4)
return dconv4
@property
def vars(self):
return tf.contrib.framework.get_variables(self.gs)
| gpl-3.0 | 7,319,559,567,595,346,000 | 35.716216 | 104 | 0.517483 | false |
cbitterfield/JobCard | archive/videoinfo.py | 1 | 4627 | '''
Created on Sep 30, 2017
@author: colin
'''
#===============================================================================
# Import
#===============================================================================
import os
import subprocess
from string import Template
import logging
logger = logging.getLogger(__name__)
#===============================================================================
# Setup Commnands for Produce and Exists
#===============================================================================
CMD_PRODUCE = ''
CMD_EXISTS = ''
#===============================================================================
# Module Global Variables
#===============================================================================
MESSAGE = ''
ERROR = ''
NEWLINE = '\n'
Error = False
def main(source, output, component, jobcard, config, noexec):
#===============================================================================
# Module Global Variables
#===============================================================================
CURL=config['locations']['curl']
CONVERT=config['locations']['convert']
FFMPEG=config['locations']['ffmpeg']
FFPROBE=config['locations']['ffprobe']
MOGRIFY=config['locations']['mogrify']
FONT=config['boxcover']['font']
logger.debug("CURL = " + CURL)
logger.debug("CONVERT = " + CONVERT)
logger.debug("FFMPEG = " + FFMPEG)
logger.debug("FFPROBE = " + FFPROBE)
logger.debug("MOGRIFY = " + MOGRIFY)
logger.debug("FONT = " + FONT)
logger.info("Produce - Module Main - Start")
# Start Code here
logger.info("Produce - Module Main - Start")
return(Error)
def produce(source, output, component, jobcard, config, noexec):
logger.info("Produce - Start")
FFPROBE=config['locations']['ffprobe']
projectno = jobcard['clipinfo']['projectno']
edgeid = jobcard['clipinfo']['edgeid']
prime_dubya = jobcard['clipinfo']['prime_dubya']
video = source + "/" + jobcard['video']['src']
videoName = os.path.basename(video)
    pathName = os.path.dirname(video)  # video already includes the source prefix
set_src = source + "/" + jobcard[component]['src']
set_dir = jobcard[component]['out_dir']
set_width = jobcard[component]['set_width']
set_height = jobcard[component]['set_height']
    if jobcard[component]['suffix'] is None:
set_suffix = "__"
else:
set_suffix = jobcard[component]['suffix']
set_ext = jobcard[component]['ext']
set_timed = jobcard[component]['timed']
set_name = jobcard[component]['name']
Error = False
logger.info("\tGetting Information about video:" + video )
logger.info("\tDestination: "+ destination)
destination = output + "/" + projectno + "/" + prime_dubya + "/" + edgeid
# Create Directories if needed
if not os.path.isdir(destination + "/" + set_name + "/" + set_dir) and not noexec:
os.makedirs(destination + "/" + set_name + "/" + set_dir,0777)
logger.info("Creating Directory: " + destination + "/" + set_name + "/" + set_dir)
else:
logger.info("Creating Directory: " + destination + "/" + set_name + "/" + set_dir)
for format in ['csv', 'json', 'xml']:
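        # Run ffprobe once per output format, redirecting its stdout into a
        # per-format info file alongside the other outputs.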
logger.info("Output Video Information in " + format )
logger.info("Output Location:\n" + pathName)
CMD = FFPROBE + " -v error -show_format -show_streams -print_format " + format + " '" + video + "' > '" + destination +"/" + edgeid + "-info" +"."+ format +"'"
logger.warning("\tCommand:\n\t" + CMD)
if noexec:
logger.warning("No execute")
else:
result = subprocess.Popen(CMD, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdoutdata, stderrdata = result.communicate()
status = result.returncode
            if status != 0:
                Error = True
                logger.error("Command Error Code: " + str(status))
else:
logger.info("Return Code Successul")
logger.info("Produce - End")
return(Error)
def exists(source, output, component, jobcard, config, noexec):
logger.info("Exists - Start")
logger.error("Not Valid")
Error = True
logger.info("Exists - End")
return(Error)
def ignore(source, output, component, jobcard, config, noexec):
logger.info("Ignore - Start")
logger.info("Ignore - End")
return(Error) | gpl-3.0 | 2,473,814,790,306,609,000 | 31.363636 | 172 | 0.497947 | false |
Ginkooo/i3lock-multiimage | tests/reader.py | 1 | 1407 | # Copyright (C) 2017 Piotr Czajka <[email protected]>
# Author: Piotr Czajka <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import tempfile
from unittest import TestCase, mock
from src.images.reader import Reader
class ReaderTests(TestCase):
"""Reader class tests"""
def setUp(self):
        filename = 'examplefilename'
self.tmpdir = tempfile.gettempdir()
self.path = os.path.join(self.tmpdir, filename)
with open(self.path, 'w'):
pass
def tearDown(self):
os.remove(self.path)
@mock.patch('src.images.image.Image')
def test_can_return_images_iter(self, Image):
images = Reader.get_images(self.tmpdir)
self.assertTrue(images)
self.assertEqual('Image', next(images).__class__.__name__)
| gpl-3.0 | 6,423,110,217,609,996,000 | 33.317073 | 71 | 0.705046 | false |
tmhorne/celtx | extensions/python/dom/nsdom/domcompile.py | 1 | 4935 | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is mozilla.org code
#
# The Initial Developer of the Original Code is mozilla.org.
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Hammond: initial author
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
# A utility for compiling Python code, using features not available via
# the builtin compile.
#
# (a) It is not possible to compile the body of a Python function, without the
# function declaration. ie, 'return None' will always give a syntax error when
# passed to compile.
# (b) It is very tricky to compile code with the line-number starting at
# anything other than zero.
#
# Both of these are solved by this module, which uses the 'compiler' module
# XXX - sad side-effect is that Unicode is not correctly supported -
# PyCF_SOURCE_IS_UTF8 is not exposed via compiler (in 2.3 at least)
# On the upside here, all 'src' params are unicode today, so expansion here
# requires no interface changes.
from compiler import parse, syntax, compile
from compiler.pycodegen import ModuleCodeGenerator
import compiler.ast
import new
def _fix_src(src):
# windows first - \r\n -> \n, then for mac, remaining \r -> \n
# Trailing whitespace can cause problems - make sure a final '\n' exists.
return src.replace("\r\n", "\n").replace("\r", "\n") + "\n"
# from compiler.misc.set_filename - but we also adjust lineno attributes.
def set_filename_and_offset(filename, offset, tree):
"""Set the filename attribute to filename on every node in tree"""
worklist = [tree]
while worklist:
node = worklist.pop(0)
node.filename = filename
if node.lineno is not None:
node.lineno += offset
worklist.extend(node.getChildNodes())
def parse_function(src, func_name, arg_names, defaults=[]):
tree = parse(src, "exec")
defaults = [compiler.ast.Const(d) for d in defaults]
# Insert a Stmt with function object.
try:
decs = compiler.ast.Decorators([])
except AttributeError:
# 2.3 has no such concept (and different args!)
func = compiler.ast.Function(func_name, arg_names, defaults, 0, None,
tree.node)
else:
# 2.4 and later
func = compiler.ast.Function(decs, func_name, arg_names, defaults, 0, None,
tree.node)
stmt = compiler.ast.Stmt((func,))
tree.node = stmt
syntax.check(tree)
return tree
def compile_function(src, filename, func_name, arg_names, defaults=[],
# more args to come...
lineno=0):
assert filename, "filename is required"
try:
tree = parse_function(_fix_src(src), func_name, arg_names, defaults)
except SyntaxError, err:
err.lineno += lineno
err.filename = filename
raise SyntaxError, err
set_filename_and_offset(filename, lineno, tree)
gen = ModuleCodeGenerator(tree)
return gen.getCode()
# And a 'standard' compile, but with the filename offset feature.
def compile(src, filename, mode='exec', flags=None, dont_inherit=None, lineno=0):
if flags is not None or dont_inherit is not None or mode != 'exec':
raise RuntimeError, "not implemented yet"
try:
tree = parse(_fix_src(src), mode)
except SyntaxError, err:
err.lineno += lineno
err.filename = filename
raise SyntaxError, err
set_filename_and_offset(filename, lineno, tree)
gen = ModuleCodeGenerator(tree)
return gen.getCode()
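# Example usage (illustrative, not part of the original module; assumes
# Python 2.x, where the 'compiler' package still exists):
#
#   code = compile_function("return a + b\n", "inline.py", "handler",
#                           ["a", "b"], lineno=10)
#   ns = {}
#   exec code in ns
#   assert ns["handler"](1, 2) == 3
#
# A SyntaxError raised while parsing the body is reported against
# `filename` with the line number offset by `lineno`.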
| mpl-2.0 | 78,361,717,112,699,170 | 39.45082 | 83 | 0.684904 | false |
x5zone/Mynote | bet365/test.py | 1 | 5052 | #!/usr/bin/python
# coding:utf-8
import gzip
import re
import time
import redis
from urllib2 import urlopen
import urlparse
import bs4
r = redis.Redis(host='localhost',port=6379,db=0)
def save2redis(match_id,key,value):
r.hset(match_id,key,value)
def getfromredis(match_id):
return r.hgetall(match_id)
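# Illustration (added; not part of the original script): each match is stored
# as a redis hash keyed by match_id, e.g.
#   save2redis(86000, 'result', '2:1')
#   getfromredis(86000)  # -> {'result': '2:1'}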
def main():
#for match_id in range(86000,86001):
for match_id in range(66481,99999):
if match_id % 3 == 0:
time.sleep(0.1)
if get_pool_result(match_id):
get_fb_match_hhad(match_id)
def get_pool_result(match_id):
base_url = "http://info.sporttery.cn/football/pool_result.php?id="
base_url = "%s%d" % (base_url,match_id)
print base_url
html = urlopen(base_url).read()
html = html.decode('gbk')
#print dir(html),type(html)
soup = bs4.BeautifulSoup(html, "html.parser")
match_begin = re.sub(r'[^- :0-9]', "", soup.find_all('span',class_='Centers')[0].contents[0]).strip()
if match_begin == '':
return False
match_begin = time.mktime(time.strptime(match_begin,"%Y-%m-%d %H:%M"))
if time.time()-7200 < match_begin:
print "last match_id %d" % match_id
exit()
#return False
    # match result
for i,tag in enumerate(soup.find_all('tr',class_='Tr3 Tr_normal')):
if i == 0:
continue
for j,x in enumerate(tag.find_all('td')):
if j != 4:
continue
bifen = re.sub(r'[^:0-9]', "", str(x))
if bifen == "":
return False
else:
save2redis(match_id,'result', bifen)
print match_id, " : ", bifen
break
return True
    # total number of goals
"""
keys = []
for i,tag in enumerate(soup.find_all('tr',class_='Tr3 Tr_normal bg')):
if i != 2:
continue
for j,x in enumerate(tag.find_all('td')):
print j,type(x),x.contents[0]
if j == 0:
keys.append('scoresnum_time')
else:
keys.append(x.contents[0])
"""
def get_fb_match_hhad(match_id):
"""
    Fetch the sports-lottery win/draw/lose odds, bettor support rates, and
    handicap win/draw/lose odds.
    Fields: match_id; bettor vote support rate for win, draw, lose; error
    value; fixed win/draw/lose odds; win/draw/lose payout rates; publish
    date; publish time.
"""
base_url = "http://info.sporttery.cn/football/info/fb_match_hhad.php?m="
base_url = "%s%d" % (base_url,match_id)
print base_url
html = urlopen(base_url).read()
html = html.decode('gbk')
soup = bs4.BeautifulSoup(html, "html.parser")
tag = soup.find_all('div',class_='floatR font12')
odds = []
"""
for x in tag:
for i,y in enumerate(x):
if i == 0:
continue
if y.__class__ == bs4.element.Tag:
for z in y:
print re.sub(r'\D', "", z)
else:
print re.sub(r'\D', "", y)
if i == 4:
break
    # handicap odds ignored
for i,tag in enumerate(soup.find_all('tr',class_='Tr3')):
print "odds:",i
for j,x in enumerate(tag.find_all('td')):
if x.__class__ == bs4.element.Tag:
for y in x:
print re.sub(r'[^.:0-9]',"",str(y))
else:
print re.sub(r'[^.:0-9]',"",x)
if j == 5:
break
#if i == 6:
#break
"""
    # odds from the 99 bookmakers
for i,tag in enumerate(soup.find_all('tr',class_='Tr33')):
#print "odds:",i
key = 'null'
for j,x in enumerate(tag.find_all('td')):
#print j,x
if j == 1:
key = x.contents[0]
if type(key) == bs4.element.Tag:
key = key.contents[0]
if type(key) == bs4.element.Tag:
key = key.contents[0]
if j < 2:
continue
#print j,x
if x.__class__ == bs4.element.Tag:
for y in x:
if type(y) == bs4.element.Tag:
y = y.contents[0]
value = re.sub(r'[^.:0-9]',"",y)
print key+str(j),value
save2redis(match_id,key+str(j), value)
break
else:
value = re.sub(r'[^.:0-9]',"",x)
print key+str(j),value
save2redis(match_id,key+str(j), value)
if (i<3 and j == 10) or j == 13:
break
if __name__ == '__main__':
main()
exit()
for i in (86000,87000,87001,87002):
get_pool_result(i)
#num = get_fb_match_hhad(i)
#print "results:",num
exit()
#print html
soup = bs4.BeautifulSoup(html, "html.parser")
baijia = soup.find("",{"class":"Tr33"})
exp1 = re.compile("(?isu)<tr[^>]*>(.*?)</tr>")
h = re.findall(r'<td[^>]*><a[^>]*>(.*?)</a></td>', baijia, re.I|re.M)
print h
#for dd in soup.select('.searchResult tr') if dd.contents[1].name != 'th':
| unlicense | 2,712,315,846,516,340,700 | 30.779221 | 105 | 0.483449 | false |
avian2/ec3k | ec3k.py | 1 | 12641 | """Software receiver for EnergyCount 3000
Copyright (C) 2015 Tomaz Solc <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from gnuradio import digital
from gnuradio import gr, blocks, filter, analog
import itertools
import math
import os.path
import osmosdr
import select
import signal
import subprocess
import tempfile
import threading
import time
def which(program):
for path in os.environ["PATH"].split(os.pathsep):
fpath = os.path.join(path, program)
if os.path.isfile(fpath) and os.access(fpath, os.X_OK):
return fpath
return None
class InvalidPacket(Exception): pass
class EnergyCount3KState:
"""EnergyCount 3000 transmitter state.
This object contains fields contained in a single radio
packet:
id -- 16-bit ID of the device
time_total -- time in seconds since last reset
time_on -- time in seconds since last reset with non-zero device power
energy -- total energy in Ws (watt-seconds)
power_current -- current device power in watts
power_max -- maximum device power in watts (reset at unknown intervals)
reset_counter -- total number of transmitter resets
device_on_flag -- true if device is currently drawing non-zero power
timestamp -- UNIX timestamp of the packet reception (not accurate)
"""
CRC = 0xf0b8
def __init__(self, hex_bytes):
bits = self._get_bits(hex_bytes)
bits = [ not bit for bit in bits ]
bits = self._descrambler([18, 17, 13, 12, 1], bits)
bits = [ not bit for bit in bits ]
bits = self._bit_unstuff(bits)
bits = self._bit_shuffle(bits)
nibbles = self._get_nibbles(bits)
self._check_crc(nibbles)
self._decode_packet(nibbles)
def _get_bits(self, hex_bytes):
"""Unpacks hex printed data into individual bits"""
bits = []
for hex_byte in hex_bytes:
i = int(hex_byte, 16)
for n in xrange(8):
bits.append(bool((i<<n) & 0x80))
return bits
def _get_nibbles(self, bits):
"""Shift bits into bytes, MSB first"""
nibbles = [0] * (len(bits) / 4)
for n, bit in enumerate(bits):
nibbles[n/4] |= (int(bit) << (3-n%4))
return nibbles
def _bit_shuffle(self, bits):
"""Weird bit shuffling operation required"""
nbits = []
# first, invert byte bit order
args = [iter(bits)] * 8
for bit_group in itertools.izip_longest(fillvalue=False, *args):
nbits += reversed(bit_group)
return nbits
def _descrambler(self, taps, bits):
"""Multiplicative, self-synchronizing scrambler"""
nbits = []
state = [ False ] * max(taps)
for bit in bits:
out = bit
for tap in taps:
out = out ^ state[tap-1]
nbits.append(out)
state = [ bit ] + state[:-1]
return nbits
def _bit_unstuff(self, bits):
"""Bit stuffing reversal.
6 consecutive 1s serve as a packet start/stop condition.
In the packet, one zero is stuffed after 5 consecutive 1s
"""
nbits = []
start = False
cnt = 0
for n, bit in enumerate(bits):
if bit:
cnt += 1
if start:
nbits.append(bit)
else:
if cnt < 5:
if start:
nbits.append(bit)
elif cnt == 5:
pass
elif cnt == 6:
start = not start
else:
raise InvalidPacket("Wrong bit stuffing: %d concecutive ones" % cnt)
cnt = 0
return nbits
def _crc_ccitt_update(self, crc, data):
assert data >= 0
assert data < 0x100
assert crc >= 0
assert crc <= 0x10000
data ^= crc & 0xff
data ^= (data << 4) & 0xff
return ((data << 8) | (crc >> 8)) ^ (data >> 4) ^ (data << 3)
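	# Note (added): _crc_ccitt_update() is the byte-update step of the
	# reflected CRC-16/CCITT (polynomial 0x8408), equivalent to avr-libc's
	# routine of the same name; a packet is valid when the running CRC over
	# all 41 data bytes ends at the residue constant 0xf0b8 (self.CRC).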
def _check_crc(self, nibbles):
if len(nibbles) != 84:
raise InvalidPacket("Wrong length: %d" % len(nibbles))
crc = 0xffff
for i in xrange(0, 82, 2):
crc = self._crc_ccitt_update(crc, nibbles[i] * 0x10 + nibbles[i+1])
if crc != self.CRC:
raise InvalidPacket("CRC mismatch: %d != %d" % (crc, self.CRC))
def _unpack_int(self, nibbles):
i = 0
for nibble in nibbles:
i = (i * 0x10) + nibble
return i
def _decode_packet(self, nibbles):
start_mark = self._unpack_int( nibbles[ 0: 1])
if start_mark != 0x9:
raise InvalidPacket("Unknown start mark: 0x%x (please report this)" % (start_mark,))
self.id = self._unpack_int( nibbles[ 1: 5])
time_total_low = nibbles[ 5: 9]
pad_1 = self._unpack_int( nibbles[ 9:13])
time_on_low = nibbles[13:17]
pad_2 = self._unpack_int( nibbles[17:24])
energy_low = nibbles[24:31]
self.power_current = self._unpack_int( nibbles[31:35]) / 10.0
self.power_max = self._unpack_int( nibbles[35:39]) / 10.0
# unknown? (seems to be used for internal calculations)
self.energy_2 = self._unpack_int( nibbles[39:45])
# nibbles[45:59]
time_total_high = nibbles[59:62]
pad_3 = self._unpack_int( nibbles[62:67])
energy_high = nibbles[67:71]
time_on_high = nibbles[71:74]
self.reset_counter = self._unpack_int( nibbles[74:76])
flags = self._unpack_int( nibbles[76:77])
pad_4 = self._unpack_int( nibbles[77:78])
# crc = self._unpack_int( nibbles[78:82])
# We don't really care about the end mark, or whether it got
# corrupted, since it's not covered by the CRC check.
#end_mark = self._unpack_int( nibbles[82:84])
#if end_mark != 0x7e:
# raise InvalidPacket("Invalid end mark: %d" % (end_mark,))
if pad_1 != 0:
raise InvalidPacket("Padding 1 not zero: 0x%x (please report this)" % (pad_1,))
if pad_2 != 0:
raise InvalidPacket("Padding 2 not zero: 0x%x (please report this)" % (pad_2,))
if pad_3 != 0:
raise InvalidPacket("Padding 3 not zero: 0x%x (please report this)" % (pad_3,))
if pad_4 != 0:
raise InvalidPacket("Padding 4 not zero: 0x%x (please report this)" % (pad_4,))
self.timestamp = time.time()
self.time_total = self._unpack_int(time_total_high + time_total_low)
self.time_on = self._unpack_int(time_on_high + time_on_low)
self.energy = self._unpack_int(energy_high + energy_low)
if flags == 0x8:
self.device_on_flag = True
elif flags == 0x0:
self.device_on_flag = False
else:
raise InvalidPacket("Unknown flag value: 0x%x (please report this)" % (flags,))
# Set properties for compatibility with older ec3k module versions
self.uptime = self.time_total
self.since_reset = self.time_on
self.energy_1 = self.energy
self.current_power = self.power_current
self.max_power = self.power_max
def __str__(self):
if self.device_on_flag:
flag = '*'
else:
flag = ' '
return ("id : %04x\n"
"time total : %d seconds\n"
"time on %s : %d seconds\n"
"energy %s : %d Ws\n"
"power current : %.1f W\n"
"power max : %.1f W\n"
"reset counter : %d") % (
self.id,
self.time_total,
flag, self.time_on,
flag, self.energy,
self.power_current,
self.power_max,
self.reset_counter)
class EnergyCount3K:
"""Object representing EnergyCount 3000 receiver"""
def __init__(self, id=None, callback=None, freq=868.402e6, device=0, osmosdr_args=None):
"""Create a new EnergyCount3K object
Takes the following optional keyword arguments:
id -- ID of the device to monitor
callback -- callable to call for each received packet
freq -- central frequency of the channel on which to listen for
updates (default is known to work for European devices)
device -- rtl-sdr device to use
osmosdr_args -- any additional OsmoSDR arguments (e.g. "offset_tune=1")
If ID is None, then packets for all devices will be received.
		callback should be a function or a callable object that takes
		one EnergyCount3KState object as its argument.
"""
self.id = id
self.callback = callback
self.freq = freq
self.device = device
self.osmosdr_args = osmosdr_args
self.want_stop = True
self.state = None
self.noise_level = -90
def start(self):
"""Start the receiver"""
assert self.want_stop
self.want_stop = False
self.threads = []
self._start_capture()
capture_thread = threading.Thread(target=self._capture_thread)
capture_thread.start()
self.threads.append(capture_thread)
self._setup_top_block()
self.tb.start()
def stop(self):
"""Stop the receiver and clean up"""
assert not self.want_stop
self.want_stop = True
for thread in self.threads:
thread.join()
self.tb.stop()
self.tb.wait()
self._clean_capture()
def get(self):
"""Get the last received state
Returns data from the last received packet as a
EnergyCount3KState object.
"""
return self.state
def _log(self, msg):
"""Override this method to capture debug information"""
pass
def _start_capture(self):
self.tempdir = tempfile.mkdtemp()
self.pipe = os.path.join(self.tempdir, "ec3k.pipe")
os.mkfifo(self.pipe)
self.capture_process = None
try:
for program in ["capture", "capture.py"]:
fpath = which(program)
if fpath is not None:
self.capture_process = subprocess.Popen(
[fpath, "-f", self.pipe],
bufsize=1,
stdout=subprocess.PIPE)
return
raise Exception("Can't find capture binary in PATH")
except:
self._clean_capture()
raise
def _clean_capture(self):
if self.capture_process:
self.capture_process.send_signal(signal.SIGTERM)
self.capture_process.wait()
self.capture_process = None
os.unlink(self.pipe)
os.rmdir(self.tempdir)
def _capture_thread(self):
while not self.want_stop:
rlist, wlist, xlist = select.select([self.capture_process.stdout], [], [], 1)
if rlist:
line = rlist[0].readline()
fields = line.split()
if fields and (fields[0] == 'data'):
self._log("Decoding packet")
try:
state = EnergyCount3KState(fields[1:])
except InvalidPacket, e:
self._log("Invalid packet: %s" % (e,))
continue
if (not self.id) or (state.id == self.id):
self.state = state
if self.callback:
self.callback(self.state)
def _noise_probe_thread(self):
while not self.want_stop:
power = self.noise_probe.level()
self.noise_level = 10 * math.log10(max(1e-9, power))
self._log("Current noise level: %.1f dB" % (self.noise_level,))
self.squelch.set_threshold(self.noise_level+7.0)
time.sleep(1.0)
def _setup_top_block(self):
self.tb = gr.top_block()
samp_rate = 96000
oversample = 10
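		# Capture at samp_rate * oversample = 960 kHz from the dongle, then
		# decimate by `oversample` in the FIR low-pass back down to 96 kHz.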
# Radio receiver, initial downsampling
args = "rtl=%d,buffers=16" % (self.device,)
if self.osmosdr_args:
args += ",%s" % (self.osmosdr_args,)
osmosdr_source = osmosdr.source(args=args)
osmosdr_source.set_sample_rate(samp_rate*oversample)
osmosdr_source.set_center_freq(self.freq, 0)
osmosdr_source.set_freq_corr(0, 0)
osmosdr_source.set_gain_mode(True, 0)
osmosdr_source.set_gain(0, 0)
taps = filter.firdes.low_pass(1, samp_rate*oversample, 90e3, 8e3,
filter.firdes.WIN_HAMMING, 6.76)
low_pass_filter = filter.fir_filter_ccf(oversample, taps)
self.tb.connect((osmosdr_source, 0), (low_pass_filter, 0))
# Squelch
self.noise_probe = analog.probe_avg_mag_sqrd_c(0, 1.0/samp_rate/1e2)
self.squelch = analog.simple_squelch_cc(self.noise_level, 1)
noise_probe_thread = threading.Thread(target=self._noise_probe_thread)
noise_probe_thread.start()
self.threads.append(noise_probe_thread)
self.tb.connect((low_pass_filter, 0), (self.noise_probe, 0))
self.tb.connect((low_pass_filter, 0), (self.squelch, 0))
# FM demodulation
quadrature_demod = analog.quadrature_demod_cf(1)
self.tb.connect((self.squelch, 0), (quadrature_demod, 0))
# Binary slicing, transformation into capture-compatible format
add_offset = blocks.add_const_vff((-1e-3, ))
binary_slicer = digital.binary_slicer_fb()
char_to_float = blocks.char_to_float(1, 1)
multiply_const = blocks.multiply_const_vff((255, ))
float_to_uchar = blocks.float_to_uchar()
pipe_sink = blocks.file_sink(gr.sizeof_char*1, self.pipe)
pipe_sink.set_unbuffered(False)
self.tb.connect((quadrature_demod, 0), (add_offset, 0))
self.tb.connect((add_offset, 0), (binary_slicer, 0))
self.tb.connect((binary_slicer, 0), (char_to_float, 0))
self.tb.connect((char_to_float, 0), (multiply_const, 0))
self.tb.connect((multiply_const, 0), (float_to_uchar, 0))
self.tb.connect((float_to_uchar, 0), (pipe_sink, 0))
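# Example usage (added for illustration; assumes an rtl-sdr dongle and the
# ec3k 'capture' helper binary on PATH):
#
#   def callback(state):
#       print state
#
#   receiver = EnergyCount3K(callback=callback)
#   receiver.start()
#   time.sleep(60)
#   receiver.stop()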
| gpl-3.0 | 8,677,844,192,735,593,000 | 26.126609 | 89 | 0.665849 | false |
deepmind/dm_alchemy | dm_alchemy/symbolic_alchemy_wrapper.py | 1 | 7573 | # Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Wrapper for a 3d alchemy to keep a symbolic alchemy in sync."""
from dm_alchemy import get_meta_data
from dm_alchemy import symbolic_alchemy
from dm_alchemy.types import event_unpacking
from dm_alchemy.types import stones_and_potions
from dm_alchemy.types import unity_python_conversion
from dm_alchemy.types import utils
import dm_env
def _add_to_obs(obs, to_add, name):
if isinstance(obs, tuple):
return obs + (to_add,)
if isinstance(obs, dict):
obs[name] = to_add
return obs
if isinstance(obs, list):
return obs + [to_add]
# If it is not already a tuple, dict or list, then make it a tuple.
return obs, to_add
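# For instance (illustrative): _add_to_obs({'x': 1}, 2, 'y') returns
# {'x': 1, 'y': 2}, and _add_to_obs((1,), 2, 'y') returns (1, 2).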
class SymbolicAlchemyWrapper(dm_env.Environment):
"""Take a 3d alchemy environment and keep a symbolic env in sync with it."""
def __init__(
self, env3d, level_name, see_chemistries=None,
see_symbolic_observation=False):
self.env3d = env3d
value_coefficients, value_offset, _, bonus, _ = get_meta_data.to_meta_data(
level_name)
reward_weights = stones_and_potions.RewardWeights(
coefficients=value_coefficients, offset=value_offset, bonus=bonus)
self.env_symbolic = symbolic_alchemy.SymbolicAlchemy(
chemistry_gen=lambda: self.chemistry,
reward_weights=reward_weights,
items_gen=lambda unused_trial_number: self.items,
num_trials=10,
see_chemistries=see_chemistries,
observe_used=True,
)
self.items = utils.TrialItems(stones=[], potions=[])
self._perceived_stones = []
self._perceived_potions = []
self.chemistry = None
self.see_symbolic_observation = see_symbolic_observation
self._trial_in_progress = False
self._trial_has_started = False
def process_step_events(self, events):
for event in events:
if 'TrialEnded' in event.name:
self._trial_has_started = False
self.items = utils.TrialItems(stones=[], potions=[])
self._perceived_stones = []
self._perceived_potions = []
elif 'TrialStarted' in event.name:
self._trial_has_started = True
# At this point we should have all stones and potions and the chemistry.
aligned_stones = [
stones_and_potions.align(stone, self.chemistry.rotation)
for stone, _ in self._perceived_stones]
latent_stones = [self.chemistry.stone_map.apply(stone)
for stone in aligned_stones]
stones = [
stones_and_potions.Stone(i, stone.latent_coords)
for (_, i), stone in zip(self._perceived_stones, latent_stones)]
latent_potions = [self.chemistry.potion_map.apply(potion)
for potion, _ in self._perceived_potions]
potions = [
stones_and_potions.Potion(i, potion.latent_dim, potion.latent_dir)
for (_, i), potion in zip(self._perceived_potions, latent_potions)]
self.items = utils.TrialItems(stones=stones, potions=potions)
# When we get an event saying that the new trial has started in the 3d
# version it should be safe to end the previous trial in the symbolic
# version.
if self._trial_in_progress:
self.env_symbolic.end_trial()
if self.env_symbolic.is_last_step():
self.env_symbolic.reset()
# Once the first trial is started there is always a trial in progress
# from then on.
self._trial_in_progress = True
elif 'PotionUsed' in event.name:
potion_inst_id, stone_inst_id = event_unpacking.unpack_potion_used(
event)
stone_ind = self.env_symbolic.game_state.get_stone_ind(
stone_inst=stone_inst_id)
potion_ind = self.env_symbolic.game_state.get_potion_ind(
potion_inst=potion_inst_id)
# Take an action putting the stone in the potion.
self.env_symbolic.step_slot_based_action(utils.SlotBasedAction(
stone_ind=stone_ind, potion_ind=potion_ind))
elif 'StoneUsed' in event.name:
stone_inst_id = event_unpacking.unpack_stone_used(event)
stone_ind = self.env_symbolic.game_state.get_stone_ind(
stone_inst=stone_inst_id)
# Take an action putting the stone in the cauldron.
self.env_symbolic.step_slot_based_action(utils.SlotBasedAction(
stone_ind=stone_ind, cauldron=True))
elif 'ChemistryCreated' in event.name:
chem, rot = event_unpacking.unpack_chemistry_and_rotation(event)
self.chemistry = unity_python_conversion.from_unity_chemistry(chem, rot)
else:
potions = event_unpacking.get_potions([event])
stones = event_unpacking.get_stones([event])
if (potions or stones) and self._trial_has_started:
self.items = utils.TrialItems(stones=[], potions=[])
self._perceived_stones = []
self._perceived_potions = []
self._trial_has_started = False
self._perceived_potions.extend(potions)
self._perceived_stones.extend(stones)
def step(self, action) -> dm_env.TimeStep:
timestep = self.env3d.step(action)
# If a symbolic action has occurred take the action in the symbolic
# environment.
self.process_step_events(self.env3d.events())
return self.add_observations(timestep)
def reset(self) -> dm_env.TimeStep:
timestep = self.env3d.reset()
self.items = utils.TrialItems(stones=[], potions=[])
self._perceived_stones = []
self._perceived_potions = []
self._trial_has_started = False
self.process_step_events(self.env3d.events())
return self.add_observations(timestep)
def add_observations(self, timestep):
new_observation = timestep.observation
symbolic_observation = self.env_symbolic.observation()
if self.see_symbolic_observation:
new_observation = _add_to_obs(
new_observation, symbolic_observation['symbolic_obs'], 'symbolic_obs')
for name in self.env_symbolic.see_chemistries.keys():
new_observation = _add_to_obs(
new_observation, symbolic_observation[name], name)
return dm_env.TimeStep(
step_type=timestep.step_type, reward=timestep.reward,
discount=timestep.discount, observation=new_observation)
def observation_spec(self):
obs_spec = self.env3d.observation_spec()
if self.see_symbolic_observation:
symbolic_obs = self.env_symbolic.observation_spec()['symbolic_obs']
obs_spec = _add_to_obs(obs_spec, symbolic_obs, 'symbolic_obs')
for name in self.env_symbolic.see_chemistries.keys():
chem_obs_spec = self.env_symbolic.observation_spec()[name]
obs_spec = _add_to_obs(obs_spec, chem_obs_spec, name)
return obs_spec
def action_spec(self):
return self.env3d.action_spec()
# Forward other attribute lookups to the 3d environment.
def __getattr__(self, name):
return getattr(self.env3d, name)
| apache-2.0 | 2,939,894,921,043,146,000 | 41.785311 | 80 | 0.663938 | false |
miguelfrde/openautomata | openautomata/regex.py | 1 | 3734 |
from automata import *
from collections import defaultdict
OR = '|'
CLOSURE = '*'
POS_CLOSURE = '+'
WILD_CARD = '.'
SYMBOLS = (')', '(', OR, CLOSURE, POS_CLOSURE)
def balanced_parenthesis(txt):
count = 0
for c in txt:
if c == '(': count += 1
if c == ')': count -= 1
if count < 0: return False
return count == 0
class RegularExpression:
def __init__(self, regex_str):
if not balanced_parenthesis(regex_str):
raise Exception("Parenthesis not balanced.")
self.regex = '(' + regex_str + ')'
self.nfa = None
self.dfa = DFA.from_nfa(self.__get_nfa())
self.dfa.minimize()
def __get_nfa(self):
"Regular Expression to NFA"
alphabet = set(c for c in self.regex if c not in SYMBOLS)
nfa = NFA(alphabet)
nfa.set_initial(0)
nfa.add_final(len(self.regex) - 1)
stack = list()
N = len(self.regex)
for i, c in enumerate(self.regex):
ind = i
if c in alphabet:
nfa.add_transition(i, i + 1, c)
elif c == '(':
nfa.add_transition(i, i + 1, EPSILON)
stack.append(i)
elif c == ')':
nfa.add_transition(i, i + 1, EPSILON)
ind = stack.pop()
tmplist = list()
# Adds a transition between every or and the closing parenthesis
while self.regex[ind] == OR:
tmplist.append(ind)
nfa.add_transition(ind, i, EPSILON)
ind = stack.pop()
# Adds a transition between the opening parenthesis and every or
for n in tmplist:
nfa.add_transition(ind, n + 1, EPSILON)
elif c == OR:
stack.append(i)
elif c in (CLOSURE, POS_CLOSURE):
nfa.add_transition(i, i + 1, EPSILON)
if i < N - 1 and self.regex[i + 1] in (CLOSURE, POS_CLOSURE):
if self.regex[i + 1] == CLOSURE:
nfa.add_transition(ind, i + 1, EPSILON)
nfa.add_transition(i + 1, ind, EPSILON)
nfa.states.remove(N)
nfa.transition = defaultdict(set, [(k, v) for k, v in nfa.transition.iteritems()
if N not in v])
return nfa
def __str__(self):
return self.regex[1:-1]
def matches(self, text):
"Match the regular expression against the text"
state = self.dfa.initial_state
for i, letter in enumerate(text):
try:
state = self.dfa.get_transition(state, letter)
except SymbolNotInAlphabetError:
return (False, i)
        result = any(f in state for f in self.dfa.final_states)
return (result, len(text))
def search(self, text):
"Search for all matches of a regular expression in a text"
current_states = list()
result = list()
for i, c in enumerate(text):
current_states.append((i, {self.dfa.initial_state}))
new_states = list()
for c in set([WILD_CARD, c]):
if c not in self.dfa.alphabet: continue
for initial, s in current_states:
t = self.dfa.get_transition(s, c)
if not t: continue
new_states.append((initial, t))
if self.dfa.contains_final(t):
yield (initial, i, text[initial:i+1])
current_states = new_states
if __name__ == '__main__':
r = RegularExpression("a.e")
print list(r.search("ade"))
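    # Expected output (illustrative): [(0, 2, 'ade')]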
| mit | -8,601,334,346,309,791,000 | 34.226415 | 88 | 0.502678 | false |
openlawlibrary/pygls | tests/lsp/test_code_action.py | 1 | 4987 | ############################################################################
# Copyright(c) Open Law Library. All rights reserved. #
# See ThirdPartyNotices.txt in the project root for additional notices. #
# #
# Licensed under the Apache License, Version 2.0 (the "License") #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http: // www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import unittest
from typing import List, Optional, Union
from pygls.lsp.methods import CODE_ACTION
from pygls.lsp.types import (CodeAction, CodeActionContext, CodeActionKind, CodeActionOptions,
CodeActionParams, Command, Diagnostic, Position, Range,
TextDocumentIdentifier)
from ..conftest import CALL_TIMEOUT, ClientServer
class TestCodeAction(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client_server = ClientServer()
cls.client, cls.server = cls.client_server
@cls.server.feature(
CODE_ACTION,
CodeActionOptions(code_action_kinds=[CodeActionKind.Refactor])
)
def f(params: CodeActionParams) -> Optional[List[Union[Command, CodeAction]]]:
if params.text_document.uri == 'file://return.list':
return [
CodeAction(title='action1'),
CodeAction(title='action2', kind=CodeActionKind.Refactor),
Command(title='cmd1', command='cmd1', arguments=[1, 'two']),
]
else:
return None
cls.client_server.start()
@classmethod
def tearDownClass(cls):
cls.client_server.stop()
def test_capabilities(self):
capabilities = self.server.server_capabilities
assert capabilities.code_action_provider
assert capabilities.code_action_provider.code_action_kinds == [CodeActionKind.Refactor]
def test_code_action_return_list(self):
response = self.client.lsp.send_request(
CODE_ACTION,
CodeActionParams(
text_document=TextDocumentIdentifier(uri='file://return.list'),
range=Range(
start=Position(line=0, character=0),
end=Position(line=1, character=1),
),
context=CodeActionContext(
diagnostics=[
Diagnostic(
range=Range(
start=Position(line=0, character=0),
end=Position(line=1, character=1),
),
message='diagnostic'
)
],
only=[CodeActionKind.Refactor]
)
)
).result(timeout=CALL_TIMEOUT)
assert response[0]['title'] == 'action1'
assert response[1]['title'] == 'action2'
assert response[1]['kind'] == CodeActionKind.Refactor
assert response[2]['title'] == 'cmd1'
assert response[2]['command'] == 'cmd1'
assert response[2]['arguments'] == [1, 'two']
def test_code_action_return_none(self):
response = self.client.lsp.send_request(
CODE_ACTION,
CodeActionParams(
text_document=TextDocumentIdentifier(uri='file://return.none'),
range=Range(
start=Position(line=0, character=0),
end=Position(line=1, character=1),
),
context=CodeActionContext(
diagnostics=[
Diagnostic(
range=Range(
start=Position(line=0, character=0),
end=Position(line=1, character=1),
),
message='diagnostic',
)
],
only=[CodeActionKind.Refactor],
)
)
).result(timeout=CALL_TIMEOUT)
assert response is None
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 7,890,839,024,633,215,000 | 40.907563 | 95 | 0.487868 | false |
SimFre/EasyMoney | EasyMoney.py | 1 | 2677 | #!/usr/local/bin/python3
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from ImportBase import ImportBase
from DbConnection import DbConnection
from Ui_MainWindow import Ui_MainWindow
from Control_MainWindow import Control_MainWindow
if __name__ == '__main__':
#dbFile = "/Users/laban/Documents/Ekonomi/Transactions.db"
dbFile = "/home/laban/Documents/Ekonomi/Transactions.db"
with DbConnection(dbFile, False) as db:
print("Database:", dbFile)
# Initiate data class
ib = ImportBase(db)
app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(window)
ctl = Control_MainWindow(ui, ib)
window.show()
app.exec_()
# SAS Eurobonus Mastercard
# inputFilename = "/Users/laban/Documents/Ekonomi/SAS Eurobonus Mastercard/"
# card = "Fredriksson Simon (nnnnnn******nnnn)"
# ib.importFile(inputFilename + "Kontoutdrag-201405.xlsx", card)
# ib.importFile(inputFilename + "Kontoutdrag-201406.xlsx", card)
# ib.importFile(inputFilename + "Kontoutdrag-201407.xlsx", card)
# ib.importFile(inputFilename + "Kontoutdrag-201408.xlsx", card)
# ib.importFile(inputFilename + "Kontoutdrag-201409.xlsx", card)
# ib.importFile(inputFilename + "Kontoutdrag-201410.xlsx", card)
# ib.importFile(inputFilename + "Kontoutdrag-201411.xlsx", card)
# ib.importFile(inputFilename + "Kontoutdrag-201412.xlsx", card)
# ib.importFile(inputFilename + "Kontoutdrag-201501.xlsx", card)
# ib.importFile(inputFilename + "Kontoutdrag-201502.xlsx", card)
# Diners
# inputFilename = "/Users/laban/Documents/Ekonomi/Diners Club/"
# card = "Diners Club"
# ib.importFile(inputFilename + "Diners20140618.xls", card)
# ib.importFile(inputFilename + "Diners20140721.xls", card)
# ib.importFile(inputFilename + "Diners20140819.xls", card)
# ib.importFile(inputFilename + "Diners20140918.xls", card)
# ib.importFile(inputFilename + "Diners20141021.xls", card)
# ib.importFile(inputFilename + "Diners20141118.xls", card)
# ib.importFile(inputFilename + "Diners20141218.xls", card)
# ib.importFile(inputFilename + "Diners20150120.xls", card)
# ib.importFile(inputFilename + "Diners20150217.xls", card)
# Swedbank
# inputFilename = "/Users/laban/Documents/Ekonomi/Swedbank/Swedbank_20140530-20150304.txt"
# card = "Privatkonto (nnnnn.nnnnnnnnnn)"
# codepage = "utf8"
# ib.importFile(inputFilename, card, codepage)
| gpl-2.0 | 3,995,125,351,226,500,000 | 44.372881 | 98 | 0.661188 | false |
hans-t/sg-bus-arrival | app/test_bus_stop.py | 1 | 3031 | import unittest
import unittest.mock as mock
import bus_stop
patcher = mock.patch('bus_stop.requests')
requests_mock = patcher.start()
patcher = mock.patch('bus_stop.redis')
redis_mock = patcher.start()
class Response:
def __init__(self, data, status_code):
self.data = data
self.status_code = status_code
def json(self):
return self.data
class TestGetArrivalTime(unittest.TestCase):
def setUp(self):
patcher = mock.patch('bus_stop.datetime')
self.addCleanup(patcher.stop)
self.datetime_mock = patcher.start()
self.now = '2016-03-07T13:09:53.373022'
self.datetime_mock.utcnow.return_value.isoformat.return_value = self.now
def test_200_returns_json_data(self):
json_data = {
'BusStopID': '83139',
'Services': [
{"ServiceNo": "15"},
],
}
requests_mock.get.return_value = Response(data=json_data, status_code=200)
output = bus_stop.get_arrival_time('83139')
json_data['currentTime'] = self.now
self.assertEqual(output, json_data)
def test_non_200_returns_empty_services(self):
# obsolete
requests_mock.get.return_value = Response(data={'Services': []}, status_code=500)
output = bus_stop.get_arrival_time('83140')
self.assertEqual(output, None)
def test_non_200_returns_placeholder(self):
requests_mock.get.return_value = Response(data={'Services': []}, status_code=500)
output = bus_stop.get_arrival_time('83140')
self.assertEqual(output, {'placeholder': None})
class TestGetInfo(unittest.TestCase):
def setUp(self):
patcher = mock.patch('bus_stop.get_attributes')
self.addCleanup(patcher.stop)
self.get_attributes_mock = patcher.start()
patcher = mock.patch('bus_stop.get_arrival_time')
self.addCleanup(patcher.stop)
self.get_arrival_time_mock = patcher.start()
def test_valid_bus_stop_returns_arrival_data_with_bus_stop_details(self):
arrival_data = {
'BusStopID': '83279',
'Services': [
{"ServiceNo": "15"},
],
}
self.get_arrival_time_mock.return_value = arrival_data
self.get_attributes_mock.return_value = {'Road': 'Tuas Ave 7', 'Description': 'Opp Blk 37'}
output = bus_stop.get_info('83219')
arrival_data['Road'] = 'Tuas Ave 7'
arrival_data['Description'] = 'Opp Blk 37'
self.assertEqual(output, arrival_data)
def test_invalid_bus_stop_returns_empty_services(self):
# obsolete
self.get_attributes_mock.return_value = {}
output = bus_stop.get_info('83279')
self.assertEqual(output, {'Services': []})
def test_invalid_bus_stop_returns_placeholder(self):
self.get_attributes_mock.return_value = {}
output = bus_stop.get_info('83279')
self.assertEqual(output, {'placeholder': None})
if __name__ == '__main__':
unittest.main() | mit | 5,705,633,543,433,465,000 | 31.956522 | 99 | 0.615968 | false |
zhaofengli/refill | backend/refill/models/context.py | 1 | 6166 | from concurrent.futures import ThreadPoolExecutor
from importlib import import_module
from ..utils import Utils
from uuid import uuid1
import mwparserfromhell
import celery.utils.log
import logging
import re
class Context:
def __init__(self):
"""Initialize the context
Note:
This does not depend on Celery. If no Celery task is attached,
Celery-related methods are noop.
"""
self._task = None
self._page = None
self.preferences = {}
self.changes = []
self.errors = []
self.transforms = []
self.transformMetadata = {}
self.currentTransform = None
self.currentTransformIndex = 0
self.wikicode = None
self.origWikicode = ''
self.uuid = str(uuid1())
self.executor = ThreadPoolExecutor(max_workers=10)
self.getLogger = logging.getLogger
self.logging = self.getLogger('refill')
def attachTask(self, task):
"""Attach a Celery Task object
"""
self._task = task
self.getLogger = celery.utils.log.get_logger
self.logging = self.getLogger('refill')
def attachPage(self, page):
"""Attach a pywikibot page
"""
self._page = page
def setPreferences(self, preferences):
"""Set user preferences
"""
self.preferences = preferences
def getPreference(self, preference: str, default: str = None):
"""Get user preference
"""
return self.preferences.get(preference, default)
def applyTransforms(self, wikicode: str):
"""Apply scheduled transforms on the wikicode
"""
self.wikicode = mwparserfromhell.parse(Utils.protectMarkers(wikicode, self.uuid))
self.origWikicode = wikicode
for index, transform in enumerate(self.transforms):
self.currentTransform = transform
self.currentTransformIndex = index
self._updateState()
transform.apply(self.wikicode)
def getResult(self):
"""Get the final result as Celery metadata
"""
return self._generateTaskMetadata()
def getPage(self):
"""Get the associated pywikibot Page object
"""
if self._page:
return self._page
return False
def getDateFormat(self):
"""Get the preferred date format of the page
"""
page = self.getPage()
if not page:
return False
lang = page.site.lang
userPreference = self.getPreference('dateFormat', {}).get(lang, False)
if not self.wikicode:
return userPreference
if lang == 'en':
try:
hint = next(self.wikicode.ifilter_templates(
recursive=False,
matches=lambda e: re.match(r'^(U|u)se (mdy|dmy) dates$', str(e.name)),
))
except StopIteration:
return userPreference
return 'mdy' if 'mdy' in str(hint.name) else 'dmy'
return userPreference
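    # Illustration (added): on an English page containing {{Use dmy dates}},
    # getDateFormat() returns 'dmy' regardless of the user's 'dateFormat'
    # preference; for other languages it falls back to that preference.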
def reportProgress(self, state: str, percentage: float, metadata: dict):
"""Report progress of the current transform
"""
name = self.currentTransform.__class__.__name__
self.transformMetadata[name] = {
'state': state,
'percentage': percentage,
'metadata': metadata,
}
self._updateState()
def reportChange(self, change: dict):
"""Report a change to the wikicode by the current transform
"""
change['transform'] = self.currentTransform.__class__.__name__
self.changes.append(change)
return len(self.changes) - 1
def reportError(self, error: dict):
"""Report an error encountered during the current transform
"""
error['transform'] = self.currentTransform.__class__.__name__
self.errors.append(error)
return len(self.errors) - 1
def _updateState(self):
"""Actually send our state to Celery
"""
if self._task:
self._task.update_state(state='PROGRESS', meta=self._generateTaskMetadata())
def _generateTaskMetadata(self):
"""Generate task metadata for Celery
"""
# Generate percentage
name = self.currentTransform.__class__.__name__
ind = self.currentTransformIndex
if name in self.transformMetadata and \
'percentage' in self.transformMetadata[name]:
ind += self.transformMetadata[name]['percentage']
percentage = ind / len(self.transforms)
# Generate partial wikicode
wikicode = str(self.wikicode) if self.wikicode else ''
# Generate wiki page information
if self._page:
site = self._page.site
family = site.family
wikipage = {
'fam': family.name,
'code': site.code,
'lang': site.lang,
'page': self._page.title(),
'upage': self._page.title(underscore=True),
'domain': site.hostname(),
'path': site.path(),
'protocol': site.protocol(),
'editTime': self._page.editTime().totimestampformat(),
'startTime': site.getcurrenttimestamp(),
}
else:
wikipage = {}
cleanWikicode = Utils.unprotectMarkers(Utils.unmarkWikicode(wikicode), self.uuid)
markedWikicode = Utils.unprotectMarkers(wikicode, self.uuid)
return {
'overall': {
'percentage': percentage,
'currentTransform': self.currentTransform.__class__.__name__,
'currentTransformIndex': self.currentTransformIndex,
'totalTransforms': len(self.transforms),
},
'transforms': self.transformMetadata,
'changes': self.changes,
'errors': self.errors,
'wikicode': cleanWikicode,
'markedWikicode': markedWikicode,
'origWikicode': self.origWikicode,
'wikipage': wikipage,
}
| bsd-2-clause | 8,985,644,120,945,626,000 | 31.452632 | 90 | 0.572494 | false |
e-koch/VLA_Lband | 14B-088/HI/imaging/imaging_tests/HI_testing_channel_clean.py | 1 | 2992 |
import sys
import numpy as np
import os
from tasks import clean, feather
'''
Cleans an MS with a single channel given a mask and a model
'''
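# Example invocation (illustrative; the six trailing arguments map onto the
# sys.argv indices read below; pass the string "None" to skip model or mask):
#   casa -c HI_testing_channel_clean.py my_vis.ms my_model.image my_mask.image T F F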
default("clean")
major, minor, revision = casadef.casa_version.split('.')
casa_version = 100 * int(major) + 10 * int(minor) + int(revision)
vis = sys.argv[-6]
model = sys.argv[-5]
mask = sys.argv[-4]
use_all_fields = True if sys.argv[-3] == "T" else False
use_multiscale = True if sys.argv[-2] == "T" else False
use_tclean = True if sys.argv[-1] == "T" else False
if model == "None":
model = None
if mask == "None":
mask = None
if use_tclean:
if casa_version < 450:
raise Warning("tclean only works for CASA versions above 4.5.")
if use_all_fields:
field = 'M33*'
else:
# Drop M33_3, the incorrect pointing.
field = ",".join(["M33_{}".format(i) for i in range(1, 15)
if i not in [3, 7]]) + ", M33_7_center"
if use_multiscale:
multiscale = [0, 4, 8, 20, 40, 80]
# Different progression based on # pixels within a beam
# multiscale = list(np.array([0, 1, 3, 9, 27, 81]) * 4)
# multiscale = list(np.array([0, 2, 5]) * 4)
else:
multiscale = []
out_root = "{0}.CASAVer_{1}.Model_{2}.Mask_{3}.AllFields_{4}.MScale_{5}" \
".Tclean_{6}".format(vis[:-3],
casa_version,
"T" if model is not None else "F",
"T" if mask is not None else "F",
"T" if use_all_fields else "F",
"T" if use_multiscale else "F",
"T" if use_tclean else "F")
if use_tclean:
from tasks import tclean
tclean(vis=vis, imagename=out_root + '.clean', field=field,
restfreq='1420.40575177MHz', specmode='cube', nchan=1,
start=1, width=1, cell='3arcsec', scales=multiscale,
niter=200000, threshold="1.8mJy/bm", gain=0.1, imsize=[2560, 2560],
gridder='mosaic', weighting='natural', veltype='radio', pblimit=0.2,
interpolation='linear', startmodel=model, usemask='user', mask=mask,
phasecenter='J2000 01h33m50.904 +30d39m35.79',
)
else:
clean(vis=vis, imagename=out_root + '.clean', field=field,
restfreq='1420.40575177MHz',
mode='channel', width=1, nchan=1, start=1,
cell='3arcsec', multiscale=multiscale,
threshold='1.8mJy/beam', imagermode='mosaic', gain=0.1,
imsize=[2560, 2560], weighting='natural', robust=0.0, niter=200000,
pbcor=True, minpb=0.2, interpolation='linear', usescratch=False,
phasecenter='J2000 01h33m50.904 +30d39m35.79', veltype='radio',
modelimage=model, mask=mask)
# Run feathering with the model
if model is not None:
if os.path.exists(out_root + ".clean.image"):
feather(imagename=out_root + ".clean.image.feathered",
highres=out_root + ".clean.image",
lowres=model)
| mit | -1,046,448,077,323,866,400 | 34.2 | 79 | 0.573529 | false |
our-city-app/oca-backend | src/rogerthat/web_client/pages/web_client.py | 1 | 2973 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.5@@
import threading
from datetime import datetime
import webapp2
from dateutil.relativedelta import relativedelta
from webapp2 import Request, Response
from rogerthat.templates import get_language_from_request
from rogerthat.web_client.models import COOKIE_KEY, WebClientSession, SESSION_EXPIRE_TIME
class CurrentRequest(threading.local):
def __init__(self):
self.session = None # type: WebClientSession
def set_session(self, session):
self.session = session
def get_session(self):
return self.session
_current_request = CurrentRequest()
del CurrentRequest
def get_current_web_session():
# type: () -> WebClientSession
return _current_request.get_session()
class WebRequestHandler(webapp2.RequestHandler):
session = None # type: WebClientSession
def get(self, *args, **kwargs):
session = handle_web_request(self.request, self.response)
_current_request.set_session(session)
self.response.set_cookie(COOKIE_KEY, str(session.id), max_age=SESSION_EXPIRE_TIME, httponly=True)
def get_language(self):
session = get_current_web_session()
return session.language if session else get_language_from_request(self.request)
def handle_web_request(request, response):
# type: (Request, Response) -> WebClientSession
cookie = request.cookies.get(COOKIE_KEY)
now = datetime.now()
web_session = None
should_save = False
if cookie:
try:
session_id = long(cookie)
web_session = WebClientSession.create_key(session_id).get()
# Only update the session once per day
if web_session and now > (web_session.last_use_date + relativedelta(days=1)):
web_session.last_use_date = now
should_save = True
except ValueError:
# Cookie is not an integer/long
pass
language = get_language_from_request(request)
if not web_session:
web_session = WebClientSession(last_use_date=now, language=language)
should_save = True
if web_session.language != language:
web_session.language = language
should_save = True
if should_save:
web_session.put()
response.set_cookie(COOKIE_KEY, str(web_session.id), max_age=SESSION_EXPIRE_TIME, httponly=True)
return web_session
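# Note (added): a request without a valid numeric session cookie gets a fresh
# WebClientSession persisted; an existing session is only rewritten when it is
# more than a day old or the request language changed, so the datastore is not
# written on every page view.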
| apache-2.0 | -7,659,937,712,498,835,000 | 32.404494 | 105 | 0.690885 | false |
quantmind/lux | tests/mail/test_smtp.py | 1 | 1139 | from lux.utils import test
from lux.ext.smtp import EmailBackend
class SmtpTest(test.AppTestCase):
config_file = 'tests.mail'
@classmethod
def beforeAll(cls):
email = cls.app.email_backend
email.send_mails = email._send_mails
def test_backend(self):
backend = self.app.email_backend
self.assertIsInstance(backend, EmailBackend)
def test_send_mail(self):
backend = self.app.email_backend
sent = backend.send_mail(to='[email protected]',
subject='Hello!',
message='This is a test message')
self.assertEqual(sent, 1)
def test_send_html_mail(self):
backend = self.app.email_backend
sent = backend.send_mail(to='[email protected]',
subject='Hello!',
html_message='<p>This is a test</p>')
self.assertEqual(sent, 1)
message, _ = backend.sent.pop()
body = message[2].decode('utf-8')
self.assertEqual(message[1][0], '[email protected]')
self.assertTrue('<p>This is a test</p>' in body)
| bsd-3-clause | -7,785,915,141,844,986,000 | 33.515152 | 70 | 0.56453 | false |
0sw4l/villas-de-san-pablo | apps/habilidades_blandas/migrations/0001_initial.py | 1 | 1800 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-21 18:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('personas', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Capacitacion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=50)),
],
options={
'verbose_name': 'Capacitacion',
'verbose_name_plural': 'Capacitaciones',
},
),
migrations.CreateModel(
name='HabilidadBlanda',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('estado_certificado', models.CharField(choices=[('entregado', 'entregado'), ('en proceso', 'en proceso'), ('pendiente', 'pendiente')], max_length=30)),
('tipo_alerta', models.CharField(choices=[('baja', 'baja'), ('media', 'media'), ('alta', 'alta')], max_length=30, verbose_name='alertas')),
('test', models.BooleanField(default=False, verbose_name='Test de habilidades blandas')),
('observaciones', models.CharField(blank=True, max_length=100, null=True)),
('capacitacion', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='habilidades_blandas.Capacitacion')),
('persona', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='item_e', to='personas.Persona')),
],
),
]
| mit | 489,150,653,691,920,600 | 42.902439 | 168 | 0.587778 | false |