repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
---|---|---|---|---|---|---|
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/prior/__init__.py | from typing import Tuple
import numpy as np
class Prior:
    """Interface for priors fitted on evaluations collected from other tasks.

    Subclasses implement `predict`, which returns a per-point mean and
    standard-deviation estimate for new feature rows.
    """

    def __init__(
            self,
            # TODO: decide whether tensors or numpy arrays should be the unified argument type
            X_train: np.array,
            y_train: np.array,
    ):
        super(Prior, self).__init__()
        # require one target row per feature row, both as 2-d matrices
        assert len(X_train) == len(y_train)
        assert X_train.ndim == 2 and y_train.ndim == 2
        self.dim = X_train.shape[1]

    def predict(self, X: np.array) -> Tuple[np.array, np.array]:
        """
        :param X: features with shape (n, dim)
        :return: two arrays with shape (n,)
        """
        pass
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/optimizer_styles.py | from matplotlib import cm
from experiments.optimizer_names import names
def _method_dict():
    """Build the (linestyle/marker, color) style registry for every optimizer name.

    :return: dict mapping optimizer display name -> ((linestyle, marker), color)
    """
    cmap = cm.Set1

    def style(prior: bool = False, copula: bool = False):
        # square marker marks prior-based methods, dashed line marks copula variants
        ms = 's' if prior else ""
        ls = '--' if copula else '-'
        return ls, ms

    rs_copula_color = cmap(0)
    rs_color = cmap(0)
    gcp_color = cmap(1)
    gp_color = cmap(1)
    styles = {
        names.GCP_prior: (style(prior=True, copula=True), gcp_color),
        names.GCP_prior_mo: (style(prior=True, copula=True), gcp_color),
        names.GCP: (style(prior=False, copula=True), gcp_color),
        names.GP_prior: (style(prior=True, copula=False), gp_color),
        names.GP_prior_mo: (style(prior=True, copula=False), gp_color),
        names.GP: (style(prior=False, copula=False), gp_color),
        names.CTS_prior: (style(prior=True, copula=True), rs_copula_color),
        names.CTS_prior_mo: (style(prior=True, copula=True), rs_copula_color),
        names.TS_prior: (style(prior=True), rs_color),
        names.TS_prior_mo: (style(prior=True), rs_color),
        names.RS: (style(prior=False), rs_color),
        names.AUTORANGE_GP: (style(), cmap(2)),
        names.WS_BEST: (style(), cmap(3)),
        # BUGFIX: this entry previously repeated the key `names.AUTORANGE_GP`,
        # which silently overrode the cmap(2) entry above and left AUTORANGE_RS
        # without any style; AUTORANGE_RS was presumably intended here.
        names.AUTORANGE_RS: (style(), cmap(4)),
        names.ABLR: (style(), cmap(2)),
        names.ABLR_COPULA: (style(copula=True), cmap(2)),
        names.BOHB: (style(), cmap(6)),
        names.REA: (style(), cmap(7)),
        names.REINFORCE: (style(), cmap(8)),
        names.GCP_ho_prior: (style(), "black"),
        names.CTS_ho_prior: (style(), "black"),
        names.EHI: (style(), cmap(2)),
        names.SMS: (style(), cmap(3)),
        names.SUR: (style(), cmap(4)),
        names.EMI: (style(), cmap(5)),
        names.SGPT: (style(), cmap(9)),
        names.SGPT_COPULA: (style(copula=True), cmap(9)),
    }
    return styles
def optimizer_style(method: str):
    """Look up the ((linestyle, marker), color) registered for `method`."""
    available = _method_dict()
    #method = method.strip(names.MO_suffix)
    assert method in available, f"method {method} is missing a style"
    return available[method]
if __name__ == '__main__':
    # visual sanity check: draw one horizontal line per optimizer with its style
    import matplotlib.pyplot as plt
    plt.figure(figsize=(5, 5))
    for i, (method, ((ls, ms), color)) in enumerate(_method_dict().items()):
        plt.plot(range(10), [i] * 10, ls=ls, marker=ms, color=color, label=method)
    plt.legend()
    plt.show()
| 2,359 | 32.239437 | 82 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/evaluate_optimizer_task.py | import argparse
import logging
import os
from functools import partial
from pathlib import Path
import pandas as pd
import numpy as np
from blackbox import BlackboxOffline
from blackbox.load_utils import evaluation_split_from_task, blackbox_from_task
from optimizer.benchmark import benchmark
from optimizer.gaussian_process import GP
from optimizer.gaussian_process_functional_prior import G3P
from optimizer.random_search import RS
from optimizer.thompson_sampling_functional_prior import TS
def evaluate(
        task: str,
        optimizer: str,
        prior: str,
        num_seeds: int,
        num_evaluations: int,
        output_folder: str,
):
    """Run one optimizer on one offline task and dump per-seed results.

    Writes a dataframe with columns (seed, iteration, value, blackbox, task,
    optimizer) to `output_folder`/result.csv.zip.
    """
    optimizer_factories = {
        "GP": partial(GP, normalization="standard"),
        "GCP": partial(GP, normalization="gaussian"),
        "RS": RS,
        "GP+prior": partial(G3P, normalization="standard", prior=prior),
        "GCP+prior": partial(G3P, normalization="gaussian", prior=prior),
        "TS": partial(TS, normalization="standard", prior=prior),
        "CTS": partial(TS, normalization="gaussian", prior=prior),
    }
    logging.info(f"Evaluating {optimizer} on {task} with {num_seeds} seeds and {num_evaluations} evaluations.")
    Xys_train, (X_test, y_test) = evaluation_split_from_task(test_task=task)
    blackbox = BlackboxOffline(
        X=X_test,
        y=y_test,
    )
    # search-space bounds: componentwise min/max over every X row seen (train tasks + test)
    all_X = np.vstack([X for X, _ in Xys_train] + [X_test])
    bounds = np.vstack([all_X.min(axis=0), all_X.max(axis=0)])
    make_optimizer = partial(
        optimizer_factories[optimizer],
        bounds=bounds,
        input_dim=blackbox.input_dim,
        output_dim=blackbox.output_dim,
        evaluations_other_tasks=Xys_train,
    )
    # X: (num_seeds, num_evaluations, input_dim); y: (num_seeds, num_evaluations, output_dim)
    X, y = benchmark(
        optimizer_factory=make_optimizer,
        blackbox=blackbox,
        candidates=X_test,
        num_seeds=num_seeds,
        num_evaluations=num_evaluations,
        verbose=False,
    )
    # drop the trailing output dimension -> (num_seeds, num_evaluations)
    values = y.squeeze(axis=-1)
    rows = [
        {"seed": seed, "iteration": iteration, "value": values[seed, iteration]}
        for seed in range(num_seeds)
        for iteration in range(num_evaluations)
    ]
    df = pd.DataFrame(rows)
    df["blackbox"] = blackbox_from_task(task)
    df["task"] = task
    df["optimizer"] = optimizer
    df.to_csv(Path(output_folder) / "result.csv.zip", index=False)
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # CLI entry point: evaluate a single (task, optimizer) pair.
    parser = argparse.ArgumentParser()
    parser.add_argument('--task', type=str, required=True)
    parser.add_argument('--optimizer', type=str, required=True)
    parser.add_argument('--prior', type=str, default="sklearn")
    parser.add_argument('--num_seeds', type=int, default=30)
    parser.add_argument('--num_evaluations', type=int, default=100)
    parser.add_argument('--output_folder', type=str)
    args = parser.parse_args()
    if args.output_folder is not None:
        output_folder = args.output_folder
    else:
        # fall back to the job path set by the cluster scheduler environment
        output_folder = os.getenv("SLURMAKER_JOBPATH")
        assert output_folder is not None, \
            "if you dont pass an output folder as argument, " \
            "you must set it with SLURMAKER_JOBPATH environment variable"
    logging.info(f"evaluating: {args}")
    # log every parsed argument individually for easier grepping in job logs
    for key, val in args.__dict__.items():
        logging.info(f"[{key}]:{val}")
    evaluate(
        task=args.task,
        optimizer=args.optimizer,
        num_seeds=args.num_seeds,
        num_evaluations=args.num_evaluations,
        output_folder=output_folder,
        prior=args.prior,
    )
| 3,590 | 29.956897 | 111 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/figure_illustration.py | import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path
import pandas as pd
import numpy as np
from optimizer.normalization_transforms import GaussianTransform
from blackbox.offline import evaluations_df, deepar
# Script producing the illustration figures: raw objective values (y_plot),
# fitted regression after the Gaussian/copula transform (z_plot), and the
# transformed scatter (z_scatter) for three DeepAR forecasting tasks.
df = evaluations_df(deepar)
df = df[df.task.isin(["traffic", "electricity", "solar"])]
# hyperparameters are stored in log space; exponentiate back for plotting
df["hp_learning_rate"] = df.hp_learning_rate_log.apply(np.exp)
df["hp_context_length_ratio"] = df.hp_context_length_ratio_log.apply(np.exp)
df["hp_num_batches_per_epoch"] = df.hp_num_batches_per_epoch_log.apply(np.exp)
#fig, axes = plt.subplots(1, 3)
# plot learning rate vs CRPS
#ax = sns.lmplot(x="hp_learning_rate", y="metric_CRPS", hue="task", data=df,)
#ax = sns.scatterplot(data=df, x='hp_learning_rate', y='metric_CRPS', hue='task')
#ax.set(xscale="log")
#ax.set_xlabel("x (learning rate)")
#ax.set_ylabel("y")
height = 4
aspect = 1.2
# scatter of raw CRPS vs learning rate, one color per task (no regression line)
ax = sns.lmplot(
    x="hp_learning_rate", y="metric_CRPS", hue="task", ci=None,
    data=df, height=height, aspect=aspect, legend_out=False,
    fit_reg=False
)
ax.set(xscale="log", yscale="log")
ax.ax.set_ylim(0.02,)
ax.ax.set_xlabel("x (learning rate)")
ax.ax.set_ylabel("y")
plt.tight_layout()
plt.savefig("y_plot.jpg")
plt.show()
# plot learning rate vs CRPS mapped through psi = Phi^{-1} o F
# (the per-task Gaussian copula transform applied to the metric values)
for task in df.task.unique():
    y = df.loc[df.loc[:, "task"] == task, "metric_CRPS"].values.reshape(-1, 1)
    z = GaussianTransform(y).transform(y)
    df.loc[df.loc[:, "task"] == task, "z"] = z.reshape(-1)
#ax = sns.scatterplot(data=df, x='hp_learning_rate', y='z', hue='task')
#ax.set_ylabel("z = Psi(y)")
# transformed values with a fitted regression line per task
ax = sns.lmplot(
    x="hp_learning_rate",
    y="z",
    hue="task",
    legend=False,
    data=df,
    ci=None,
    height=height,
    aspect=aspect
)
ax.set(xscale="log")
ax.ax.set_xlabel("x (learning rate)")
ax.ax.set_ylabel("z")
plt.tight_layout()
plt.savefig("z_plot.jpg")
plt.show()
# same transformed values as a plain scatter (fit_reg disabled)
ax = sns.lmplot(
    x="hp_learning_rate",
    y="z",
    hue="task",
    legend=False,
    data=df,
    ci=None,
    height=height,
    aspect=aspect,
    fit_reg=False,
)
ax.set(xscale="log")
ax.ax.set_xlabel("x (learning rate)")
ax.ax.set_ylabel("z")
plt.tight_layout()
plt.savefig("z_scatter.jpg")
plt.show()
| 2,194 | 22.858696 | 81 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/load_results.py | from typing import Optional
import pandas as pd
from pathlib import Path
from blackbox.offline import evaluations_df
from blackbox.load_utils import error_metric
path = Path(__file__).parent
def postprocess_results(df):
    """Clean raw benchmark results as described in the paper.

    Failed evaluations (NaN values) are replaced by the worst (maximum) value
    observed on the same task; when the rolling best is computed later this is
    equivalent to forward-filling with the best value observed. Only the first
    100 iterations and 30 seeds are kept, and only 70 iterations for the
    nas_bench102 blackbox.
    """
    nan_rows = df.loc[:, "value"].isna()
    if nan_rows.sum() > 0:
        worst_per_task = df.groupby('task').max()['value']
        df.loc[nan_rows, 'value'] = df.loc[nan_rows, 'task'].map(worst_per_task)
    df = df[(df.iteration < 100) & (df.seed < 30)]
    df = df[(df.blackbox != "nas_bench102") | (df.iteration < 70)]
    return df
def min_max_tasks():
    """Compute per-task extrema of the error metric over every blackbox.

    :return: two series mapping task name to its minimum and maximum value.
    """
    per_blackbox = [
        evaluations_df(bb).groupby('task').agg(['min', 'max'])[metric]
        for bb, metric in error_metric.items()
    ]
    y_min = pd.concat([stats['min'] for stats in per_blackbox])
    y_max = pd.concat([stats['max'] for stats in per_blackbox])
    return y_min, y_max
def add_adtm(df):
    """Add an 'ADTM' column measuring (best - min_task) / (max_task - min_task).

    'best' is the running minimum of 'value' within each (task, optimizer,
    seed) group; min/max are the per-task extrema from `min_max_tasks`.
    """
    df.loc[:, 'best'] = df.groupby(['task', 'optimizer', 'seed']).cummin().loc[:, 'value']
    y_min, y_max = min_max_tasks()
    # attach per-task 'min' and 'max' columns by joining on the task name
    for task_bounds in (y_min, y_max):
        df = df.join(other=task_bounds, on='task', lsuffix='dataset_')
    span = df.loc[:, "max"] - df.loc[:, "min"]
    df.loc[:, "ADTM"] = (df.loc[:, "best"] - df.loc[:, "min"]) / span
    return df
def load_results(file):
    """Read a results csv and apply the paper's post-processing."""
    raw = pd.read_csv(file)
    return postprocess_results(raw)
def load_results_paper(do_add_adtm: bool = True):
    """Load the results shipped with the paper, optionally adding the ADTM column."""
    df = load_results(path / "results_paper.csv.zip")
    return add_adtm(df) if do_add_adtm else df
def load_results_reimplem(filename: str = "results_reimplem.csv.zip"):
    """Load results produced by the re-implementation."""
    results_file = path / filename
    return load_results(results_file)
| 2,293 | 31.309859 | 119 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/figure2.py | from pathlib import Path
from typing import List
import pandas as pd
import matplotlib.pyplot as plt
import os
import numpy as np
from experiments.load_results import load_results_paper
from experiments.optimizer_names import names
from experiments.optimizer_styles import optimizer_style
from experiments.table2 import adtm_scores
path = Path(__file__).parent
def plot_per_task(scores_per_task: pd.DataFrame, optimizers_to_plot: List[str]):
    """Draw one bar subplot per task showing ADTM improvement over RS,
    tasks ordered by the RMSE read from rmse.csv; saves the figure as a pdf."""
    import seaborn as sns
    from matplotlib.patches import Patch
    sns.set()
    sns.set_style("white")
    # load RMSEs from csv (two columns: task, rmse)
    rmses = pd.read_csv(
        Path(__file__).parent / 'rmse.csv',
        header=None, names=['task', 'rmse']
    ).set_index('task')['rmse']
    # show task in x, ADTM improvement over RS on the y-axis
    cols = {'rmse': rmses}
    for method in optimizers_to_plot:
        cols[method] = scores_per_task[method].reset_index()[['task', method]].set_index('task')[method]
    dd = pd.DataFrame(cols).sort_values(by='rmse').reset_index().rename(columns={'index': 'task'})
    # x-axis label combines the task name and its rmse, e.g. "traffic (0.12)"
    dd['task_and_rmse'] = dd.apply(lambda x: f"{x.task} (%.2f)" % x.rmse, axis=1)
    styles, colors = zip(*[optimizer_style(method) for method in optimizers_to_plot])
    # hatch copula-based methods so they remain distinguishable in grayscale
    hatches = tuple(['///' if 'Copula' in m else None for m in optimizers_to_plot])
    fig, axes = plt.subplots(3, 9, figsize=(20, 5), sharex=True, sharey='row')
    axes = np.ravel(axes)
    # one bar group per task, one bar per optimizer
    for i, row in dd.iterrows():
        y = [row[m] for m in optimizers_to_plot]
        bars = axes[i].bar(x=range(len(colors)), height=y, color=colors, label=optimizers_to_plot)
        for bar, h in zip(bars, hatches):
            bar.set_hatch(h)
        axes[i].set_xlabel(row['task_and_rmse'], fontsize=14)
        axes[i].set_ylim([-1, 1])
    # plot legend on the last subplots; hide that axis' frame first
    custom_lines = []
    for c, h in zip(colors, hatches):
        p = Patch(facecolor=c, hatch=h)
        custom_lines.append(p)
    axes[-1].spines['right'].set_visible(False)
    axes[-1].spines['top'].set_visible(False)
    axes[-1].spines['left'].set_visible(False)
    axes[-1].spines['bottom'].set_visible(False)
    axes[-1].legend(custom_lines, optimizers_to_plot, fontsize=10, loc='center')
    plt.subplots_adjust(wspace=0.0)
    plt.xticks([], [])
    plt.tight_layout(h_pad=0, w_pad=0)
    filename = Path(__file__).parent / f'hpo/figures/ADTM_per_task.pdf'
    os.makedirs(filename.parent, exist_ok=True)
    print(filename)
    plt.savefig(str(filename))
    plt.show()
if __name__ == '__main__':
    # Figure 2 of the paper: per-task ADTM improvement bars for the
    # transfer-learning methods versus the baselines.
    df_paper = load_results_paper()
    optimizers_to_plot = [
        names.GCP_prior,
        names.CTS_prior,
        names.WS_BEST,
        names.AUTORANGE_GP,
        names.ABLR,
        names.ABLR_COPULA,
        names.SGPT,
        names.SGPT_COPULA,
    ]
    scores_per_blackbox, scores_per_task = adtm_scores(df_paper, optimizers_to_plot)
    plot_per_task(scores_per_task=scores_per_task, optimizers_to_plot=optimizers_to_plot)
| 2,967 | 31.615385 | 104 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/optimizer_names.py |
class names:
    """Display names for every optimizer, used consistently across tables and figures."""
    # put names into a class to add structure and avoid having lots of imports
    RS = "RS"
    # ablation
    GP = "GP"
    GCP_ho_prior = "GCP + homosk. prior"
    GCP = "GCP"
    GCP_prior = "GCP + prior (ours)"
    GP_prior = "GP + prior"
    CTS_ho_prior = "CTS + homosk. prior"
    CTS_prior = "CTS (ours)"
    TS_prior = "TS"
    # NOTE: a second, identical `GP_prior = "GP + prior"` assignment was removed
    # here; it was a redundant duplicate with no effect.
    # multi-objectives
    MO_suffix = " + MO"
    GP_prior_mo = GP_prior + MO_suffix
    GP_mo = GP + MO_suffix
    GCP_prior_mo = "GCP + prior" + MO_suffix + " (ours)"
    GCP_mo = GCP + MO_suffix
    CTS_prior_mo = "CTS + prior" + MO_suffix + " (ours)"
    TS_prior_mo = TS_prior + MO_suffix
    # baselines
    WS_BEST = 'WS GP'
    AUTORANGE_GP = "AutoGP"
    AUTORANGE_RS = "AutoRS"
    BOHB = 'BOHB'
    REA = 'R-EA'
    REINFORCE = 'REINFORCE'
    ABLR = "ABLR"
    ABLR_COPULA = 'ABLR Copula'
    SGPT = "SGPT"
    SGPT_COPULA = "SGPT Copula"
    EHI = "EHI"
    SMS = "SMS"
    SUR = "SUR"
    EMI = "EMI"
def method_name(dataset_name):
    """Map a dataset name to the blackbox family it belongs to."""
    if "fcnet" in dataset_name:
        return "fcnet"
    if "xgboost" in dataset_name:
        return "xgboost"
    return 'NAS' if 'nas102' in dataset_name else "DeepAR"
def rename_results(df):
    """Map internal experiment identifiers in df.method to the paper's display
    names, drop methods without a mapping, normalize dataset names, and drop
    the skin_nonskin dataset."""
    # internal run name -> display name; unmapped methods are discarded below
    rename_dict = {
        'ablr_norm_fixed_set_tr': names.ABLR,
        'ablr_copula': names.ABLR_COPULA,
        'copula_gp_1_5_random_fix_sigma_5_tr': names.GCP_ho_prior,
        'copula_gp_1_5_random_pred_sigma_5_tr': names.GCP_prior,
        'copula_gp_1_5_random_pred_sigma_std_5_tr': names.GP_prior,
        'copula_rs_1_fix_sigma_tr': names.CTS_ho_prior,
        'copula_rs_1_pred_sigma_std_tr': names.TS_prior,
        'copula_rs_1_pred_sigma_tr': names.CTS_prior,
        'gp_fixed_set_tr': names.GP,
        'random_fixed_set_tr': names.RS,
        'warm-start-gp-top1-1init': names.WS_BEST,
        'auto-range-gp': names.AUTORANGE_GP,
        'copula_gp_no_proir': names.GCP,
        'sgpt_0.01': names.SGPT,
        #'sgpt_0.10': names.SGPT_010,
        #'sgpt_1.00': names.SGPT_100,
        'sgpt_0.01_copula': names.SGPT_COPULA
    }
    # unknown methods become "" and are filtered out on the next line
    df.method = df.method.apply(lambda name: rename_dict[name] if name in rename_dict else "")
    df = df.loc[df.method != "", :]
    # strip blackbox prefixes/suffixes so only the bare dataset name remains
    df.dataset = df.dataset.apply(
        lambda name: name.replace("xgboost_", "")
        .replace("_max_resource", "")
        .replace("fcnet_", "")
        .replace("nas102_", "")
        .replace("_lookup", "")
    )
    df = df[df.dataset != 'skin_nonskin']
    return df
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/table2.py | from typing import List, Optional
import pandas as pd
import numpy as np
from pathlib import Path
from blackbox.offline import deepar, fcnet, xgboost, nas102
from experiments.load_results import load_results_paper
from experiments.optimizer_names import names
path = Path(__file__).parent
def adtm_scores(df, optimizers_to_plot = None, baseline: Optional[str] = "RS"):
    """Compute relative ADTM improvement over `baseline` per blackbox and per task.

    :param df: results dataframe with columns blackbox, task, optimizer,
        iteration and ADTM
    :param optimizers_to_plot: optional list restricting/ordering the columns
    :param baseline: optimizer name the improvement is measured against
    :return: (scores_per_blackbox, scores_per_task) dataframes of relative
        improvement (baseline - score) / baseline
    """
    # return adtm table per blackbox and per dataset
    # mean ADTM over seeds, pivoted to one column per optimizer
    scores_df = df.groupby(["blackbox", "task", "optimizer", "iteration"])[
        "ADTM"
    ].mean().reset_index().pivot_table(
        values='ADTM',
        columns=['optimizer'],
        index=['blackbox', 'task', 'iteration'],
    )
    # relative improvement over the baseline column (positive = better than baseline)
    rel_scores = (scores_df[[baseline]].values - scores_df.values) / scores_df[[baseline]].values
    # drop the iteration index level before averaging
    rel_scores_df = pd.DataFrame(rel_scores, index=scores_df.index, columns=scores_df.columns).reset_index(
        level=2).drop(
        columns='iteration')
    scores_per_task = rel_scores_df.groupby(['blackbox', 'task']).mean()
    avg_scores_per_blackbox = rel_scores_df.groupby(['blackbox']).mean()
    if optimizers_to_plot is not None:
        avg_scores_per_blackbox = avg_scores_per_blackbox[optimizers_to_plot]
        scores_per_task = scores_per_task[optimizers_to_plot]
    # fixed blackbox column order for the paper's tables
    scores_per_blackbox = avg_scores_per_blackbox.T[["DeepAR", "FCNET", "XGBoost", "nas_bench102"]]
    return scores_per_blackbox, scores_per_task
def rank(scores_per_task: pd.DataFrame, blackboxes: List[str]):
    """Average rank of each optimizer per blackbox (higher score -> better rank)."""
    transposed = scores_per_task.transpose()
    ranks = {
        bb: transposed[bb].rank(ascending=False).mean(axis=1)
        for bb in blackboxes
    }
    return pd.DataFrame(ranks)
if __name__ == '__main__':
    # Table 2 of the paper: ADTM improvement over RS and average ranks.
    df_paper = load_results_paper()
    print(df_paper.head())
    # rename the RS rows so the baseline is clearly labeled in the table
    baseline = names.RS
    renamed_baseline = f"{names.RS} (baseline)"
    df_paper.optimizer = df_paper.optimizer.apply(lambda name: renamed_baseline if name == baseline else name)
    optimizers_to_plot = [
        renamed_baseline,
        names.TS_prior,
        names.CTS_prior,
        names.GP_prior,
        names.GCP,
        names.GCP_prior,
        names.GP,
        names.AUTORANGE_GP,
        names.WS_BEST,
        names.ABLR,
        names.ABLR_COPULA,
        names.SGPT,
        names.SGPT_COPULA,
        names.BOHB,
        names.REA,
        names.REINFORCE,
    ]
    scores_per_blackbox, scores_per_task = adtm_scores(
        df_paper,
        optimizers_to_plot,
        baseline=renamed_baseline,
    )
    print(scores_per_blackbox.to_string())
    print(scores_per_blackbox.to_latex(float_format='%.2f', na_rep='-'))
    # average ranks per blackbox family
    rank_df = rank(scores_per_task=scores_per_task, blackboxes=[deepar, fcnet, xgboost, nas102])
    print(rank_df.to_string())
    print(rank_df.to_latex(float_format='%.1f', na_rep='-'))
    # generates "dtm (rank)" numbers dataframe so that it can be exported easily in latex
    dtm_and_rank_values = []
    for x, y in zip(scores_per_blackbox.values.reshape(-1), rank_df.values.reshape(-1)):
        dtm_and_rank_values.append("{:.2f}".format(x) + " (" + "{:.1f}".format(y) + ")")
    dtm_and_rank = pd.DataFrame(
        np.array(dtm_and_rank_values).reshape(rank_df.shape),
        index=rank_df.index,
        columns=rank_df.columns
    )
    print(dtm_and_rank.to_latex())
| 3,258 | 30.038095 | 110 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/__init__.py | 0 | 0 | 0 | py |
|
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/table2-new-implem.py | import os
import pandas as pd
from pathlib import Path
from experiments.load_results import load_results_paper, load_results_reimplem, add_adtm
from experiments.optimizer_names import names
from experiments.table2 import adtm_scores, rank
path = Path(__file__).parent
if __name__ == '__main__':
    # Compare the paper's original results with the re-implementation
    # (sklearn and pytorch priors) on the same ADTM table.
    df_paper = load_results_paper(do_add_adtm=False)
    df_reimplem = load_results_reimplem()
    df = pd.concat([df_paper, df_reimplem], sort=False)
    print(df.optimizer.unique())
    optimizers_to_plot = [
        "RS",
        names.CTS_prior,
        "CTS (sklearn)",
        "CTS (pytorch)",
        names.GCP_prior,
        "GCP+prior (sklearn)",
        "GCP+prior (pytorch)",
    ]
    # ADTM is added after concatenation so both result sets share the same scale
    df = add_adtm(df)
    scores_per_blackbox, scores_per_task = adtm_scores(df, optimizers_to_plot)
    print(scores_per_blackbox.to_string())
    print(scores_per_blackbox.to_latex(float_format='%.2f', na_rep='-'))
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/figure1.py | from pathlib import Path
import matplotlib.pyplot as plt
from blackbox.offline import deepar, fcnet, xgboost, nas102
from experiments.load_results import load_results_paper
from experiments.optimizer_names import names
from experiments.optimizer_styles import optimizer_style
path = Path(__file__).parent
def plot_optimizers(df, ax, blackbox, optimizers, legend: bool = False):
    """Plot mean ADTM vs iteration on `ax` for the given optimizers on one blackbox."""
    df_plot = df.loc[df.optimizer.isin(optimizers), :]
    # mean ADTM over tasks/seeds, one column per optimizer indexed by iteration
    pivot_df = df_plot.loc[df_plot.blackbox == blackbox, :].groupby(
        ['blackbox', 'optimizer', 'iteration']
    )['ADTM'].mean().reset_index().pivot_table(
        index='iteration', columns='optimizer', values='ADTM'
    ).dropna()
    # reorder optimizers to original list order
    optimizers = [m for m in optimizers if m in pivot_df]
    style, color = zip(*[optimizer_style(optimizer) for optimizer in optimizers])
    pivot_df[optimizers].plot(
        ax=ax,
        title=blackbox,
        color=list(color),
        # style string concatenates linestyle + marker, e.g. '--s'
        style=[a + b for a, b in style],
        # marker=list(marker),
        markevery=20,
        alpha=0.8,
        lw=2.5,
    )
    ax.grid()
    # per-blackbox axis limits tuned for the paper's figure
    if blackbox == 'DeepAR':
        ax.set_ylim([None, 1e-2])
    if blackbox == 'fcnet':
        ax.set_ylim([None, 0.3])
    if blackbox == 'xgboost':
        ax.set_ylim([1e-2, 0.3])
    if blackbox == 'NAS':
        ax.set_xlim([None, 65])
        # ax.set_ylim([0.001, None])
    ax.set_yscale('log')
    ax.set_ylabel('ADTM')
    if not legend:
        ax.get_legend().remove()
    else:
        ax.legend(loc="upper right")
if __name__ == '__main__':
    # Figure 1 of the paper: ADTM convergence curves, one row per blackbox,
    # left column = comparison with baselines, right column = ablation.
    df = load_results_paper()
    blackboxes = [deepar, fcnet, xgboost, nas102]
    optimizers_to_plot = [
        [
            names.RS,
            names.GP,
            names.AUTORANGE_GP,
            names.WS_BEST,
            names.ABLR,
            names.CTS_prior,
            names.GCP_prior,
            # 'BOHB', 'R-EA', 'REINFORCE',
        ],
        [
            names.GP,
            names.GP_prior,
            names.GCP,
            names.GCP_prior,
            names.TS_prior,
            names.CTS_prior,
        ]
    ]
    fig, axes = plt.subplots(4, 2, figsize=(10, 12), sharex='row', sharey='row')
    for i, blackbox in enumerate(blackboxes):
        for j, optimizers in enumerate(optimizers_to_plot):
            # legend only on the first row to avoid repetition
            plot_optimizers(df, blackbox=blackbox, ax=axes[i, j], optimizers=optimizers, legend=(i == 0))
    plt.savefig("adtm.pdf")
    plt.show()
| 2,457 | 27.581395 | 105 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/optimizer/benchmark.py | import gc
import logging
import sys
import traceback
from typing import Tuple, Callable
import numpy as np
from tqdm import tqdm
from blackbox import Blackbox
from misc import set_seed
from optimizer import Optimizer
def benchmark(
        num_evaluations: int,
        optimizer_factory: Callable[[], Optimizer],
        blackbox: Blackbox,
        candidates: np.array,
        num_seeds: int,
        verbose: bool = False,
) -> Tuple[np.array]:
    """
    For each seed, the optimizer is run 'num_evaluations'.
    :param num_evaluations: number of sample/observe iterations per seed
    :param optimizer_factory: builds a fresh optimizer for each seed
    :param blackbox: objective to evaluate sampled points on
    :param candidates: candidate pool passed to optimizer.sample
    :param num_seeds: number of independent repetitions
    :param verbose: if True, log every evaluation
    :return: two tensors of shape (num_seeds, num_evaluations, X) where X = [input_dim, output_dim]
    """
    seeds = range(num_seeds)
    #if verbose:
    #    seeds = tqdm(seeds)
    seeds = tqdm(seeds)
    # pre-fill with NaN so failed seeds remain marked as missing
    Xs = np.empty((num_seeds, num_evaluations, blackbox.input_dim))
    Xs[:] = np.nan
    ys = np.empty((num_seeds, num_evaluations, blackbox.output_dim))
    ys[:] = np.nan
    for seed in seeds:
        try:
            set_seed(seed)
            optimizer = optimizer_factory()
            for i in range(num_evaluations):
                x = optimizer.sample(candidates)
                y = blackbox(x)
                if verbose:
                    logging.info(f"criterion {y} for arguments {x}")
                optimizer.observe(x=x, y=y)
                Xs[seed, i] = x
                ys[seed, i] = y
            # memory leaks without gc, not sure why, perhaps a reference cycle
            gc.collect()
            del optimizer
        except Exception:
            # a failing seed is logged and skipped; its rows stay NaN
            print("seed evaluation failed")
            traceback.print_exc(file=sys.stdout)
            pass
    return Xs, ys
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/optimizer/gaussian_process_functional_prior.py | from typing import Optional, Tuple, Callable, Union, List
import logging
import numpy as np
import torch
from gpytorch import ExactMarginalLogLikelihood
from gpytorch.constraints import GreaterThan
from gpytorch.likelihoods import GaussianLikelihood
from torch import Tensor
from torch.distributions import Normal
from botorch import fit_gpytorch_model
from botorch.acquisition import ExpectedImprovement, ScalarizedObjective
from botorch.models import SingleTaskGP
from botorch.models.model import Model
from botorch.optim import optimize_acqf
from botorch.utils.transforms import t_batch_mode_transform
from blackbox import Blackbox
from constants import num_gradient_updates
from misc.artificial_data import artificial_task1
from optimizer.gaussian_process import GP
from optimizer.thompson_sampling_functional_prior import TS
def residual_transform(y, mu_pred, sigma_pred):
    """Standardize observations with the prior's predicted mean and scale."""
    residual = y - mu_pred
    return residual / sigma_pred
def residual_transform_inv(z, mu_pred, sigma_pred):
    """Inverse of `residual_transform`: map residuals back to observation scale."""
    return mu_pred + z * sigma_pred
def scale_posterior(mu_posterior, sigma_posterior, mu_est, sigma_est):
    """Rescale a residual-space posterior back to the original output scale."""
    rescaled_mean = mu_est + mu_posterior * sigma_est
    rescaled_sigma = sigma_posterior * sigma_est
    return rescaled_mean, rescaled_sigma
class ShiftedExpectedImprovement(ExpectedImprovement):
    """
    Applies ExpectedImprovement taking care to shift residual posterior with the predicted
    prior mean and variance
    :param model: GP fitted on residuals (observations standardized by the prior)
    :param best_f: best value observed (not residual but actual value)
    :param mean_std_predictor: maps features (n, input_dim) to prior mean/std arrays
    :param objective: optional scalarization of a multi-output model
    :param maximize: if False, improvement is measured for minimization
    """
    def __init__(
            self,
            model: Model,
            best_f: Union[float, Tensor],
            mean_std_predictor: Callable[[np.array], Tuple[np.array, np.array]],
            objective: Optional[ScalarizedObjective] = None,
            maximize: bool = True,
    ) -> None:
        super(ShiftedExpectedImprovement, self).__init__(model=model, best_f=best_f, objective=objective,
                                                         maximize=maximize)
        self.mean_std_predictor = mean_std_predictor

    @t_batch_mode_transform(expected_q=1)
    def forward(self, X: Tensor) -> Tensor:
        """
        :param X: A (..., 1, input_dim) batched tensor of input_dim design points.
            Expected Improvement is computed for each point individually,
            i.e., what is considered are the marginal posteriors, not the
            joint.
        :return: A (...) tensor of Expected Improvement values at the
            given design points `X`.
        """
        with torch.no_grad():
            # the prior prediction is treated as a constant (no gradient through it)
            # both (..., 1,)
            # (..., input_dim)
            X_features = X.detach().numpy().squeeze(1)
            mu_est, sigma_est = self.mean_std_predictor(X_features)
            # both (..., 1, 1)
            mu_est = torch.Tensor(mu_est).unsqueeze(1)
            sigma_est = torch.Tensor(sigma_est).unsqueeze(1)
        posterior = self._get_posterior(X=X)
        # move the residual-space posterior back to the observation scale
        mean, sigma = scale_posterior(
            mu_posterior=posterior.mean,
            sigma_posterior=posterior.variance.clamp_min(1e-6).sqrt(),
            mu_est=mu_est,
            sigma_est=sigma_est,
        )
        # standard closed-form EI on the shifted posterior
        u = (mean - self.best_f.expand_as(mean)) / sigma
        if not self.maximize:
            u = -u
        normal = Normal(torch.zeros_like(u), torch.ones_like(u))
        ucdf = normal.cdf(u)
        updf = torch.exp(normal.log_prob(u))
        ei = sigma * (updf + u * ucdf)
        return ei.squeeze(dim=-1).squeeze(dim=-1)
class ShiftedThompsonSampling(ExpectedImprovement):
    """
    Applies Thompson sampling taking care to shift residual posterior with the predicted
    prior mean and variance
    :param model: GP fitted on residuals
    :param best_f: best value observed (kept for the parent-class interface)
    :param mean_std_predictor: maps features to prior mean/std predictions
    :param objective: optional scalarization of a multi-output model
    :param maximize: if False, samples are negated before returning
    """
    def __init__(
            self,
            model: Model,
            best_f: Union[float, Tensor],
            mean_std_predictor: Callable[[np.array], Tuple[np.array, np.array]],
            objective: Optional[ScalarizedObjective] = None,
            maximize: bool = True,
    ) -> None:
        super(ShiftedThompsonSampling, self).__init__(model=model, best_f=best_f, objective=objective,
                                                      maximize=maximize)
        self.mean_std_predictor = mean_std_predictor

    @t_batch_mode_transform(expected_q=1)
    def forward(self, X: Tensor) -> Tensor:
        """
        :param X: A `... x 1 x d`-dim batched tensor of `d`-dim design points.
            Expected Improvement is computed for each point individually,
            i.e., what is considered are the marginal posteriors, not the
            joint.
        :return: A `...` tensor of Expected Improvement values at the
            given design points `X`.
        """
        with torch.no_grad():
            # both (..., 1,)
            # NOTE(review): the raw tensor X is passed to mean_std_predictor here,
            # without the numpy feature extraction done in ShiftedExpectedImprovement
            # — confirm the predictor accepts tensors of this shape.
            mu_est, sigma_est = self.mean_std_predictor(X)
        posterior = self._get_posterior(X=X)
        mean, sigma = scale_posterior(
            mu_posterior=posterior.mean,
            sigma_posterior=posterior.variance.clamp_min(1e-9).sqrt(),
            mu_est=mu_est,
            sigma_est=sigma_est,
        )
        # draw one sample from the shifted Gaussian posterior per design point
        normal = Normal(torch.zeros_like(mean), torch.ones_like(mean))
        u = normal.sample() * sigma + mean
        if not self.maximize:
            u = -u
        return u.squeeze(dim=-1).squeeze(dim=-1)
class G3P(GP):
    """GP with a functional prior: fits a GP on residuals of observations
    standardized by a parametric prior trained on other tasks, and optimizes
    a prior-shifted Expected Improvement."""

    def __init__(
            self,
            input_dim: int,
            output_dim: int,
            bounds: Optional[np.array] = None,
            evaluations_other_tasks: Optional[List[Tuple[np.array, np.array]]] = None,
            num_gradient_updates: int = num_gradient_updates,
            normalization: str = "standard",
            prior: str = "pytorch",
    ):
        super(G3P, self).__init__(
            input_dim=input_dim,
            output_dim=output_dim,
            bounds=bounds,
            normalization=normalization,
        )
        # TS both provides the fitted parametric prior and serves as the
        # sampler for the initial random draws
        self.initial_sampler = TS(
            input_dim=input_dim,
            output_dim=output_dim,
            evaluations_other_tasks=evaluations_other_tasks,
            num_gradient_updates=num_gradient_updates,
            normalization=normalization,
            prior=prior,
        )

    def _sample(self, candidates: Optional[np.array] = None) -> np.array:
        # fall back to Thompson sampling until enough observations are collected
        if len(self.X_observed) < self.num_initial_random_draws:
            return self.initial_sampler.sample(candidates=candidates)
        else:
            z_observed = torch.Tensor(self.transform_outputs(self.y_observed.numpy()))
            with torch.no_grad():
                # both (n, 1)
                #mu_pred, sigma_pred = self.thompson_sampling.prior(self.X_observed)
                mu_pred, sigma_pred = self.initial_sampler.prior.predict(self.X_observed)
                mu_pred = torch.Tensor(mu_pred)
                sigma_pred = torch.Tensor(sigma_pred)
            # (n, 1) residuals of the observations w.r.t. the prior prediction
            r_observed = residual_transform(z_observed, mu_pred, sigma_pred)
            # build and fit GP on residuals
            gp = SingleTaskGP(
                train_X=self.X_observed,
                train_Y=r_observed,
                likelihood=GaussianLikelihood(noise_constraint=GreaterThan(1e-3)),
            )
            mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
            fit_gpytorch_model(mll)
            # EI evaluated on the prior-shifted posterior; minimization problem
            acq = ShiftedExpectedImprovement(
                model=gp,
                best_f=z_observed.min(dim=0).values,
                mean_std_predictor=self.initial_sampler.prior.predict,
                maximize=False,
            )
            if candidates is None:
                # continuous optimization of the acquisition within the bounds
                candidate, acq_value = optimize_acqf(
                    acq,
                    bounds=self.bounds_tensor,
                    q=1,
                    num_restarts=5,
                    raw_samples=100,
                )
                # import matplotlib.pyplot as plt
                # x = torch.linspace(-1, 1).unsqueeze(dim=-1)
                # x = torch.cat((x, x * 0), dim=1)
                # plt.plot(x[:, 0].flatten().tolist(), acq(x.unsqueeze(dim=1)).tolist())
                # plt.show()
                return candidate[0]
            else:
                # discrete case: score every candidate and take the argmax, (N,)
                ei = acq(torch.Tensor(candidates).unsqueeze(dim=-2))
                return torch.Tensor(candidates[ei.argmax()])
if __name__ == '__main__':
    # Smoke test: run G3P for a few iterations on a toy artificial task.
    logging.basicConfig(level=logging.INFO)
    num_evaluations = 10
    Xy_train, X_test, y_test = artificial_task1()
    # trivial 2-d blackbox whose objective is the sum of the inputs
    blackbox = Blackbox(
        input_dim=2,
        output_dim=1,
        eval_fun=lambda x: x.sum(axis=-1, keepdims=True),
    )
    optimizer = G3P(
        input_dim=blackbox.input_dim,
        output_dim=blackbox.output_dim,
        evaluations_other_tasks=Xy_train,
        num_gradient_updates=2,
    )
    candidates = X_test
    for i in range(num_evaluations):
        x = optimizer.sample(candidates)
        #x = optimizer.sample()
        y = blackbox(x)
        logging.info(f"criterion {y} for arguments {x}")
        optimizer.observe(x=x, y=y)
| 9,128 | 33.711027 | 105 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/optimizer/thompson_sampling_functional_prior.py | import logging
from typing import Optional, List, Tuple
import numpy as np
from constants import num_gradient_updates
from optimizer import Optimizer
from optimizer.normalization_transforms import from_string
from optimizer.random_search import RS
from prior.mlp_pytorch import ParametricPrior
from prior.mlp_sklearn import ParametricPriorSklearn
class TS(Optimizer):
    """Thompson-sampling optimizer driven by a parametric prior fitted on
    evaluations from other tasks."""

    def __init__(
            self,
            input_dim: int,
            output_dim: int,
            bounds: Optional[np.array] = None,
            evaluations_other_tasks: Optional[List[Tuple[np.array, np.array]]] = None,
            num_gradient_updates: int = num_gradient_updates,
            normalization: str = "standard",
            prior: str = "pytorch",
    ):
        super(TS, self).__init__(
            input_dim=input_dim,
            output_dim=output_dim,
            evaluations_other_tasks=evaluations_other_tasks,
            bounds=bounds,
        )
        assert evaluations_other_tasks is not None
        normalizer = from_string(normalization)
        # stack features of every source task; outputs are normalized per task
        # before stacking so tasks with different scales are comparable
        X_train = np.concatenate([X for X, _ in evaluations_other_tasks], axis=0)
        z_parts = [normalizer(y).transform(y) for _, y in evaluations_other_tasks]
        z_train = np.concatenate(z_parts, axis=0)
        prior_classes = {
            "sklearn": ParametricPriorSklearn,
            "pytorch": ParametricPrior,
        }
        logging.info(f"fit prior {prior}")
        self.prior = prior_classes[prior](
            X_train=X_train,
            y_train=z_train,
            num_gradient_updates=num_gradient_updates,
        )
        logging.info("prior fitted")

    def _sample(self, candidates: Optional[np.array] = None) -> np.array:
        if candidates is None:
            # Thompson sampling picks from a discrete set of options; with no
            # candidates given, fall back to a large random pool
            candidates = self.draw_random_candidates(10000)
        mu, sigma = self.prior.predict(candidates)
        draws = np.random.normal(loc=mu, scale=sigma)
        return candidates[np.argmin(draws)]

    def draw_random_candidates(self, num_random_candidates: int):
        """Draw uniform random points from the search space."""
        sampler = RS(
            input_dim=self.input_dim,
            output_dim=self.output_dim,
            bounds=self.bounds,
        )
        return np.stack([sampler.sample() for _ in range(num_random_candidates)])
| 2,528 | 35.128571 | 106 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/optimizer/gaussian_process.py | import logging
from typing import Optional
import numpy as np
import torch
from botorch import fit_gpytorch_model
from botorch.acquisition import ExpectedImprovement
from botorch.models import SingleTaskGP
from botorch.optim import optimize_acqf
from botorch.utils.transforms import normalize
from gpytorch import ExactMarginalLogLikelihood
from gpytorch.constraints import GreaterThan
from gpytorch.likelihoods import GaussianLikelihood
from blackbox import Blackbox, BlackboxOffline
from constants import num_initial_random_draws
from misc import set_seed
from misc.artificial_data import artificial_task1
from optimizer import Optimizer
from optimizer.normalization_transforms import from_string
from optimizer.random_search import RS
class GP(Optimizer):
    """Gaussian-process optimizer: fits a GP on (normalized) observations and
    picks the next point by maximizing Expected Improvement."""

    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        bounds: Optional[np.array] = None,
        normalization: str = "standard",
        evaluations_other_tasks=None,
    ):
        """
        :param normalization: output normalization, "standard" or "gaussian"
        :param evaluations_other_tasks: unused by plain GP; kept for interface parity
        """
        super(GP, self).__init__(
            input_dim=input_dim,
            output_dim=output_dim,
            evaluations_other_tasks=evaluations_other_tasks,
            bounds=bounds,
        )
        # maintains observations
        # (num_observations, input_dim)
        self.X_observed = torch.empty(size=(0, input_dim))
        # (num_observations, output_dim)
        self.y_observed = torch.empty(size=(0, output_dim))
        # number of uniform draws before the GP model is used
        self.num_initial_random_draws = num_initial_random_draws
        self.normalizer = from_string(normalization)
        # random sampler used for the initial exploration phase
        self.initial_sampler = RS(
            input_dim=input_dim,
            output_dim=output_dim,
            bounds=bounds,
        )
        self.bounds_tensor = torch.Tensor(self.bounds)

    def expected_improvement(self, model, best_f):
        """Return an EI acquisition (minimization: maximize=False)."""
        return ExpectedImprovement(
            model=model,
            best_f=best_f,
            maximize=False,
        )

    def transform_outputs(self, y: np.array):
        """Normalize raw outputs y with a freshly-fitted transform."""
        psi = self.normalizer(y)
        z = psi.transform(y)
        return z

    def _sample(self, candidates: Optional[np.array] = None) -> np.array:
        # random exploration until enough observations exist to fit a GP
        if len(self.X_observed) < self.num_initial_random_draws:
            return self.initial_sampler.sample(candidates=candidates)
        else:
            z_observed = torch.Tensor(self.transform_outputs(self.y_observed.numpy()))
            # build and fit GP
            gp = SingleTaskGP(
                train_X=self.X_observed,
                train_Y=z_observed,
                # special likelihood for numerical Cholesky errors, following advice from
                # https://www.gitmemory.com/issue/pytorch/botorch/179/506276521
                likelihood=GaussianLikelihood(noise_constraint=GreaterThan(1e-3)),
            )
            mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
            fit_gpytorch_model(mll)
            acq = self.expected_improvement(
                model=gp,
                best_f=z_observed.min(dim=0).values,
            )
            if candidates is None:
                # continuous optimization of the acquisition over the box bounds
                candidate, acq_value = optimize_acqf(
                    acq, bounds=self.bounds_tensor, q=1, num_restarts=5, raw_samples=100,
                )
                return candidate[0]
            else:
                # discrete case: evaluate EI on each candidate, shape (N,)
                ei = acq(torch.Tensor(candidates).unsqueeze(dim=-2))
                return torch.Tensor(candidates[ei.argmax()])

    def _observe(self, x: np.array, y: np.array):
        # remark, we could fit the GP there so that sampling several times avoid the cost of refitting the GP
        self.X_observed = torch.cat((self.X_observed, torch.Tensor(x).unsqueeze(dim=0)), dim=0)
        self.y_observed = torch.cat((self.y_observed, torch.Tensor(y).unsqueeze(dim=0)), dim=0)
if __name__ == '__main__':
    # Smoke-test the plain GP optimizer on an offline artificial task.
    logging.basicConfig(level=logging.INFO)
    Xy_train, X_test, y_test = artificial_task1(seed=0)
    print(y_test[0])
    set_seed(0)
    bb = BlackboxOffline(
        X=X_test,
        y=y_test,
    )
    opt = GP(
        input_dim=bb.input_dim,
        output_dim=bb.output_dim,
    )
    for _ in range(10):
        # unconstrained search over the whole box
        x = opt.sample()
        y = bb(x)
        logging.info(f"criterion {y} for arguments {x}")
        opt.observe(x=x, y=y)
| 4,389 | 32.51145 | 109 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/optimizer/__init__.py | from typing import Optional, Tuple, List
import numpy as np
class Optimizer:
    """Base class for blackbox optimizers: propose points and record observations."""

    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        bounds: Optional[np.array] = None,
        evaluations_other_tasks: Optional[List[Tuple[np.array, np.array]]] = None,
    ):
        """
        :param input_dim: input dimensions of blackbox arguments
        :param output_dim: output dimensions of blackbox output
        :param bounds: bounds on the space to sample with shape (2, input_dim); if not
            specified, all coordinates are constrained to [-1, 1]
        :param evaluations_other_tasks: list of tuples (X, y) with shapes
            (num_evaluations, input_dim) and (num_evaluations, output_dim); passed as a
            separate list as the optimizer may need to group evaluations per task,
            for instance when normalizing the data
        """
        self.input_dim = input_dim
        self.output_dim = output_dim
        if bounds is None:
            # default search space: the hypercube [-1, 1]^input_dim
            self.bounds = np.stack([
                np.ones(input_dim) * -1,
                np.ones(input_dim)
            ])
        else:
            self.bounds = bounds
        assert self.bounds.shape == (2, input_dim)
        if evaluations_other_tasks is not None:
            self.num_tasks = len(evaluations_other_tasks)
            for X, y in evaluations_other_tasks:
                assert len(X) == len(y)
                assert X.shape[1] == input_dim
                assert y.shape[1] == output_dim

    def sample(self, candidates: Optional[np.array] = None) -> np.array:
        """
        :param candidates: optionally a list of candidates when performing constrained search
        :return: sampled point with shape (input_dim,)
        """
        if candidates is not None:
            assert candidates.shape[1] == self.input_dim
        x = self._sample(candidates)
        assert x.shape == (self.input_dim,)
        return x

    def _sample(self, candidates: Optional[np.array] = None) -> np.array:
        # Previously this placeholder returned the string "override me", which
        # made `sample` fail later with a confusing shape assertion; fail fast
        # and explicitly instead.
        raise NotImplementedError

    def observe(self, x: np.array, y: np.array):
        """
        Update the state after seeing an observation.
        :param x: shape (input_dim,)
        :param y: shape (output_dim,)
        """
        assert x.shape == (self.input_dim,)
        assert y.shape == (self.output_dim,)
        self._observe(x, y)

    def _observe(self, x: np.array, y: np.array):
        # default: stateless optimizer; subclasses may record (x, y)
        pass
| 2,500 | 35.246377 | 139 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/optimizer/normalization_transforms.py | import numpy as np
from scipy import stats
class GaussianTransform:
    """
    Maps data to a standard normal via psi = Phi^{-1} o F, where F is the
    truncated empirical CDF of the training data.
    :param y: shape (n, dim)
    """

    def __init__(self, y: np.array):
        assert y.ndim == 2
        self.dim = y.shape[1]
        self.sorted = y.copy()
        self.sorted.sort(axis=0)

    @staticmethod
    def z_transform(series, values_sorted=None):
        # truncated ECDF followed by the inverse Gaussian CDF
        if values_sorted is None:
            values_sorted = sorted(series)
        n = len(series)
        # Winsorized truncation bound keeps quantiles strictly inside (0, 1)
        delta = 1.0 / (4.0 * n ** 0.25 * np.sqrt(np.pi * np.log(n)))
        quantiles = np.searchsorted(values_sorted, series) / len(values_sorted)
        quantiles = np.clip(quantiles, a_min=delta, a_max=1 - delta)
        return stats.norm.ppf(quantiles)

    def transform(self, y: np.array):
        """
        :param y: shape (n, dim)
        :return: shape (n, dim), normally distributed per column
        """
        assert y.shape[1] == self.dim
        # compute truncated quantiles per column, then apply the inverse CDF
        columns = [
            self.z_transform(y[:, i], self.sorted[:, i]) for i in range(self.dim)
        ]
        return np.stack(columns).T
class StandardTransform:
    """Per-column standardization: subtract the mean, divide by the (clipped) std."""

    def __init__(self, y: np.array):
        assert y.ndim == 2
        self.dim = y.shape[1]
        self.mean = y.mean(axis=0, keepdims=True)
        self.std = y.std(axis=0, keepdims=True)

    def transform(self, y: np.array):
        # clip the std to avoid division by ~0 on (nearly) constant columns
        denominator = np.clip(self.std, a_min=0.001, a_max=None)
        return (y - self.mean) / denominator
def from_string(name: str):
    """Return the normalization transform class for *name*.

    :param name: either "standard" or "gaussian"
    :raises ValueError: for any other name (the previous ``assert`` would be
        stripped when Python runs with -O)
    """
    if name not in ("standard", "gaussian"):
        raise ValueError(f"unknown normalization: {name}")
    mapping = {
        "standard": StandardTransform,
        "gaussian": GaussianTransform,
    }
    return mapping[name]
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/optimizer/random_search.py | from typing import Optional, List, Tuple
import numpy as np
from optimizer import Optimizer
class RS(Optimizer):
    """Random search: draws points uniformly from the bounded search space,
    or uniformly from a candidate pool when one is given."""

    def __init__(
            self,
            input_dim: int,
            output_dim: int,
            bounds: Optional[np.array] = None,
            evaluations_other_tasks: Optional[List[Tuple[np.array, np.array]]] = None,
    ):
        super(RS, self).__init__(
            input_dim=input_dim,
            output_dim=output_dim,
            bounds=bounds,
            evaluations_other_tasks=evaluations_other_tasks,
        )

    def _sample(self, candidates: Optional[np.array] = None) -> np.array:
        # with a candidate pool: pick one at random; otherwise sample the box
        if candidates is not None:
            idx = np.random.randint(low=0, high=len(candidates))
            return candidates[idx]
        lower, upper = self.bounds
        return (upper - lower) * np.random.random(self.input_dim) + lower
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/blackbox/offline.py | from pathlib import Path
import pandas as pd
import numpy as np
# Blackbox identifiers used to locate the offline evaluation files.
deepar = 'DeepAR'
fcnet = 'FCNET'
xgboost = 'XGBoost'
nas102 = 'nas_bench102'
# Column names shared by the offline evaluation dataframes.
metric_error = 'metric_error'
metric_time = 'metric_time'
def evaluations_df(blackbox: str) -> pd.DataFrame:
    """
    :param blackbox: one of deepar, fcnet, xgboost, nas102
    :returns a dataframe where each row corresponds to one hyperparameter evaluated for one task.
    The hyperparamers columns are all prefixed by 'hp_', the metric columns (error, time, etc) are
    prefixed by 'metric_' and dataset information are prefixed by 'dataset_' (only available for
    DeepAR). Two columns 'task' and 'blackbox' contains the name of the task and of the blackbox.
    :raises ValueError: for an unknown blackbox name (the previous ``assert``
        would be stripped when Python runs with -O)

    ## DeepAR

    Hyperparameters:
    * num_layers
    * num_cells
    * context_length_ratio, context_length_ratio = context_length / prediction_length
    * dropout_rate
    * learning_rate
    * num_batches_per_epoch

    Constants:
    * epochs = 100
    * early_stopping_patience = 5

    Dataset specific:
    * time_freq
    * prediction_length

    Metrics:
    * CRPS
    * train_loss
    * throughput
    * RMSE

    ## FCNET
    """
    if blackbox not in (deepar, fcnet, xgboost, nas102):
        raise ValueError(f"unknown blackbox: {blackbox}")
    # evaluation tables ship with the package as zipped CSV files
    df = pd.read_csv(Path(__file__).parent / f"offline_evaluations/{blackbox}.csv.zip")
    return df
if __name__ == '__main__':
    # Visual sanity checks of the DeepAR offline evaluations.
    df = evaluations_df(deepar)
    import seaborn as sns
    import matplotlib.pyplot as plt

    # recover raw hyperparameter values from their log-transformed columns
    df["hp_learning_rate"] = df.hp_learning_rate_log.apply(np.exp)
    df["hp_context_length_ratio"] = df.hp_context_length_ratio_log.apply(np.exp)
    df["hp_num_batches_per_epoch"] = df.hp_num_batches_per_epoch_log.apply(np.exp)

    # CRPS against learning rate, linear then log-log axes
    ax = sns.scatterplot(data=df, x='hp_learning_rate', y='metric_CRPS', hue='task')
    plt.show()

    ax = sns.scatterplot(data=df, x='hp_learning_rate', y='metric_CRPS', hue='task')
    ax.set(xscale="log", yscale="log")
    plt.show()

    ax = sns.scatterplot(data=df, x='hp_context_length_ratio', y='metric_CRPS', hue='task')
    ax.set(yscale="log")
    plt.show()

    # training time against batches per epoch
    ax = sns.scatterplot(data=df, x='hp_num_batches_per_epoch', y='metric_time', hue='task')
    ax.set(xscale="log", yscale="log")
    plt.show()
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/blackbox/load_utils.py | import logging
from typing import Tuple, List
import numpy as np
from blackbox.offline import evaluations_df, deepar, fcnet, nas102, xgboost
# Tasks available in each offline-evaluated blackbox, keyed by blackbox name.
blackbox_tasks = {
    nas102: [
        'cifar10',
        'cifar100',
        'ImageNet16-120'
    ],
    fcnet: [
        'naval',
        'parkinsons',
        'protein',
        'slice',
    ],
    deepar: [
        'm4-Hourly',
        'm4-Daily',
        'm4-Weekly',
        'm4-Monthly',
        'm4-Quarterly',
        'm4-Yearly',
        'electricity',
        'exchange-rate',
        'solar',
        'traffic',
    ],
    xgboost: [
        'a6a',
        'australian',
        'german.numer',
        'heart',
        'ijcnn1',
        'madelon',
        'skin_nonskin',
        'spambase',
        'svmguide1',
        'w6a'
    ],
}

# Name of the metric column that is minimized for each blackbox.
error_metric = {
    deepar: 'metric_CRPS',
    fcnet: 'metric_error',
    nas102: 'metric_error',
    xgboost: 'metric_error',
}

# Flat list of every task across all blackboxes.
tasks = [task for bb, tasks in blackbox_tasks.items() for task in tasks]
def evaluations_np(
        blackbox: str,
        test_task: str,
        metric_cols: List[str],
        min_max_features: bool = False
) -> Tuple[List[Tuple[np.array, np.array]], Tuple[np.array, np.array]] :
    """
    Split the offline evaluations of one blackbox into train tasks and a test task.

    :param blackbox: blackbox name, see `blackbox_tasks`
    :param test_task: task held out as the test task; must exist in the blackbox
    :param metric_cols: metric columns extracted as the outputs y
    :param min_max_features: whether to apply min-max scaling on input features
    :return: list of features/evaluations on train task and features/evaluations of the test task.
    """
    logging.info(f"retrieving metrics {metric_cols} of blackbox {blackbox} for test-task {test_task}")
    df = evaluations_df(blackbox=blackbox)
    assert test_task in df.task.unique()
    for c in metric_cols:
        assert c in df.columns
    # build a (features, outputs) pair per task, columns sorted for determinism
    Xy_dict = {}
    for task in sorted(df.task.unique()):
        mask = df.loc[:, 'task'] == task
        hp_cols = [c for c in sorted(df.columns) if c.startswith("hp_")]
        X = df.loc[mask, hp_cols].values
        y = df.loc[mask, metric_cols].values
        Xy_dict[task] = X, y
    # todo it would be better done as a post-processing step
    if blackbox in [fcnet, nas102]:
        # applies onehot encoding to *all* hp columns as all hps are categories for those two blackboxes
        # it would be nice to detect column types or pass it as an argument
        from sklearn.preprocessing import OneHotEncoder
        enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
        hp_cols = [c for c in sorted(df.columns) if c.startswith("hp_")]
        # fit the encoder on the whole dataframe so all tasks share one encoding
        enc.fit(df.loc[:, hp_cols])
        for task, (X, y) in Xy_dict.items():
            X_features = enc.transform(X)
            Xy_dict[task] = X_features, y
    if min_max_features:
        # min-max scaling of input features, fitted across all tasks jointly
        from sklearn.preprocessing import MinMaxScaler
        X = np.vstack([X for (X, y) in Xy_dict.values()])
        scaler = MinMaxScaler().fit(X)
        Xy_dict = {t: (scaler.transform(X), y) for (t, (X, y)) in Xy_dict.items()}
    Xys_train = [Xy_dict[t] for t in df.task.unique() if t != test_task]
    Xy_test = Xy_dict[test_task]
    return Xys_train, Xy_test
def blackbox_from_task(task: str) -> str:
    """Return the name of the blackbox that contains the given task.

    :raises ValueError: if the task belongs to no known blackbox. The previous
        version ended with ``assert f"unknown task {task}"``, which never fires
        (a non-empty string is truthy) and silently fell through returning None.
    """
    for bb, tasks in blackbox_tasks.items():
        if task in tasks:
            return bb
    raise ValueError(f"unknown task {task}")
def evaluation_split_from_task(test_task: str, min_max_features: bool = True) -> Tuple[np.array, np.array]:
    """
    :param test_task: held-out task name; the owning blackbox is looked up automatically
    :param min_max_features: whether inputs are mapped to [0, 1] with min-max scaling
    :return: list of features/evaluations on train tasks and features/evaluations of the test task.
    """
    bb = blackbox_from_task(test_task)
    return evaluations_np(
        blackbox=bb,
        test_task=test_task,
        metric_cols=[error_metric[bb]],
        min_max_features=min_max_features,
    )
if __name__ == '__main__':
    # quick sanity check of the split helper on a handful of tasks
    evaluation_split_from_task("a6a")
    for name in ['electricity', 'cifar10', 'australian', 'parkinsons']:
        Xys_train, (X_test, y_test) = evaluation_split_from_task(name)
        print(len(Xys_train), X_test.shape)
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/blackbox/__init__.py | from typing import Callable
import numpy as np
class Blackbox:
    """Wraps an arbitrary evaluation function with declared input/output sizes."""

    def __init__(
            self,
            input_dim: int,
            output_dim: int,
            eval_fun: Callable[[np.array], np.array],
    ):
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.eval_fun = eval_fun

    def __call__(self, x: np.array) -> np.array:
        """
        :param x: shape (input_dim,)
        :return: shape (output_dim,)
        """
        assert x.shape == (self.input_dim,)
        result = self.eval_fun(x)
        assert result.shape == (self.output_dim,)
        return result
class BlackboxOffline(Blackbox):
    """
    A blackbox backed by a table of precomputed evaluations.
    Evaluating a new point returns the value of the closest known point.
    """

    def __init__(
            self,
            X: np.array,
            y: np.array,
    ):
        """
        :param X: arguments evaluated, shape (n, input_dim)
        :param y: outputs evaluated, shape (n, output_dim)
        """
        assert len(X) == len(y)
        n, input_dim = X.shape
        n, output_dim = y.shape
        from sklearn.neighbors import KNeighborsRegressor
        # 1-NN lookup implements "value of the closest known point"
        nn = KNeighborsRegressor(n_neighbors=1).fit(X, y)
        super(BlackboxOffline, self).__init__(
            input_dim=input_dim,
            output_dim=output_dim,
            eval_fun=lambda x: nn.predict(x.reshape(1, -1))[0],
        )
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/tst/test_normalization.py | import numpy as np
import pytest
from optimizer.normalization_transforms import GaussianTransform, StandardTransform
@pytest.mark.parametrize("psi_cls", [GaussianTransform, StandardTransform])
def test_gaussian_transform(psi_cls):
    # after transforming, every column should be roughly zero-mean, unit-std
    n, dim, tol = 1000, 2, 0.05
    y = np.random.uniform(size=(n, dim))
    z = psi_cls(y).transform(y)
    assert np.allclose(z.mean(axis=0), np.zeros((dim,)), rtol=tol, atol=tol)
    assert np.allclose(z.std(axis=0), np.ones((dim,)), rtol=tol)
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/tst/test_prior.py | import numpy as np
from prior.mlp_pytorch import ParametricPrior
# Shared settings for the prior-fitting tests below.
num_train_examples = 10000
num_test_examples = num_train_examples
dim = 2
# kept small so the unit tests stay fast
num_gradient_updates = 200
lr = 1e-2
def make_random_X_y(num_examples: int, dim: int, noise_std: float):
    """Draw random features in [0, 1) and targets = row-sum + Gaussian noise."""
    X = np.random.rand(num_examples, dim)
    noise = np.random.normal(scale=noise_std, size=(num_examples, 1))
    return X, X.sum(axis=-1, keepdims=True) + noise
def test_mu_fit():
    # the parametric prior should recover a simple linear mean function
    noise_std = 0.01
    X_train, y_train = make_random_X_y(num_examples=num_train_examples, dim=dim, noise_std=noise_std)
    prior = ParametricPrior(
        X_train=X_train,
        y_train=y_train,
        num_gradient_updates=num_gradient_updates,
        num_decays=1,
        # smaller network for UT speed
        num_layers=2,
        num_hidden=20,
        dropout=0.0,
        lr=lr,
    )
    X_test, y_test = make_random_X_y(num_examples=num_test_examples, dim=dim, noise_std=noise_std)
    mu_pred, _ = prior.predict(X_test)
    mu_l1_error = np.abs(mu_pred - y_test).mean()
    print(mu_l1_error)
    assert mu_l1_error < 0.3
def test_sigma_fit():
    # the parametric prior should recover a simple constant function for the variance
    noise_std = 0.5
    X_train, y_train = make_random_X_y(num_examples=num_train_examples, dim=dim, noise_std=noise_std)
    prior = ParametricPrior(
        X_train=X_train,
        y_train=y_train,
        num_gradient_updates=num_gradient_updates,
        num_decays=1,
        num_layers=2,
        num_hidden=20,
        dropout=0.0,
        lr=lr
    )
    X_test, y_test = make_random_X_y(num_examples=num_test_examples, dim=dim, noise_std=noise_std)
    mu_pred, sigma_pred = prior.predict(X_test)
    # Take the absolute error: the previous version used the signed difference,
    # so arbitrarily large *under*-estimates of sigma passed the assertion.
    sigma_l1_error = np.abs(sigma_pred.mean() - noise_std)
    assert sigma_l1_error < 0.05
| 1,884 | 29.403226 | 101 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/tst/test_optimization.py | import logging
import random
from functools import partial
import numpy as np
import pytest
import torch
from blackbox import Blackbox, BlackboxOffline
from misc import set_seed
from misc.artificial_data import artificial_task1
from optimizer.gaussian_process import GP
from optimizer.gaussian_process_functional_prior import G3P
from optimizer.normalization_transforms import StandardTransform, GaussianTransform
from optimizer.thompson_sampling_functional_prior import TS
from optimizer.random_search import RS
# shared synthetic task for all tests in this module
Xy_train, X_test, y_test = artificial_task1()


@pytest.mark.parametrize("blackbox", [
    Blackbox(
        input_dim=2,
        output_dim=1,
        eval_fun=lambda x: x.sum(axis=-1, keepdims=True),
    ),
    BlackboxOffline(
        X=X_test,
        y=y_test,
    )
])
def test_blackbox_works_with_optimization(blackbox: Blackbox):
    # random search should run end-to-end against both blackbox flavours
    logging.basicConfig(level=logging.INFO)
    set_seed(3)
    optimizer = RS(
        input_dim=blackbox.input_dim,
        output_dim=blackbox.output_dim,
        evaluations_other_tasks=Xy_train,
    )
    for _ in range(5):
        x = optimizer.sample(X_test)
        y = blackbox(x)
        logging.info(f"criterion {y} for arguments {x}")
        optimizer.observe(x=x, y=y)
@pytest.mark.parametrize("optimizer_cls", [
    RS,
    # 5 gradient updates to makes it faster as we are only smoke-checking
    partial(TS, num_gradient_updates=5, normalization="standard"),
    partial(TS, num_gradient_updates=5, normalization="gaussian"),
    partial(GP, normalization="standard"),
    partial(GP, normalization="gaussian"),
    partial(G3P, num_gradient_updates=5, normalization="standard"),
])
@pytest.mark.parametrize("constrained_search", [False, True])
def test_smoke_optimizers(optimizer_cls, constrained_search: bool):
    # every optimizer should complete a short loop, with and without candidates
    logging.basicConfig(level=logging.INFO)
    blackbox = Blackbox(
        input_dim=2,
        output_dim=1,
        eval_fun=lambda x: x.sum(axis=-1, keepdims=True),
    )
    optimizer = optimizer_cls(
        input_dim=blackbox.input_dim,
        output_dim=blackbox.output_dim,
        evaluations_other_tasks=Xy_train,
    )
    for _ in range(10):
        x = optimizer.sample(X_test) if constrained_search else optimizer.sample()
        y = blackbox(x)
        logging.info(f"criterion {y} for arguments {x}")
        optimizer.observe(x=x, y=y)
| 2,572 | 26.967391 | 83 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/tst/test_evaluate.py | import pytest
from experiments.evaluate_optimizer_task import evaluate
@pytest.mark.parametrize("optimizer", [
    "RS",
    "GP",
    "GCP",
    # slow:
    # "TS",
    "CTS",
    # "GP+prior",
    "GCP+prior",
])
def test_evaluate(optimizer: str):
    # Smoke-test the end-to-end evaluation driver for each (fast) optimizer;
    # 2 seeds x 10 evaluations keeps the runtime short.
    evaluate(
        optimizer=optimizer,
        task="electricity",
        num_seeds=2,
        num_evaluations=10,
        output_folder="/tmp/",
        prior="sklearn",
    )
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/tst/test_gp.py | import logging
import pytest
from blackbox import Blackbox
from misc.artificial_data import artificial_task1
from optimizer.gaussian_process import GP
@pytest.mark.parametrize("constrained_search", [False, True])
@pytest.mark.parametrize("normalization", ["standard", "gaussian"])
def test_gp(constrained_search: bool, normalization: str):
    # the GP optimizer should run with both normalizations, constrained or not
    logging.basicConfig(level=logging.INFO)
    Xy_train, X_test, y_test = artificial_task1()
    blackbox = Blackbox(
        input_dim=2,
        output_dim=1,
        eval_fun=lambda x: x.sum(axis=-1, keepdims=True),
    )
    optimizer = GP(
        input_dim=blackbox.input_dim,
        output_dim=blackbox.output_dim,
        normalization=normalization,
    )
    for _ in range(8):
        x = optimizer.sample(X_test) if constrained_search else optimizer.sample()
        y = blackbox(x)
        logging.info(f"criterion {y} for arguments {x}")
        optimizer.observe(x=x, y=y)
| 1,005 | 26.189189 | 86 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/tst/test_blackbox.py | import numpy as np
from blackbox import BlackboxOffline
def test_blackbox():
    # the offline blackbox must reproduce every point it was built from
    n, dim = 20, 2
    X_test = np.random.rand(n, dim)
    y_test = np.random.rand(n, 1)
    blackbox = BlackboxOffline(X=X_test, y=y_test)
    for x, y in zip(X_test, y_test):
        assert np.allclose(blackbox(x), y)
| 326 | 19.4375 | 42 | py |
optisplit | optisplit-main/mean.py | import pandas as pd
import os
import numpy as np
from pdb import set_trace as bp
import sys
from pathlib import Path
"""Calculate means of result files."""
def sort_dfs(dfs):
    """Sort the first four rows and the remaining rows of each frame separately
    by the ' method' column (descending), then restore a 0..n-1 index."""
    result = []
    for frame in dfs:
        head = frame.iloc[:4, :].sort_values(by=[' method'], ascending=False)
        tail = frame.iloc[4:, :].sort_values(by=[' method'], ascending=False)
        combined = pd.concat([head, tail])
        combined.set_index(np.arange(len(frame)), inplace=True)
        result.append(combined)
    return result
if __name__ == '__main__':
    # usage: python mean.py <dataset name> <result directory>
    name = sys.argv[1]
    output_dir = sys.argv[2]
    if name not in ['small', 'go', 'xml']:
        print('invalid dataset name')
        sys.exit(1)
    # load every per-run CSV for this dataset, skipping previous mean files
    dfs = [pd.read_csv(Path(output_dir, fname), index_col=False) for fname in os.listdir(output_dir) if 'csv' in fname and name in fname and 'mean' not in fname]
    print(len(dfs))
    if name == 'xml':
        # xml result rows are not aligned across runs; sort them first
        dfs = sort_dfs(dfs)
    # row-wise mean over runs, then re-attach the identifying columns
    df = pd.concat(dfs).groupby(level=0).mean()
    df.insert(0, 'method', dfs[0].values[:,1])
    df.insert(0, 'dataset', dfs[0].values[:,0])
    df.to_csv(Path(output_dir, f'mean_scores_{name}.csv'), index=False, float_format='%.4f')
    # df.to_csv(Path(path, f'mean_scores_{name}.csv'), index=False)
| 1,226 | 26.266667 | 161 | py |
optisplit | optisplit-main/evaluation_metric_experiment.py | import numpy as np
import joblib
import matplotlib.pyplot as plt
import scipy.sparse as sp
import warnings
from copy import deepcopy
from pdb import set_trace as bp
from textwrap import wrap
import cv_balance
# print floats with 5 decimals; silence the scipy sparse comparison warning
np.set_printoptions(formatter={'float': lambda x: "{0:0.5f}".format(x)})
warnings.filterwarnings('ignore', message='Comparing a sparse matrix with 0 using == is inefficient')
def equal(y, ones, n_folds):
    """Equally distributed folds: each fold gets ones[i] // n_folds positives of class i.

    :param y: list of per-fold zero matrices, modified in place
    :param ones: total number of positives per class
    :param n_folds: number of folds
    :return: sparse CSR matrix stacking all folds
    """
    for yy in y:
        for i in range(yy.shape[1]):
            yy[:ones[i] // n_folds, i] = 1
    # np.row_stack is a deprecated alias of np.vstack (removed direction in NumPy 2.x)
    targets = np.vstack(y)
    return sp.csr_matrix(targets)
def classes_missing_from_1_fold(y, ones, n_folds):
    """Folds where every class is absent from fold 0 and spread over the rest.

    :param y: list of per-fold zero matrices, modified in place
    :param ones: total number of positives per class
    :return: sparse CSR matrix stacking all folds
    """
    for j, yy in enumerate(y):
        if j == 0:
            continue  # leave the first fold without any positives
        for i in range(yy.shape[1]):
            yy[:ones[i] // (n_folds - 1), i] = 1
    # np.int was removed in NumPy >= 1.24; the builtin int is the replacement
    targets = np.vstack(y).astype(int)
    return sp.csr_matrix(targets)
def difference(y, ones, n_folds):
    """Small difference between folds: fold 0 gets ~20% extra positives per
    class, fold 1 ~20% fewer, every other fold the exact proportional share.

    :param y: list of per-fold zero matrices, modified in place
    :param ones: total number of positives per class
    :return: sparse CSR matrix stacking all folds
    """
    diff = 0.2
    for j, yy in enumerate(y):
        for i in range(yy.shape[1]):
            share = ones[i] // n_folds
            # np.int was removed in NumPy >= 1.24; builtin int truncates the same way
            offset = int(diff * share)
            if j == 0:
                yy[:share + offset, i] = 1
            elif j == 1:
                yy[:share - offset, i] = 1
            else:
                yy[:share, i] = 1
    return sp.csr_matrix(np.vstack(y))
def mk_y(size, n_folds):
    """Generate the synthetic fold data and dump it to results/res.joblib.

    :param size: (n_samples, n_classes) shape of the target matrix
    :param n_folds: number of cross-validation folds
    """
    y = np.split(np.zeros(size), n_folds)
    folds = np.split(np.arange(size[0]), n_folds)
    folds = [(np.setdiff1d(np.arange(size[0]), f), f) for f in folds]
    # class sizes from 2*n_folds up to half the data;
    # np.int was removed in NumPy >= 1.24, use the builtin int instead
    ones = np.linspace(start=2*n_folds, stop=size[0]//2, num=100).astype(int)
    res = {}
    res['Equal'] = folds, equal(deepcopy(y), ones, n_folds)
    res['Difference'] = folds, difference(deepcopy(y), ones, n_folds)
    res['One missing'] = folds, classes_missing_from_1_fold(deepcopy(y), ones, n_folds)
    joblib.dump(res, 'results/res.joblib')
def calculate_scores(target_fold_ratio, actual_fold_ratio):
    """Return the LD and rLD scores for the given target/actual fold ratios.

    Notation follows Section 3: D is the data size, Di the positives per class,
    Sj the fold size and Sij the positives landing in the fold.
    """
    D = 1  # data size
    Di = np.linspace(0.01 * D, 0.99 * D, 100)  # positives in each class
    Sj = D * actual_fold_ratio
    Sij = Di * target_fold_ratio
    d, p = Di / D, Sij / Sj
    ld = np.abs(p / (1 - p) - d / (1 - d))
    rld = np.abs((d - p) / d)
    return ld, rld
def plot_measures():
    """Plot LD and rLD scores of folds with given error.

    Produces two figures: results/ld_vs_rld.pdf (raw scores) and
    results/ld_vs_rld_differences.pdf (pairwise score differences).
    """
    # get scores for folds whose positive ratio deviates from the 0.2 target
    ratios = [(0.2, 0.25), (0.2, 0.3), (0.2, 0.4), (0.2, 0.5)][::-1]
    scores = [calculate_scores(*r) for r in ratios]
    ld_scores = [s[0] for s in scores]
    rld_scores = [s[1] for s in scores]

    # plot results
    # Score comparison
    plt.figure(figsize=(11, 3.8))
    plt.subplots_adjust(wspace=0.3, top=0.90, bottom=0.15, right=0.82, left=0.10)
    Di = np.linspace(0.01, 0.99, 100)
    plt.subplot(1,2,1,)
    plt.yscale('log')
    plt.plot(Di, np.array(ld_scores).T)
    plt.xlabel('$D_i$', fontsize=13)
    plt.title('A', fontsize=16)
    plt.ylabel('LD', fontsize=13, rotation=0, labelpad=15)
    plt.xticks(fontsize=13)
    plt.yticks(fontsize=13)

    plt.subplot(1,2,2,)
    plt.plot(Di, np.array(rld_scores).T)
    plt.title('B', fontsize=16)
    plt.ylabel('rLD', fontsize=13, rotation=0, labelpad=15)
    plt.xlabel('$D_i$', fontsize=13)
    plt.xticks(fontsize=13)
    plt.yticks(fontsize=13)
    title = 'Ratio of positive data points in the fold'
    title = '\n'.join(wrap(title, 20))
    lg = plt.legend([r[1] for r in ratios], bbox_to_anchor=(1.03, 0.8), loc="upper left", fontsize=13, title=title)
    title = lg.get_title()
    title.set_fontsize(13)
    plt.savefig(f'results/ld_vs_rld.pdf')

    # Difference comparison
    # calculate pairwise differences between scores (i > j keeps each pair once)
    ld_differences = np.array([x - y for i,x in enumerate(ld_scores[::-1]) for j,y in enumerate(ld_scores[::-1]) if i > j]).T
    rld_differences = np.array([x - y for i,x in enumerate(rld_scores[::-1]) for j,y in enumerate(rld_scores[::-1]) if i > j]).T
    labels = np.array([f'{ratios[i][1]}-{ratios[j][1]}' for i,x in enumerate(ld_scores[::-1]) for j,y in enumerate(ld_scores[::-1]) if i > j]).T

    plt.clf()
    plt.figure(figsize=(11, 3.8))
    plt.subplots_adjust(wspace=0.3, top=0.90, bottom=0.15, right=0.82, left=0.10)
    Di = np.linspace(0.01, 0.99, 100)
    plt.subplot(1,2,1,)
    plt.yscale('log')
    plt.plot(Di, ld_differences)
    plt.xlabel('$D_i$', fontsize=13)
    plt.title('C', fontsize=16)
    plt.ylabel('$\Delta LD$', fontsize=13, rotation=0, labelpad=15)
    plt.xticks(fontsize=13)
    plt.yticks(fontsize=13)

    plt.subplot(1,2,2,)
    plt.plot(Di, rld_differences)
    plt.title('D', fontsize=16)
    plt.xlabel('$D_i$', fontsize=13)
    plt.ylabel('$\Delta rLD$', fontsize=13, rotation=0, labelpad=15)
    plt.xticks(fontsize=13)
    plt.yticks(fontsize=13)
    plt.legend(labels, bbox_to_anchor=(1.02, 0.8), loc="upper left", fontsize=13)
    plt.savefig(f'results/ld_vs_rld_differences.pdf')
def synthetic_data_experiment():
    """Score each synthetic fold configuration produced by mk_y with LD, rLD
    and DCP, and save one scatter plot per configuration to results/."""
    datas = joblib.load('results/res.joblib')
    # NOTE(review): `methods` is unused below; the measures are computed explicitly
    methods = ['rld', 'ld', 'dcp']
    for i, name in enumerate(datas):
        plt.clf()
        data = datas[name]
        # data = (folds, targets); evaluate all three balance measures
        rld = np.array(cv_balance.rld(data[0], data[1])).ravel()
        ld = cv_balance.ld(data[0], data[1])
        dcp = cv_balance.cv_evaluate(data[0], data[1], np.array(data[1].sum(axis=0)).ravel(), method='dcp')
        res_all = np.column_stack((ld, rld, dcp))
        sizes = np.array(data[1].sum(axis=0)).ravel()
        # the last figure is wider to make room for the legend
        if i == 2:
            plt.figure(figsize=(6.6, 3.8))
        else:
            plt.figure(figsize=(5.4, 3.8))
        for j, m in enumerate(['.', '+', '2']):
            plt.plot(sizes, res_all[:,j], ms=11, marker=m, markevery=0.04, alpha=0.9, linestyle='None')
        # symlog keeps exact zeros visible on logarithmic axes
        plt.xscale('symlog', linthreshx=0.000001)
        plt.yscale('symlog', linthreshy=0.000001)
        plt.ylim(-0.000001, np.max(res_all)+3)
        plt.xlabel('Class size', fontsize=16)
        plt.ylabel('Score', fontsize=16)
        plt.xticks(fontsize=15)
        plt.yticks(fontsize=15)
        plt.title(name, x=0.5, y=0.89, fontsize=16)
        if i == 2:
            lg = plt.legend(['LD', 'rLD', 'DCP'], bbox_to_anchor=(1.05, 0.5), loc="upper left", fontsize=14, title='Measure')
            title = lg.get_title()
            title.set_fontsize(14)
        plt.tight_layout()
        plt.savefig(f'results/{name}.pdf')
if __name__ == '__main__':
    # generate the synthetic folds, then produce all figures
    mk_y((100000,100), 10)
    synthetic_data_experiment()
    plot_measures()
| 6,729 | 29.87156 | 144 | py |
optisplit | optisplit-main/cv_comparison_experiment.py | import argparse
import sys
import time
import arff
import joblib
import numpy as np
import scipy.sparse as sp
from copy import deepcopy
from datetime import timedelta
from joblib import Parallel, delayed
from pdb import set_trace as bp
from skmultilearn.model_selection import IterativeStratification
from cv_balance import optisplit, random_cv, cv_evaluate, check_folds, rld, ld
sys.path.append('stratified_sampling_for_XML/stratify_function/')
from stratify import stratified_train_test_split
import warnings
warnings.filterwarnings('ignore', message='Comparing a sparse matrix with 0 using == is inefficient')
def load_datasets(dataset_type):
    """Load the multilabel target matrices for one benchmark group.

    Parameters
    ----------
    dataset_type : str
        'small' (arff benchmarks), 'go' (gene-ontology npz files) or
        'xml' (extreme-multilabel npz files).

    Returns
    -------
    dict
        Maps dataset name to a scipy sparse matrix of binary targets.

    Raises
    ------
    NotImplementedError
        If ``dataset_type`` is not one of the known groups.
    """
    datasets = {}
    if dataset_type == 'small':
        for dataset in [('mediamill', 101), ('bibtex', 159), ('delicious', 983)]:
            print(f'loading {dataset[0]}')
            with open(f'data/{dataset[0]}.arff') as f:
                data = arff.load(f)
            data = np.array(data['data'])
            # the last dataset[1] columns hold the label matrix.
            # BUG FIX: np.int was removed in NumPy 1.24 -- use the builtin int.
            datasets[dataset[0]] = sp.csr_matrix(data[:,-dataset[1]:].astype(int))
    elif dataset_type == 'go':
        for dataset in ['CC', 'MF']:
            print(f'loading {dataset}')
            data = sp.load_npz(f'data/{dataset}_targets.npz')
            class_sizes = data.sum(axis=0)
            # drop classes that are positive for every example
            if np.any(class_sizes == data.shape[0]):
                data = data[:, np.array(class_sizes) < data.shape[0]]
            datasets[dataset] = data
    elif dataset_type == 'xml':
        for dataset in ['BP_targets.npz', 'wiki10_31k.npz']:
            print(f'loading {dataset}')
            data = sp.load_npz(f'data/{dataset}')
            class_sizes = data.sum(axis=0)
            # drop empty classes and classes positive for every example
            if np.any(class_sizes == 0):
                data = data[:, (np.array(class_sizes) > 0).ravel()]
            if np.any(class_sizes == data.shape[0]):
                data = data[:, np.array(class_sizes) < data.shape[0]]
            datasets[dataset] = data
    else:
        raise NotImplementedError('unknown datasets')
    return datasets
def iterstrat(n_folds, targets, random_state=42):
    """First-order iterative stratification via skmultilearn."""
    dummy_features = np.zeros((targets.shape[0], 1))
    splitter = IterativeStratification(n_splits=n_folds, random_state=random_state)
    return list(splitter.split(dummy_features, targets))
def sois(n_folds, targets, random_state=42):
    """Second-order iterative stratification via skmultilearn."""
    dummy_features = np.zeros((targets.shape[0], 1))
    splitter = IterativeStratification(n_splits=n_folds, random_state=random_state, order=2)
    return list(splitter.split(dummy_features, targets))
def stratified(n_folds, targets, random_state=42):
    """Stratified sampling: repeatedly peel one test fold off the remaining
    pool with ``stratified_train_test_split``."""
    fold_list = []
    pool = np.arange(targets.shape[0])
    fold_size = targets.shape[0] // n_folds
    for fold_idx in range(n_folds):
        # fraction of the pool to put in this fold; the last fold (or an
        # undersized pool) takes everything that is left
        if len(pool) > fold_size and fold_idx < n_folds - 1:
            test_fraction = fold_size / len(pool)
        else:
            test_fraction = len(pool)
        label_lists = list(targets[pool, :].tolil().rows)
        dummy_features = list(np.zeros((targets.shape[0], 1))[pool])
        split = stratified_train_test_split(dummy_features, label_lists, target_test_size=test_fraction, random_state=random_state)
        next_pool = remove(pool, split[1])
        fold_list.append((None, pool[split[1]]))
        pool = next_pool
    all_idx = np.arange(targets.shape[0])
    # fill in each training fold as the complement of its test fold
    return [(np.setdiff1d(all_idx, fold[1]), fold[1]) for fold in fold_list]
def partitioning_cv(n_folds, targets, random_state=42):
    """Partitioning method based on stratified random sampling (PMBSRS).

    Instances are sorted by the product of their label frequencies, cut into
    strata, shuffled within each stratum, and dealt round-robin into folds.

    Parameters
    ----------
    n_folds : int
        Number of cross-validation folds.
    targets : scipy sparse matrix
        Binary target matrix (instances x labels).
    random_state : int
        Seed for the within-stratum shuffles.

    Returns
    -------
    list of (train_index, test_index) numpy-array tuples.
    """
    np.random.seed(random_state)
    frequencies = np.array(np.mean(targets, axis=0)).ravel()
    index = list(targets.tolil().rows)
    tt = [frequencies[index[i]] for i in range(len(index))]
    # BUG FIX: np.product was removed in NumPy 2.0 -- np.prod is the
    # canonical spelling (identical behaviour).
    D = np.array([np.prod(t) for t in tt])
    index = np.argsort(D)
    stratas = np.array_split(index, n_folds)
    for i in range(len(stratas)):
        np.random.shuffle(stratas[i])
    substratas = [np.array_split(s, n_folds) for s in stratas]
    folds = []
    for j in range(n_folds):
        res = []
        for i in range(n_folds):
            res.append(substratas[i][j])
        folds.append((None, np.concatenate(res).ravel()))
    # training indices are the complement of each test fold
    folds = [(np.setdiff1d(np.arange(targets.shape[0]), f[1]), f[1]) for f in folds]
    return folds
def remove(remaining, split):
    """Return ``remaining`` without the entries at the positions in ``split``."""
    return np.setdiff1d(remaining, remaining[split])
def improve_split(dataset_type, random_state=42, output_dir='results'):
    """Use optisplit to improve an existing split"""
    np.random.seed(random_state)
    folds = joblib.load(f'{output_dir}/folds_{dataset_type}_{random_state}.joblib')
    res = {}
    for dataset in folds.keys():
        res[dataset] = {}
        for method in folds[dataset].keys():
            # data is (folds, targets, elapsed) as written by create_folds
            data = folds[dataset][method]
            # rebuild each (train, test) pair: the train half is the
            # complement of the stored test indices
            folds0 = [(np.setdiff1d(np.arange(data[1].shape[0]), f[1]), f[1]) for f in data[0]]
            if not check_folds(folds0, data[1]):
                # NOTE(review): leftover debugging hook -- drops into pdb when
                # the stored folds are inconsistent, then re-runs the check by
                # hand. Consider replacing with an assertion.
                bp()
                check_folds(folds0, data[1])
            print(f'{method}')
            start = time.time()
            result = optisplit(n_splits=len(data[0]), targets=data[1], seed=random_state,initial_folds=folds0)
            elapsed = time.time()-start
            runtime = f'Time: {str(timedelta(seconds=elapsed))}'
            res[dataset][method] = result, data[1], elapsed
            print(runtime)
    joblib.dump(res, f'{output_dir}/folds_{dataset_type}_{random_state}_IMPROVED.joblib')
def create_folds(dataset_type, n_folds=5, random_state=42, output_dir='results'):
    """Create cross-validation folds with every configured method and store
    (folds, targets, runtime) per dataset/method in a joblib file.

    Parameters
    ----------
    dataset_type : str
        'small', 'go' or 'xml' (passed to load_datasets).
    n_folds : int
        Number of folds to create.
    random_state : int
        Seed forwarded to every splitting method.
    output_dir : str
        Directory for the output joblib file.
    """
    own_dcp = lambda n_splits, targets, random_seed: optisplit(n_splits, targets, method='dcp', seed=random_seed)
    own_rld = lambda n_splits, targets, random_seed: optisplit(n_splits, targets, method='rld', seed=random_seed)
    own_ld = lambda n_splits, targets, random_seed: optisplit(n_splits, targets, method='ld', seed=random_seed)
    datasets = load_datasets(dataset_type)
    # the slowest reference methods are only run on the smaller dataset groups
    if dataset_type in ['small', 'go']:
        methods = {'SS':stratified, 'PMBSRS':partitioning_cv, 'IS':iterstrat, 'SOIS':sois, 'own_ld':own_ld, 'own_dcp':own_dcp, 'own_rld':own_rld, 'random':random_cv}
    else:
        methods = {'own_ld':own_ld, 'own_dcp':own_dcp, 'own_rld':own_rld, 'PMBSRS':partitioning_cv, 'random':random_cv, 'SS':stratified}
    res = {}
    for dataset in datasets.keys():
        print(f'{dataset}')
        res[dataset] = {}
        for method in methods.keys():
            print(f'{method}')
            start = time.time()
            targets = datasets[dataset]
            try:
                result = methods[method](n_folds, deepcopy(targets), random_state)
                elapsed = time.time()-start
                runtime = f'Time: {str(timedelta(seconds=elapsed))}'
                res[dataset][method] = result, targets, elapsed
                print(runtime)
            except Exception as e:
                # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt
                # and SystemExit and hid the cause of the failure.
                print(f'Error in {method} on {dataset} - skipped ({e})')
    joblib.dump(res, f'{output_dir}/folds_{dataset_type}_{random_state}.joblib')
def example_distribution(folds, targets):
    """Mean absolute deviation of the test-fold sizes from the ideal equal
    size (the ED measure)."""
    n_folds = len(folds)
    ideal_size = targets.shape[0] * (1 / n_folds)
    total = 0
    for fold in folds:
        total += np.abs(len(fold[1]) - ideal_size)
    return (1 / n_folds) * total
def evaluate_folds(dataset_type, random_state, output_dir):
    """Score stored cross-validation folds with ED, LD, DCP and rLD and
    write a per-dataset/method csv summary.

    Reads the joblib file produced by ``create_folds`` and writes
    ``scores_{dataset_type}_{random_state}.csv`` into ``output_dir``.
    """
    folds = joblib.load(f'{output_dir}/folds_{dataset_type}_{random_state}.joblib')
    res = {}
    for dataset in folds.keys():
        res[dataset] = {}
        for method in folds[dataset].keys():
            data = folds[dataset][method]
            targets = data[1]
            class_sizes = np.array(targets.sum(axis=0)).ravel()
            # remove empty classes if they exists
            targets = targets[:, np.where(class_sizes > 0)[0]]
            # BUG FIX: cv_evaluate() takes (folds, targets, method); passing
            # the class sizes positionally collided with the `method` keyword
            # and raised a TypeError.
            dcp = cv_evaluate(data[0], targets, method='dcp')
            ED = example_distribution(data[0], targets)
            LD = np.mean(ld(data[0], targets))
            rld_score = np.mean(rld(data[0], targets))
            dcp_score = np.mean(dcp)
            runtime = data[2]
            res[dataset][method] = {'ED':ED, 'LD':LD, 'dcp':dcp_score, 'rld':rld_score, 'runtime':runtime}
    # flatten python repr of the score list into plain csv fields
    tostr = lambda x: str(x).replace('[','').replace(']','').replace('\'', '')
    with open(f'{output_dir}/scores_{dataset_type}_{random_state}.csv', 'w') as f:
        fields = 'dataset, method, ED, LD, dcp, rld, runtime\n'
        f.write(fields)
        for dataset, results in res.items():
            for method, scores in results.items():
                score_str = tostr([v for v in list(scores.values())])
                f.write(f'{dataset},{method},{score_str}\n')
if __name__ == '__main__':
    # CLI: positional dataset group, seed and output directory; the three
    # stage flags can be combined in one invocation.
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_type', type=str, help='small, go or xml')
    parser.add_argument('random_state', type=int)
    parser.add_argument('output_dir', type=str)
    parser.add_argument('-e', '--evaluation', action='store_true', help='run evaluations')
    parser.add_argument('-i', '--improve', action='store_true', help='improve existing folds')
    parser.add_argument('-c', '--create', action='store_true', help='create folds')
    args = parser.parse_args()
    # creation runs first so evaluation/improvement can read the fresh folds
    if args.create:
        create_folds(dataset_type=args.dataset_type, random_state=args.random_state, output_dir=args.output_dir)
    if args.evaluation:
        evaluate_folds(dataset_type=args.dataset_type, random_state=args.random_state, output_dir=args.output_dir)
    if args.improve:
        improve_split(dataset_type=args.dataset_type, random_state=args.random_state, output_dir=args.output_dir)
| 9,525 | 37.723577 | 166 | py |
optisplit | optisplit-main/cv_balance.py | import time
import numpy as np
import scipy.sparse as sp
from copy import deepcopy
from datetime import timedelta
from pdb import set_trace as bp
def rld(folds, targets):
    """Relative label distribution: per class, the mean over folds of
    |d_i - p_ij| / d_i, where d_i is the overall positive rate and p_ij the
    positive rate inside fold j's test set."""
    mat = deepcopy(targets)
    overall_rate = np.array(mat.sum(axis=0)).ravel() / mat.shape[0]
    per_fold = []
    for fold in folds:
        test_idx = fold[1]
        fold_rate = np.array(mat[test_idx].sum(axis=0)).ravel() / len(test_idx)
        per_fold.append(np.abs((overall_rate - fold_rate) / overall_rate))
    return np.stack(per_fold).mean(axis=0)
def dcp(folds, targets):
    """Deviation from a perfect split: per class, the largest fraction of its
    positives landing in a single test fold, minus the ideal share 1/k."""
    mat = deepcopy(targets)
    totals = np.array(mat.sum(axis=0)).ravel()
    fold_counts = np.stack(
        [np.array(mat[fold[1]].sum(axis=0)).ravel() for fold in folds]
    )
    return (fold_counts / totals).max(axis=0) - 1 / len(folds)
def ld(folds, targets):
    """Label distribution: per class, the mean over folds of
    |p/(1-p) - d/(1-d)| comparing each fold's positive odds with the
    overall odds; all-positive rates are nudged to avoid division by zero."""
    mat = deepcopy(targets)
    n_rows = mat.shape[0]
    overall = np.array(mat.sum(axis=0)).ravel() / n_rows
    overall = np.where(overall == 1, (n_rows - 1) / n_rows, overall)
    scores = []
    for fold in folds:
        test_idx = fold[1]
        n_test = len(test_idx)
        fold_rate = np.array(mat[test_idx].sum(axis=0)).ravel() / n_test
        fold_rate = np.where(fold_rate == 1, (n_test - 1) / n_test, fold_rate)
        scores.append(np.abs(fold_rate / (1 - fold_rate) - overall / (1 - overall)))
    return np.stack(scores).mean(axis=0)
def cv_evaluate(folds, targets, method='original'):
    """Return X, Y evaluation metrics for a cv"""
    # dispatch table instead of an if/elif chain; unknown methods (including
    # the 'original' default) still raise NotImplementedError
    dispatch = {'dcp': dcp, 'rld': rld, 'ld': ld}
    if method not in dispatch:
        raise NotImplementedError('invalid method')
    scores = np.array(dispatch[method](folds, targets)).ravel()
    return np.array(scores).ravel()
def transfer_sequences(class_index, arr0, arr1, n_transfer, A, targets, sequences=None):
    """Transfer contents of class_index array from arr0 to arr1.

    Parameters
    ----------
    class_index : np.ndarray
        Row indices that are positive for the class being balanced.
    arr0, arr1 : np.ndarray
        Source and destination index arrays.
    n_transfer : int
        Number of rows to move (ignored when ``sequences`` is given).
    A : int
        Class column index (unused here; kept for interface compatibility).
    targets : scipy sparse matrix
        Full target matrix, used to rank candidates by label count.
    sequences : np.ndarray, optional
        Precomputed row indices to move instead of selecting them here.

    Returns
    -------
    tuple of (new arr0, new arr1, transferred indices).
    """
    # BUG FIX: np.int was removed in NumPy 1.24 -- use the builtin int
    arr0_index = np.intersect1d(class_index, arr0).astype(int)
    # number of other positive labels per candidate row
    tt = np.array(targets[arr0_index, :].sum(axis=1)).ravel()
    if sequences is not None:
        # use precomputed transfer index
        transfer_index = sequences
    else:
        # select sequences with fewest other classes to be transferred
        transfer_index = arr0_index[tt.argsort()[:n_transfer]]
    # move arr0 to arr1
    arr1 = np.concatenate((arr1, transfer_index)).astype(int)
    arr0 = np.setdiff1d(arr0, transfer_index).astype(int)
    return arr0, arr1, transfer_index
def balance(targets, A, folds, n_splits):
    """Rebalance class ``A`` across the test folds: folds with a surplus of
    positives hand examples to a shared pool, folds with a deficit claim
    them. Mutates and returns ``folds`` together with the transfer counts."""
    n_transfer = calc_transfer(targets, A, folds, n_splits)
    # rows that are positive for class A
    class_index = np.where(targets[:,A].toarray().ravel() > 0)[0]
    excess = np.array([])
    # process folds with too many test cases
    for i, n in enumerate(n_transfer):
        if n_transfer[i] < 0:
            tr_index = folds[i][0]
            test_index = folds[i][1]
            # move the surplus from the test side to the training side and
            # remember the moved rows so deficit folds can claim them
            test_index, tr_index, transfer_index = transfer_sequences(class_index, test_index, tr_index, abs(n_transfer[i]), A, targets)
            excess = np.concatenate((excess, transfer_index))
            folds[i] = tr_index, test_index #?
        else:
            continue
    # process folds with too few test cases
    for i, n in enumerate(n_transfer):
        if n_transfer[i] > 0:
            tr_index = folds[i][0]
            test_index = folds[i][1]
            # claim the first n pooled rows, then drop them from the pool
            sequences = excess[:abs(n_transfer[i])]
            excess = np.setdiff1d(excess, sequences)
            tr_index, test_index, transfer_index = transfer_sequences(class_index, tr_index, test_index, n_transfer[i], A, targets, sequences=sequences)
            folds[i] = tr_index, test_index #?
        else:
            continue
    # every pooled example must have found a new fold
    assert len(excess) == 0,'Failed to distribute all sequences'
    return folds, n_transfer
def check_folds(folds, targets):
    """Sanity-check that the folds form a valid cross-validation split:
    test sets cover every row exactly once, train/test are disjoint, and
    each pair spans the whole dataset."""
    n_rows = targets.shape[0]
    test_sets = [fold[1] for fold in folds]
    covers_all = sum(len(np.unique(ts)) for ts in test_sets) == n_rows
    disjoint = all(len(np.intersect1d(fold[0], fold[1])) == 0 for fold in folds)
    full_shape = all(len(fold[0]) + len(fold[1]) == n_rows for fold in folds)
    flat_tests = np.concatenate([np.unique(ts) for ts in test_sets])
    no_test_overlap = len(np.unique(flat_tests)) == len(np.concatenate(test_sets))
    return covers_all and no_test_overlap and disjoint and full_shape
def random_cv(n_splits, targets, seed=42):
    """Uniformly random cross-validation split, reproducible via ``seed``."""
    np.random.seed(seed)
    order = np.arange(targets.shape[0])
    np.random.shuffle(order)
    test_sets = np.array_split(order, n_splits)
    # each training set is the complement of its test set
    return [(np.setdiff1d(order, test_idx), test_idx) for test_idx in test_sets]
def calc_transfer(targets, A, folds, n_splits):
    """Number of class-``A`` test examples each fold must gain (positive)
    or shed (negative) so the positives are spread evenly.

    The integer division loses a remainder, so when the totals undershoot
    the first folds are topped up by one each until the counts sum to zero.
    """
    # positives of class A currently in each fold's test set
    per_fold = np.array([targets[f[1], A].sum() for f in folds])
    n_transfer = np.array([per_fold.sum() // n_splits - t for t in per_fold])
    if sum(n_transfer) < 0:
        # BUG FIX: np.int was removed in NumPy 1.24 -- use the builtin int
        aa = np.zeros(len(n_transfer)).astype(int)
        aa[:abs(sum(n_transfer))] = 1
        n_transfer = n_transfer + aa
    assert sum(n_transfer) == 0, 'Balancing failed'
    return n_transfer
def optisplit(n_splits, targets, method='rld', max_epochs=3, seed=42, initial_folds=None):
    """Run Optisplit.

    Parameters
    ----------
    n_splits : int
        Number of cross validation folds
    targets : scipy csr matrix
        Target matrix
    method : str (rld, dcp or ld), default=rld
        Optimisation method
    max_epochs: int, default=3
        Number of times to run optisplit over the data
    seed: int, default=42
        Random seed
    initial_folds: list, default=None
        List of numpy arrays containing cross validation fold indices. These
        are used as the initial folds.

    Returns
    -------
    list
        list of n_split tuples containing numpy arrays containing training and test fold indices.
    """
    np.random.seed(seed)
    targets = sp.csr_matrix(targets)
    class_sizes = targets.sum(axis=0)
    # if > 50% of the examples are positive, optimize the negative distribution
    pos_index = np.where(class_sizes > 0.5*targets.shape[0])[0]
    # BUG FIX: np.int was removed in NumPy 1.24 -- use the builtin int
    targets[:,pos_index] = (targets[:,pos_index] == 0).astype(int)
    class_sizes = targets.sum(axis=0)
    if initial_folds is None:
        folds0 = random_cv(n_splits, targets)
    else:
        folds0 = initial_folds
    res0 = cv_evaluate(folds0, targets, method=method)
    score0 = np.sum(res0)
    # BUG FIX: if no class is ever improved during an epoch, score1 would be
    # unbound when it is printed/compared below -- seed it with the start score
    score1 = score0
    start = time.time()
    for jjj in range(max_epochs):
        max_offset = 0
        print(f'round {jjj}')
        if jjj == 0:
            print(score0)
        for iii in range(targets.shape[1]):
            folds = deepcopy(folds0)
            # pick the currently worst-scoring class, skipping those whose
            # rebalancing previously made the overall score worse
            A = np.argsort(np.array(res0).ravel())[::-1][max_offset]
            folds, n_transfer = balance(targets, A, folds, n_splits)
            res1 = cv_evaluate(folds, targets, method=method)
            if np.sum(res0) <= np.sum(res1) or np.all(n_transfer == 0):
                #balancing unbalanced some other classes
                max_offset += 1
                continue
            score1 = np.sum(res1)
            folds0 = folds
            res0 = res1
        # NOTE(review): epoch-level placement assumed for the summary print
        # and the convergence check -- confirm against the original layout
        print(score1)
        if np.isclose(score0, score1, atol=0.1):
            break
    assert check_folds(folds, targets), 'Invalid CV folds created'
    print(f'Time: {str(timedelta(seconds=time.time()-start))}')
    print(f'Ignored {max_offset} classes')
    return folds0
def main():
    # Placeholder entry point: the module is intended to be used as a library.
    pass


if __name__ == '__main__':
    main()
| 7,298 | 31.29646 | 152 | py |
optisplit | optisplit-main/stratified_sampling_for_XML/stratify_function/stratify.py | import random
import numpy as np
from datetime import datetime
import helper_funcs
def stratified_train_test_split(X, y, target_test_size, random_state=None, epochs=50, swap_probability=0.1, threshold_proportion=0.1, decay=0.1):
    """Iteratively build a label-stratified train/test split.

    Starting from a random assignment, instances are scored by how much
    they contribute to a label imbalance between train and test, and the
    highest-scoring instances are probabilistically swapped for ``epochs``
    rounds with decaying aggressiveness.

    Parameters
    ----------
    X : sequence
        Instances (only their count and order are used here).
    y : sequence of label lists
        Labels of each instance, aligned with ``X``.
    target_test_size : float
        Desired fraction of instances in the test set.
    random_state : int, optional
        Seed for the random module.
    epochs, swap_probability, threshold_proportion, decay :
        Tuning parameters of the swapping schedule.

    Returns
    -------
    tuple of np.ndarray
        (train_index, test_index) positions into ``X``/``y``.
    """
    # IDIOM FIX: use identity comparison with the None singleton
    if random_state is not None:
        random.seed(random_state)
    # To keep track of how long the initialization takes
    start_time = datetime.now()
    # Keep track how how many instances have been swapped to train or test
    swap_counter = {
        'to_train': 0,
        'to_test': 0,
    }
    # 1. Create instances_dict to keep track of instance information:
    # labels: array of labels, []
    # train_or_test: string, 'train' or 'test'
    # instance_score: float, adjusted sum of label scores
    instances_dict = helper_funcs.create_instances_dict(X, y, target_test_size)
    # 1.5 Get average number of labels per instance
    labels_per_instance = []
    for instance_id, instance_dict in instances_dict.items():
        labels_count = len(instance_dict['labels'])
        labels_per_instance.append(labels_count)
    average_labels_per_instance = sum(labels_per_instance) / len(labels_per_instance)
    # 2. Create labels_dict to keep track of label information:
    # train: int, number of times label appears in train set
    # test: int, number of times label appears in test set
    # label_score: float, label score
    labels_dict = helper_funcs.create_labels_dict(instances_dict)
    # 3. Calculate the label score for each label in labels_dict
    # Positive score if too much of the label is in the test set
    # Negative score if too much of the label is in the train set
    helper_funcs.score_labels(labels_dict, target_test_size, average_labels_per_instance)
    # 4. Calculate the instance score for each instance in instances_dict
    # A high score means the instance is a good candidate for swapping
    helper_funcs.score_instances(instances_dict, labels_dict)
    # 5. Calculate the total score
    # The higher the score, the more 'imbalanced' the distribution of labels between train and test sets
    total_score = helper_funcs.calculate_total_score(instances_dict)
    print(f'Starting score: {round(total_score)}. Calculated in {str(datetime.now() - start_time).split(".")[0]}')
    # Main loop to create stratified train-test split
    for epoch in range(epochs):
        # To keep track of how long each itteration takes
        itteration_start_time = datetime.now()
        # 6. Calculate the threshold score for swapping
        threshold_score = helper_funcs.calculte_threshold_score(instances_dict, average_labels_per_instance, epoch, threshold_proportion, decay)
        # 7. Swap the instances with instance_score that is greater than the threshold score
        # Probability of swapping an instance is swap_probability
        helper_funcs.swap_instances(instances_dict, threshold_score, swap_counter, average_labels_per_instance, epoch, swap_probability, decay)
        # 2. Recreate labels_dict with updated train-test split
        labels_dict = helper_funcs.create_labels_dict(instances_dict)
        # 3. Recalculate the label score for each label in labels_dict
        helper_funcs.score_labels(labels_dict, target_test_size, average_labels_per_instance)
        # 4. Recalculate the instance score for each instance in instances_dict
        helper_funcs.score_instances(instances_dict, labels_dict)
        # 5. Recalculate the total score
        total_score = helper_funcs.calculate_total_score(instances_dict)
        print(f'Epoch {epoch + 1}/{epochs} score: {round(total_score)}. Calculated in {str(datetime.now() - itteration_start_time).split(".")[0]}')
    # Collect the final assignment; the unused X/y split lists of the
    # original were dropped since only the index arrays are returned.
    train_index = []
    test_index = []
    for instance_id, instance_dict in instances_dict.items():
        if instance_dict['train_or_test'] == 'train':
            train_index.append(instance_id)
        elif instance_dict['train_or_test'] == 'test':
            test_index.append(instance_id)
        else:
            print(f'Something went wrong: {instance_id}')
    return np.array(train_index), np.array(test_index)
| 4,788 | 40.284483 | 147 | py |
optisplit | optisplit-main/stratified_sampling_for_XML/stratify_function/helper_funcs.py | import random
import numpy as np
# 1. Create instances_dict to keep track of instance information:
# labels: array of labels, []
# train_or_test: string, 'train' or 'test'
# instance_score: float, adjusted sum of label scores
def create_instances_dict(X, y, target_test_size):
    """Build the per-instance bookkeeping dict: labels, an initial random
    train/test assignment (test with probability ``target_test_size``) and
    a zeroed instance score."""
    instances_dict = {}
    for instance_id in range(len(X)):
        # one uniform draw per instance decides the initial side
        assigned = 'test' if random.uniform(0, 1) <= target_test_size else 'train'
        instances_dict[instance_id] = {
            'labels': y[instance_id],
            'train_or_test': assigned,
            'instance_score': 0,
        }
    return instances_dict
# 2. Create labels_dict to keep track of label information:
# train: int, number of times label appears in train set
# test: int, number of times label appears in test set
# label_score: float, label score
def create_labels_dict(instances_dict):
    """Count how many times each label appears in the train and test sets.

    Parameters
    ----------
    instances_dict : dict
        Per-instance dicts with 'labels' and 'train_or_test' entries.

    Returns
    -------
    dict
        Maps label -> {'train': int, 'test': int, 'label_score': 0}.
    """
    labels_dict = {}
    for instance_dict in instances_dict.values():
        train_or_test = instance_dict['train_or_test']
        for label in instance_dict['labels']:
            # BUG FIX: setdefault replaces the original bare try/except,
            # which silently swallowed every exception type while merely
            # testing for a missing key.
            counts = labels_dict.setdefault(label, {'train': 0, 'test': 0, 'label_score': 0})
            if train_or_test == 'train':
                counts['train'] += 1
            else:
                counts['test'] += 1
    return labels_dict
# 3. Calculate the label score for each label in labels_dict
# Positive score if too much of the label is in the test set
# Negative score if too much of the label is in the train set
def score_labels(labels_dict, target_test_size, average_labels_per_instance):
    """Score each label by how far its test share drifts from the target:
    positive when over-represented in test, negative when over-represented
    in train, with an extra penalty when one side holds (nearly) all of it."""
    for label_dict in labels_dict.values():
        score = 0
        total = label_dict['train'] + label_dict['test']
        # singleton labels cannot be balanced, leave them at zero
        if total > 1:
            test_share = label_dict['test'] / total
            if test_share >= target_test_size:
                # too much of the label sits in the test set
                score = (test_share - target_test_size) / (1 - target_test_size)
                if test_share > 0.999:
                    score += average_labels_per_instance
            else:
                # too much of the label sits in the train set
                score = (test_share - target_test_size) / target_test_size
                if test_share < 0.001:
                    score -= average_labels_per_instance
        label_dict['label_score'] = score
# 4. Calculate the instance score for each instance in instances_dict
# A high score means the instance is a good candidate for swapping
def score_instances(instances_dict, labels_dict):
    """Score each instance from its label scores: a high score marks the
    instance as a good candidate for swapping to the other side."""
    for instance_id, instance_dict in instances_dict.items():
        total = 0
        group = instance_dict['train_or_test']
        for label in instance_dict['labels']:
            label_score = labels_dict[label]['label_score']
            if label_score > 0:
                # label over-represented in the test set
                if group == 'test':
                    total += label_score
                elif group == 'train':
                    total -= label_score
                else:
                    print(f'Something went wrong: {instance_id}')
            elif label_score < 0:
                # label over-represented in the train set
                if group == 'train':
                    total -= label_score
                elif group == 'test':
                    total += label_score
                else:
                    print(f'Something went wrong: {instance_id}')
        instance_dict['instance_score'] = total
# 5. Calculate the total score
# The higher the score, the more 'imbalanced' the distribution of labels between train and test sets
def calculate_total_score(instances_dict):
    """Sum of all instance scores; the higher the score, the more imbalanced
    the train/test label distribution."""
    return sum(entry['instance_score'] for entry in instances_dict.values())
# 6. Calculate the threshold score for swapping
def calculte_threshold_score(instances_dict, average_labels_per_instance, epoch, threshold_proportion, decay):
    """Swap threshold for this epoch: a high quantile of the non-extreme
    instance scores, tightening each epoch; never negative.
    (The misspelled name is kept for interface compatibility.)"""
    eligible = [
        entry['instance_score']
        for entry in instances_dict.values()
        if entry['instance_score'] < average_labels_per_instance
    ]
    quantile_level = 1 - (threshold_proportion / ((1 + decay) ** epoch))
    threshold = np.quantile(eligible, quantile_level)
    return max(threshold, 0)
# 7. Swap the instances with instance_score that is greater than the threshold score
# Probability of swapping an instance is swap_probability
def swap_instances(instances_dict, threshold_score, swap_counter, average_labels_per_instance, epoch, swap_probability, decay):
    """Randomly flip the train/test membership of high-scoring instances.

    Extreme scorers (>= average labels per instance) flip with a fixed,
    slowly decaying probability; scorers above the threshold flip with the
    epoch-decayed swap probability. ``swap_counter`` records the directions.
    """
    def _flip(entry):
        # swap the group membership and record the direction
        if entry['train_or_test'] == 'train':
            entry['train_or_test'] = 'test'
            swap_counter['to_test'] += 1
        elif entry['train_or_test'] == 'test':
            entry['train_or_test'] = 'train'
            swap_counter['to_train'] += 1

    for entry in instances_dict.values():
        score = entry['instance_score']
        if score >= average_labels_per_instance:
            if random.uniform(0, 1) <= 0.25 / (1.05 ** epoch):
                _flip(entry)
        elif score > threshold_score and random.uniform(0, 1) <= swap_probability / ((1 + decay) ** epoch):
            _flip(entry)
| 6,577 | 47.014599 | 127 | py |
PC-JeDi | PC-JeDi-main/src/plotting.py | from copy import deepcopy
from functools import partial
from pathlib import Path
from typing import Optional, Union
import matplotlib.pyplot as plt
import numpy as np
import PIL
import wandb
from jetnet.utils import efps
def plot_multi_hists(
    data_list: Union[list, np.ndarray],
    data_labels: Union[list, str],
    col_labels: Union[list, str],
    path: Optional[Union[Path, str]] = None,
    scale_factors: Optional[list] = None,
    do_err: bool = False,
    do_norm: bool = False,
    bins: Union[list, str, partial] = "auto",
    logy: bool = False,
    y_label: Optional[str] = None,
    ylim: Optional[list] = None,
    rat_ylim: tuple = (0, 2),
    rat_label: Optional[str] = None,
    scale: int = 5,
    do_legend: bool = True,
    hist_kwargs: Optional[list] = None,
    err_kwargs: Optional[list] = None,
    legend_kwargs: Optional[dict] = None,
    incl_overflow: bool = True,
    incl_underflow: bool = True,
    do_ratio_to_first: bool = False,
    return_fig: bool = False,
    return_img: bool = False,
) -> Union[plt.Figure, None]:
    """Plot multiple histograms given a list of 2D tensors/arrays.

    - Performs the histogramming here
    - Each column the arrays will be a seperate axis
    - Matching columns in each array will be superimposed on the same axis
    - If the tensor being passed is 3D it will average them and combine the uncertainty

    args:
        data_list: A list of tensors or numpy arrays, each col will be a seperate axis
        data_labels: A list of labels for each tensor in data_list
        col_labels: A list of labels for each column/axis
        path: The save location of the plots (include img type)
        scale_factors: List of scalars to be applied to each histogram
        do_err: If the statistical errors should be included as shaded regions
        do_norm: If the histograms are to be a density plot
        bins: List of bins to use for each axis, can use numpy's strings
        logy: If we should use the log in the y-axis
        y_label: Label for the y axis of the plots
        ylim: The y limits for all plots
        rat_ylim: The y limits of the ratio plots
        rat_label: The label for the ratio plot
        scale: The size in inches for each subplot
        do_legend: If the legend should be plotted
        hist_kwargs: Additional keyword arguments for the line for each histogram
        legend_kwargs: Extra keyword arguments to pass to the legend constructor
        incl_overflow: Have the final bin include the overflow
        incl_underflow: Have the first bin include the underflow
        do_ratio_to_first: Include a ratio plot to the first histogram in the list
        as_pdf: Also save an additional image in pdf format
        return_fig: Return the figure (DOES NOT CLOSE IT!)
        return_img: Return a PIL image (will close the figure)
    """

    # Make the arguments lists for generality
    if not isinstance(data_list, list):
        data_list = [data_list]
    if isinstance(data_labels, str):
        data_labels = [data_labels]
    if isinstance(col_labels, str):
        col_labels = [col_labels]
    if not isinstance(bins, list):
        bins = data_list[0].shape[-1] * [bins]
    if not isinstance(scale_factors, list):
        scale_factors = len(data_list) * [scale_factors]
    if not isinstance(hist_kwargs, list):
        hist_kwargs = len(data_list) * [hist_kwargs]
    if not isinstance(err_kwargs, list):
        err_kwargs = len(data_list) * [err_kwargs]

    # Cycle through the datalist and ensure that they are 2D, as each column is an axis
    for data_idx in range(len(data_list)):
        if data_list[data_idx].ndim < 2:
            data_list[data_idx] = data_list[data_idx].unsqueeze(-1)

    # Check the number of histograms to plot
    n_data = len(data_list)
    n_axis = data_list[0].shape[-1]

    # Make sure that all the list lengths are consistant
    assert len(data_labels) == n_data
    assert len(col_labels) == n_axis
    assert len(bins) == n_axis

    # Make sure the there are not too many subplots
    if n_axis > 20:
        raise RuntimeError("You are asking to create more than 20 subplots!")

    # Create the figure and axes lists
    dims = np.array([1, n_axis])  # Subplot is (n_rows, n_columns)
    size = np.array([n_axis, 1.0])  # Size is (width, height)
    if do_ratio_to_first:
        dims *= np.array([2, 1])  # Double the number of rows
        size *= np.array([1, 1.2])  # Increase the height
    fig, axes = plt.subplots(
        *dims,
        figsize=tuple(scale * size),
        # BUG FIX: the single-row fallback was a set literal {1}; matplotlib
        # expects a sequence of ratios, one per row
        gridspec_kw={"height_ratios": [3, 1] if do_ratio_to_first else [1]},
        squeeze=False,
    )

    # Cycle through each axis and determine the bins that should be used
    # Automatic/Interger bins are replaced using the first item in the data list
    for ax_idx in range(n_axis):
        ax_bins = bins[ax_idx]
        if isinstance(ax_bins, partial):
            ax_bins = ax_bins()

        # If the axis bins was specified to be 'auto' or another numpy string
        if isinstance(ax_bins, str):
            unq = np.unique(data_list[0][:, ax_idx])
            n_unique = len(unq)

            # If the number of datapoints is less than 10 then use even spacing
            if 1 < n_unique < 10:
                ax_bins = (unq[1:] + unq[:-1]) / 2  # Use midpoints, add final, initial
                ax_bins = np.append(ax_bins, unq.max() + unq.max() - ax_bins[-1])
                ax_bins = np.insert(ax_bins, 0, unq.min() + unq.min() - ax_bins[0])

            # Numpy function to get the bin edges, catches all other cases (int, etc)
            ax_bins = np.histogram_bin_edges(data_list[0][:, ax_idx], bins=ax_bins)

        # Replace the element in the array with the edges
        bins[ax_idx] = ax_bins

    # Cycle through each of the axes
    for ax_idx in range(n_axis):
        # Get the bins for this axis
        ax_bins = bins[ax_idx]

        # Cycle through each of the data arrays
        for data_idx in range(n_data):
            # Apply overflow and underflow (make a copy)
            data = np.copy(data_list[data_idx][..., ax_idx]).squeeze()
            if incl_overflow:
                data = np.minimum(data, ax_bins[-1])
            if incl_underflow:
                data = np.maximum(data, ax_bins[0])

            # If the data is still a 2D tensor treat it as a collection of histograms
            if data.ndim > 1:
                h = []
                for dim in range(data.shape[-1]):
                    h.append(np.histogram(data[:, dim], ax_bins, density=do_norm)[0])

                # Nominal and err is based on chi2 of same value, mult measurements
                hist = 1 / np.mean(1 / np.array(h), axis=0)
                hist_err = np.sqrt(1 / np.sum(1 / np.array(h), axis=0))

            # Otherwise just calculate a single histogram
            else:
                hist, _ = np.histogram(data, ax_bins, density=do_norm)
                hist_err = np.sqrt(hist)

            # Apply the scale factors
            if scale_factors[data_idx] is not None:
                # BUG FIX: multiply by this dataset's scalar, not by the
                # whole scale_factors list
                hist *= scale_factors[data_idx]
                hist_err *= scale_factors[data_idx]

            # Save the first histogram for the ratio plots
            if data_idx == 0:
                denom_hist = hist
                denom_err = hist_err

            # Get the additional keyword arguments for the histograms and errors
            if hist_kwargs[data_idx] is not None and bool(hist_kwargs[data_idx]):
                h_kwargs = deepcopy(hist_kwargs[data_idx])
            else:
                h_kwargs = {}

            # Use the stair function to plot the histograms
            line = axes[0, ax_idx].stairs(
                hist, ax_bins, label=data_labels[data_idx], **h_kwargs
            )

            if err_kwargs[data_idx] is not None and bool(err_kwargs[data_idx]):
                e_kwargs = deepcopy(err_kwargs[data_idx])
            else:
                e_kwargs = {"color": line._edgecolor, "alpha": 0.2, "fill": True}

            # Include the uncertainty in the plots as a shaded region
            if do_err:
                axes[0, ax_idx].stairs(
                    hist + hist_err,
                    ax_bins,
                    baseline=hist - hist_err,
                    **e_kwargs,
                )

            # Add a ratio plot
            if do_ratio_to_first:
                if hist_kwargs[data_idx] is not None and bool(hist_kwargs[data_idx]):
                    ratio_kwargs = deepcopy(hist_kwargs[data_idx])
                else:
                    ratio_kwargs = {
                        "color": line._edgecolor,
                        "linestyle": line._linestyle,
                    }
                ratio_kwargs["fill"] = False  # Never fill a ratio plot

                # Calculate the new ratio values with their errors
                rat_hist = hist / denom_hist
                rat_err = rat_hist * np.sqrt(
                    (hist_err / hist) ** 2 + (denom_err / denom_hist) ** 2
                )

                # Plot the ratios
                axes[1, ax_idx].stairs(
                    rat_hist,
                    ax_bins,
                    **ratio_kwargs,
                )

                # Use a standard shaded region for the errors
                if do_err:
                    axes[1, ax_idx].stairs(
                        rat_hist + rat_err,
                        ax_bins,
                        baseline=rat_hist - rat_err,
                        **e_kwargs,
                    )

    # Cycle again through each axis and apply editing
    for ax_idx in range(n_axis):
        ax_bins = bins[ax_idx]

        # X axis
        axes[0, ax_idx].set_xlim(ax_bins[0], ax_bins[-1])
        if do_ratio_to_first:
            axes[0, ax_idx].set_xticklabels([])
            axes[1, ax_idx].set_xlabel(col_labels[ax_idx])
            axes[1, ax_idx].set_xlim(ax_bins[0], ax_bins[-1])
        else:
            axes[0, ax_idx].set_xlabel(col_labels[ax_idx])

        # Y axis
        if logy:
            axes[0, ax_idx].set_yscale("log")
        if ylim is not None:
            axes[0, ax_idx].set_ylim(*ylim)
        else:
            _, ylim2 = axes[0, ax_idx].get_ylim()
            if logy:
                axes[0, ax_idx].set_ylim(top=10 ** (np.log10(ylim2) * 1.40))
            else:
                axes[0, ax_idx].set_ylim(top=ylim2 * 1.35)
        if y_label is not None:
            axes[0, ax_idx].set_ylabel(y_label)
        elif do_norm:
            axes[0, ax_idx].set_ylabel("Normalised Entries")
        else:
            axes[0, ax_idx].set_ylabel("Entries")

        # Ratio Y axis
        if do_ratio_to_first:
            axes[1, ax_idx].set_ylim(rat_ylim)
            if rat_label is not None:
                axes[1, ax_idx].set_ylabel(rat_label)
            else:
                axes[1, ax_idx].set_ylabel(f"Ratio to {data_labels[0]}")

        # Legend
        if do_legend:
            legend_kwargs = legend_kwargs or {}
            axes[0, ax_idx].legend(**legend_kwargs)

    # Final figure layout
    fig.tight_layout()
    if do_ratio_to_first:
        fig.subplots_adjust(hspace=0.08)  # For ratio plots minimise the h_space

    # Save the file
    if path is not None:
        fig.savefig(path)

    # Return a rendered image, or the matplotlib figure, or close
    if return_img:
        # BUG FIX: ensure the canvas has been rendered before grabbing the
        # RGB buffer (savefig may not have run when path is None)
        fig.canvas.draw()
        img = PIL.Image.frombytes(
            "RGB", fig.canvas.get_width_height(), fig.canvas.tostring_rgb()
        )
        plt.close(fig)
        return img
    if return_fig:
        return fig
    plt.close(fig)
def locals_to_rel_mass_and_efp(csts: np.ndarray, mask: np.ndarray) -> np.ndarray:
    """Convert the values of a set of constituents to the relative mass and
    EFP values of the jet they belong to.

    Args:
        csts: A numpy array of shape (batch_size, n_csts, 3)
            containing the (eta, phi, pt) values of the constituents.
        mask: A numpy array of shape (batch_size, n_csts)
            containing a mask for the constituents, used to sum only over
            the valid constituents.

    Returns:
        A numpy array of shape (batch_size, 2)
            containing the relative mass and EFP values of the jet.
    """
    # Split the constituent array into its kinematic components
    eta = csts[..., 0]
    phi = csts[..., 1]
    pt = csts[..., 2]

    # Sum the cartesian four-momentum components over the valid constituents
    px = np.sum(pt * np.cos(phi) * mask, axis=-1)
    py = np.sum(pt * np.sin(phi) * mask, axis=-1)
    pz = np.sum(pt * np.sinh(eta) * mask, axis=-1)
    e = np.sum(pt * np.cosh(eta) * mask, axis=-1)

    # Invariant mass, clipped at zero so the sqrt never sees a negative value
    jet_m = np.sqrt(np.clip(e**2 - px**2 - py**2 - pz**2, 0, None))

    # Mean of the energy-flow-polynomial values for each jet
    jet_efps = efps(csts, efp_jobs=1).mean(axis=-1)

    return np.vstack([jet_m, jet_efps]).T
def plot_mpgan_marginals(
    outputs: np.ndarray,
    nodes: np.ndarray,
    mask: np.ndarray,
    current_epoch: int,
) -> None:
    """Plot and save histograms comparing generated constituents/jets to the
    originals, and log the images to wandb when a run is active.

    Args:
        outputs: Generated constituents, shape (batch, n_csts, 3).
            NOTE(review): clipped IN PLACE below, so the caller's array is
            modified — confirm callers do not rely on the raw values.
        nodes: Original (reference) constituents, same shape as outputs.
        mask: Boolean mask of valid constituents, shape (batch, n_csts).
        current_epoch: Used only to name the saved plot files.
    """
    # Clip the outputs for the marginals to match expected jet spread
    outputs[..., 0] = np.clip(outputs[..., 0], -0.5, 0.5)
    outputs[..., 1] = np.clip(outputs[..., 1], -0.5, 0.5)
    outputs[..., 2] = np.clip(outputs[..., 2], 0, 1)

    # Plot histograms for the constituent marginals (masked entries excluded)
    Path("./plots/").mkdir(parents=False, exist_ok=True)
    cst_img = plot_multi_hists(
        data_list=[nodes[mask], outputs[mask]],
        data_labels=["Original", "Generated"],
        col_labels=[r"$\Delta \eta$", r"$\Delta \phi$", r"$\frac{p_T}{Jet_{p_T}}$"],
        do_norm=True,
        return_img=True,
        path=f"./plots/csts_{current_epoch}",
        logy=True,
    )

    # Convert to total jet mass and pt, do some clamping to make everyone happy
    pred_jets = locals_to_rel_mass_and_efp(outputs, mask)
    pred_jets[:, 0] = np.clip(pred_jets[:, 0], 0, 0.4)
    pred_jets[:, 1] = np.clip(pred_jets[:, 1], 0, 4e-3)
    pred_jets = np.nan_to_num(pred_jets)

    real_jets = locals_to_rel_mass_and_efp(nodes, mask)
    real_jets[:, 0] = np.clip(real_jets[:, 0], 0, 0.4)
    real_jets[:, 1] = np.clip(real_jets[:, 1], 0, 4e-3)
    real_jets = np.nan_to_num(real_jets)

    # Image for the total jet variables
    jet_img = plot_multi_hists(
        data_list=[real_jets, pred_jets],
        data_labels=["Original", "Generated"],
        col_labels=["Relative Jet Mass", "Jet EFP"],
        do_norm=True,
        return_img=True,
        path=f"./plots/jets_{current_epoch}",
    )

    # Create the wandb table and add the data (only if a wandb run is active)
    if wandb.run is not None:
        gen_table = wandb.Table(columns=["constituents", "jets"])
        gen_table.add_data(wandb.Image(cst_img), wandb.Image(jet_img))
        # commit=False so the images attach to the next regular metric log
        wandb.run.log({"generated": gen_table}, commit=False)
| 14,847 | 36.589873 | 87 | py |
PC-JeDi | PC-JeDi-main/src/physics.py | # import jetnet
import numpy as np
import pytorch_lightning as pl
import torch as T
# Fix the global random seeds (python, numpy, torch) for reproducibility.
# NOTE(review): this runs as an import-time side effect of the module.
pl.seed_everything(0, workers=True)
def locals_to_mass_and_pt(csts: T.Tensor, mask: T.BoolTensor) -> T.Tensor:
    """Calculate the overall jet pt and mass from the constituents.

    The constituents are expected to be expressed as:
    - del_eta
    - del_phi
    - log_pt
    """
    # Unpack the constituent coordinates, recovering raw pt from its log
    eta = csts[..., 0]
    phi = csts[..., 1]
    pt = csts[..., 2].exp()

    # Sum the cartesian four-momentum over the valid constituents only
    px = (pt * phi.cos() * mask).sum(dim=-1)
    py = (pt * phi.sin() * mask).sum(dim=-1)
    pz = (pt * eta.sinh() * mask).sum(dim=-1)
    e = (pt * eta.cosh() * mask).sum(dim=-1)

    # Derived jet observables, clamped at zero so the sqrt never sees negatives
    jet_pt = (px.square() + py.square()).clamp_min(0).sqrt()
    jet_m = (e.square() - px.square() - py.square() - pz.square()).clamp_min(0).sqrt()

    return T.vstack([jet_pt, jet_m]).T
def numpy_locals_to_mass_and_pt(
    csts: np.ndarray,
    mask: np.ndarray,
    pt_logged=False,
) -> np.ndarray:
    """Calculate the overall jet pt and mass from the constituents.

    The constituents are expected to be expressed as (del_eta, del_phi, pt),
    where the final entry is log_pt when pt_logged is True.
    """
    # Pull out the angular coordinates and recover the raw pt values
    eta = csts[..., 0]
    phi = csts[..., 1]
    if pt_logged:
        pt = np.exp(csts[..., 2]) * mask
    else:
        pt = csts[..., 2]

    # Sum the cartesian four-momentum over the valid constituents only
    px = np.sum(pt * np.cos(phi) * mask, axis=-1)
    py = np.sum(pt * np.sin(phi) * mask, axis=-1)
    pz = np.sum(pt * np.sinh(eta) * mask, axis=-1)
    e = np.sum(pt * np.cosh(eta) * mask, axis=-1)

    # Derived jet observables, clipped so the sqrt never sees a negative value
    jet_pt = np.sqrt(np.clip(px**2 + py**2, 0, None))
    jet_m = np.sqrt(np.clip(e**2 - px**2 - py**2 - pz**2, 0, None))

    return np.vstack([jet_pt, jet_m]).T
| 2,120 | 30.191176 | 84 | py |
PC-JeDi | PC-JeDi-main/src/utils.py | 0 | 0 | 0 | py |
|
PC-JeDi | PC-JeDi-main/src/numpy_utils.py | import numpy as np
def undo_log_squash(data: np.ndarray) -> np.ndarray:
    """Invert log_squash, recovering the original values from squashed ones."""
    sign = np.sign(data)
    return sign * (np.exp(np.abs(data)) - 1)
def log_squash(data: np.ndarray) -> np.ndarray:
    """Apply a sign-preserving log squash for distributions with high tails."""
    magnitude = np.log(np.abs(data) + 1)
    return np.sign(data) * magnitude
| 352 | 28.416667 | 75 | py |
PC-JeDi | PC-JeDi-main/src/torch_utils.py | from typing import Union
import numpy as np
import torch as T
import torch.nn as nn
def get_loss_fn(name: str, **kwargs) -> nn.Module:
    """Return a pytorch loss function given a name.

    Args:
        name: Identifier of the loss: "none", "huber", "mse" or "mae".
        kwargs: Unused, kept for call-site compatibility.

    Returns:
        The matching loss module with reduction="none" (so callers can mask
        or weight per element), or None when name is "none".

    Raises:
        ValueError: If the name does not match any known loss function.
    """
    if name == "none":
        return None

    # Regression losses
    if name == "huber":
        return nn.HuberLoss(reduction="none")
    if name == "mse":
        return nn.MSELoss(reduction="none")
    if name == "mae":
        return nn.L1Loss(reduction="none")

    # Previously an unknown name silently returned None, hiding config typos
    raise ValueError(f"Unknown loss function: {name}")
def to_np(inpt: Union[T.Tensor, tuple]) -> np.ndarray:
    """Convert a pytorch tensor (or tuple/list of tensors) to a numpy array.

    Handles gradient detachment, device migration, and the bfloat16 dtype
    which numpy cannot represent directly.
    """
    # Recurse into containers, preserving the container type
    if isinstance(inpt, (tuple, list)):
        return type(inpt)(to_np(item) for item in inpt)

    # Numpy has no bfloat16, so cast down to float16 first
    if inpt.dtype == T.bfloat16:
        inpt = inpt.half()

    return inpt.detach().cpu().numpy()
| 918 | 26.848485 | 77 | py |
PC-JeDi | PC-JeDi-main/src/hydra_utils.py | """A collection of misculaneous functions usefull for the lighting/hydra
template."""
import logging
import os
from pathlib import Path
from typing import Any, List, Sequence
import hydra
import rich
import rich.syntax
import rich.tree
import wandb
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.utilities.rank_zero import rank_zero_only
log = logging.getLogger(__name__)
@rank_zero_only
def reload_original_config(cfg: OmegaConf, get_best: bool = False) -> OmegaConf:
    """Replaces the cfg with the one stored at the checkpoint location.

    Will also set the chkpt_dir to the latest version of the last or
    best checkpoint.

    Args:
        cfg: The current config (unused beyond the signature; the returned
            config is loaded entirely from disk).
        get_best: Pick the newest "best*" checkpoint instead of "last*".

    Returns:
        The config loaded from full_config.yaml in the current directory,
        with ckpt_path pointing to the selected checkpoint.
    """
    # Load the original config found in the the file directory
    orig_cfg = OmegaConf.load(Path("full_config.yaml"))

    # Get the latest updated checkpoint with the prefix last or best,
    # sorted by modification time so [-1] is the most recent one
    flag = "best" if get_best else "last"
    orig_cfg.ckpt_path = str(
        sorted(Path.cwd().glob(f"checkpoints/{flag}*.ckpt"), key=os.path.getmtime)[-1]
    )

    # Set the wandb logger to attempt to resume the job
    if hasattr(orig_cfg, "loggers"):
        if hasattr(orig_cfg.loggers, "wandb"):
            orig_cfg.loggers.wandb.resume = True

    return orig_cfg
@rank_zero_only
def print_config(
    cfg: DictConfig,
    print_order: Sequence[str] = (
        "datamodule",
        "model",
        "callbacks",
        "loggers",
        "trainer",
        "paths",
    ),
    resolve: bool = True,
) -> None:
    """Prints content of DictConfig using Rich library and its tree structure.

    Args:
        cfg: Configuration composed by Hydra.
        print_order: Determines in what order config components are printed.
        resolve: Whether to resolve reference fields of DictConfig.
    """
    style = "dim"
    tree = rich.tree.Tree("CONFIG", style=style, guide_style=style)

    queue = []

    # add fields from `print_order` to queue
    # (conditional-expression statement: appends when present, warns otherwise)
    for field in print_order:
        queue.append(field) if field in cfg else log.warning(
            f"Field '{field}' not found in config. Skipping '{field}' printing..."
        )

    # add all the other fields to queue (not specified in `print_order`);
    # inserted at the front so they print before the ordered fields
    for field in cfg:
        if field not in queue:
            queue.insert(0, field)

    # generate config tree from queue, rendering each group as YAML
    for field in queue:
        branch = tree.add(field, style=style, guide_style=style)
        config_group = cfg[field]
        if isinstance(config_group, DictConfig):
            branch_content = OmegaConf.to_yaml(config_group, resolve=resolve)
        else:
            branch_content = str(config_group)
        branch.add(rich.syntax.Syntax(branch_content, "yaml"))

    # print config tree
    rich.print(tree)
def save_config(cfg: OmegaConf) -> None:
    """Saves the config to the output directory.

    This is necc ontop of hydra's default conf.yaml as it will resolve the entries
    allowing one to resume jobs identically with elements such as ${now:%H-%M-%S}.
    Furthermore, hydra does not allow resuming a previous job from the same dir.
    The work around is reload_original_config but that will fail as hydra overwites
    the default config.yaml file on startup, so this backup is needed for resuming.

    Args:
        cfg: The fully composed config; must provide paths.full_path.
    """
    # In order to be able to resume the wandb logger session, save the run id
    if hasattr(cfg, "loggers"):
        if hasattr(cfg.loggers, "wandb"):
            if wandb.run is not None:
                cfg.loggers.wandb.id = wandb.run.id

    # save config tree to file (resolve=True bakes in interpolated values)
    OmegaConf.save(cfg, Path(cfg.paths.full_path, "full_config.yaml"), resolve=True)
@rank_zero_only
def log_hyperparameters(
    cfg: DictConfig, model: LightningModule, trainer: Trainer
) -> None:
    """Pass the resolved config to the trainer's logger, together with the
    model's parameter counts."""
    # Resolve the config into a plain dictionary of hyperparameters
    hparams = OmegaConf.to_container(cfg, resolve=True)

    # Count the model parameters, split by whether they require gradients
    n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    n_frozen = sum(p.numel() for p in model.parameters() if not p.requires_grad)
    hparams["model/params/total"] = n_trainable + n_frozen
    hparams["model/params/trainable"] = n_trainable
    hparams["model/params/non_trainable"] = n_frozen

    trainer.logger.log_hyperparams(hparams)
def instantiate_collection(cfg_coll: DictConfig) -> List[Any]:
    """Uses hydra to instantiate a collection of classes and return a list."""
    # An empty/missing collection is allowed, it just yields nothing
    if not cfg_coll:
        log.warning("List of configs is empty")
        return []

    if not isinstance(cfg_coll, DictConfig):
        raise TypeError("List of configs must be a DictConfig!")

    # Instantiate every entry that carries a hydra _target_ key
    objs = []
    for cb_conf in cfg_coll.values():
        if isinstance(cb_conf, DictConfig) and "_target_" in cb_conf:
            log.info(f"Instantiating <{cb_conf._target_}>")
            objs.append(hydra.utils.instantiate(cb_conf))

    return objs
| 5,097 | 30.8625 | 86 | py |
PC-JeDi | PC-JeDi-main/src/__init__.py | 0 | 0 | 0 | py |
|
PC-JeDi | PC-JeDi-main/src/datamodules/__init__.py | 0 | 0 | 0 | py |
|
PC-JeDi | PC-JeDi-main/src/datamodules/jetnet.py | from copy import deepcopy
from typing import Mapping
import numpy as np
from jetnet.datasets import JetNet
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, Dataset
from src.numpy_utils import log_squash
from src.physics import numpy_locals_to_mass_and_pt
class JetNetData(Dataset):
    """Wrapper for the JetNet dataset so it works with our models with
    different inputs."""

    def __init__(self, **kwargs) -> None:
        """All keyword arguments not popped below are forwarded verbatim to
        JetNet.getData (e.g. jet_type, data_dir, split, num_particles)."""
        # Extra arguments used here
        self.log_squash_pt = kwargs.pop("log_squash_pt", False)
        self.high_as_context = kwargs.pop("high_as_context", True)
        self.recalc_high = kwargs.pop("recalculate_jet_from_pc", True)
        self.n_jets = kwargs.pop("n_jets", None)

        # All other arguments passed to the jetnet dataset constructor
        self.csts, self.high = JetNet.getData(**kwargs)
        self.csts = self.csts.astype(np.float32)
        self.high = self.high.astype(np.float32)

        # Trim the data based on the requested number of jets (None does nothing)
        # NOTE(review): the astype here is redundant, the arrays are already f32
        self.csts = self.csts[: self.n_jets].astype(np.float32)
        self.high = self.high[: self.n_jets].astype(np.float32)

        # Manually calculate the mask by looking for zero padding
        self.mask = ~np.all(self.csts == 0, axis=-1)

        # Change the constituent information from pt-fraction to pure pt
        # (assumes high[..., 0] is the jet pt — broadcasts over constituents)
        csts = self.csts.copy()
        csts[..., -1] = csts[..., -1] * self.high[..., 0:1]

        # Recalculate the jet mass and pt using the point cloud
        if self.recalc_high:
            self.high = numpy_locals_to_mass_and_pt(csts, self.mask)

        # Change the pt fraction to log_squash(pt), masked to keep padding zero
        if self.log_squash_pt:
            self.csts[..., -1] = log_squash(csts[..., -1]) * self.mask

    def __getitem__(self, idx) -> tuple:
        """Return (constituents, mask, high-level features) for one jet.

        high is an empty array when high_as_context is False.
        """
        csts = self.csts[idx]
        high = self.high[idx] if self.high_as_context else np.empty(0, dtype="f")
        mask = self.mask[idx]
        return csts, mask, high

    def __len__(self) -> int:
        return len(self.high)
class JetNetDataModule(LightningDataModule):
    """Lightning datamodule wrapping JetNetData for train/valid/test splits."""

    def __init__(
        self,
        *,
        data_conf: Mapping,
        loader_kwargs: Mapping,
    ) -> None:
        """
        Args:
            data_conf: Keyword arguments for the JetNetData constructor
                (must contain particle_features, num_particles, jet_features,
                high_as_context).
            loader_kwargs: Keyword arguments for the pytorch DataLoaders.
        """
        super().__init__()
        self.save_hyperparameters(logger=False)

        # Get the dimensions of the data from the config file
        self.dim = len(data_conf["particle_features"])
        self.n_nodes = data_conf["num_particles"]
        # Context dimension is zero when the high-level feats are not used
        if data_conf["high_as_context"]:
            self.ctxt_dim = len(data_conf["jet_features"])
        else:
            self.ctxt_dim = 0

    def setup(self, stage: str) -> None:
        """Sets up the relevant datasets."""
        # NOTE(review): validation reuses the "test" split of JetNet
        if stage == "fit":
            self.train_set = JetNetData(**self.hparams.data_conf, split="train")
            self.valid_set = JetNetData(**self.hparams.data_conf, split="test")
        if stage == "test":
            self.test_set = JetNetData(**self.hparams.data_conf, split="test")

    def train_dataloader(self) -> DataLoader:
        return DataLoader(self.train_set, **self.hparams.loader_kwargs, shuffle=True)

    def val_dataloader(self) -> DataLoader:
        return DataLoader(self.valid_set, **self.hparams.loader_kwargs, shuffle=False)

    def test_dataloader(self) -> DataLoader:
        # Keep every jet at test time, even a final partial batch
        test_kwargs = deepcopy(self.hparams.loader_kwargs)
        test_kwargs["drop_last"] = False
        return DataLoader(self.test_set, **test_kwargs, shuffle=False)
| 3,490 | 34.989691 | 86 | py |
PC-JeDi | PC-JeDi-main/src/models/diffusion.py | import math
from typing import Optional, Tuple
import torch as T
from tqdm import tqdm
class VPDiffusionSchedule:
    """Variance-preserving diffusion schedule built on the cosine annealing
    functions defined below."""

    def __init__(self, max_sr: float = 1, min_sr: float = 1e-2) -> None:
        # Signal rate at the start (max_sr) and end (min_sr) of diffusion
        self.max_sr = max_sr
        self.min_sr = min_sr

    def __call__(self, time: T.Tensor) -> Tuple[T.Tensor, T.Tensor]:
        # Returns the (signal_rate, noise_rate) pair for the given times
        return cosine_diffusion_shedule(time, self.max_sr, self.min_sr)

    def get_betas(self, time: T.Tensor) -> T.Tensor:
        # Beta values of the underlying SDE at the given times
        return cosine_beta_shedule(time, self.max_sr, self.min_sr)
def cosine_diffusion_shedule(
    diff_time: T.Tensor, max_sr: float = 1, min_sr: float = 1e-2
) -> Tuple[T.Tensor, T.Tensor]:
    """Calculates the signal and noise rate for any point in the diffusion
    processes.

    Using continuous diffusion times between 0 and 1 which make switching between
    different numbers of diffusion steps between training and testing much easier.
    Returns only the values needed for the jump forward diffusion step and the
    reverse DDIM step: sqrt(alpha_bar) and sqrt(1-alpha_bar), called the
    signal_rate and noise_rate respectively.

    The jump forward diffusion process is simply a weighted sum of:
        input * signal_rate + eps * noise_rate

    Uses a cosine annealing schedule as proposed in
    https://arxiv.org/abs/2102.09672

    Args:
        diff_time: The time used to sample the diffusion scheduler.
            Output will match the shape. Must be between 0 and 1.
        max_sr: The initial rate at the first step.
        min_sr: How much signal is preserved at end of diffusion
            (can't be zero due to log).
    """
    # Map the time interval [0, 1] onto the angular interval fixed by the
    # requested maximum and minimum signal rates
    start_angle = math.acos(max_sr)
    end_angle = math.acos(min_sr)
    angles = start_angle + diff_time * (end_angle - start_angle)

    # signal = cos(angle), noise = sin(angle), so signal^2 + noise^2 = 1
    return T.cos(angles), T.sin(angles)
def cosine_beta_shedule(
    diff_time: T.Tensor, max_sr: float = 1, min_sr: float = 1e-2
) -> T.Tensor:
    """Return the beta values for the continuous flows using the cosine
    scheduler defined above."""
    # Angular span covered by the cosine schedule between max_sr and min_sr
    span = math.acos(min_sr) - math.acos(max_sr)
    angles = math.acos(max_sr) + diff_time * span
    return 2 * span * T.tan(angles)
def ddim_predict(
    noisy_data: T.Tensor,
    pred_noises: T.Tensor,
    signal_rates: T.Tensor,
    noise_rates: T.Tensor,
) -> T.Tensor:
    """Invert the forward mixing x_t = x_0 * signal + eps * noise in a single
    DDIM step, estimating x_0 from anywhere in the diffusion process."""
    denoised = noisy_data - noise_rates * pred_noises
    return denoised / signal_rates
@T.no_grad()
def ddim_sampler(
    model,
    diff_sched: VPDiffusionSchedule,
    initial_noise: T.Tensor,
    n_steps: int = 50,
    keep_all: bool = False,
    mask: Optional[T.Tensor] = None,
    ctxt: Optional[T.BoolTensor] = None,
    clip_predictions: Optional[tuple] = None,
) -> Tuple[T.Tensor, list]:
    """Apply the DDIM sampling process to generate a batch of samples from
    noise.

    Args:
        model: A denoising diffusion model
            Requires: device attribute, forward() method that outputs pred noise
        diff_sched: A diffusion schedule object to calculate signal and noise rates
        initial_noise: The initial noise to pass through the process
            (also fixes the number of generated samples via its batch dim)
        n_steps: The number of iterations to generate the samples
        keep_all: Return all stages of diffusion process
            Can be memory heavy for large batches
        mask: The mask for the output point clouds
        ctxt: The context tensor for the output point clouds
        clip_predictions: Can stabalise generation by clipping the outputs

    Returns:
        The final denoised prediction and the list of intermediate stages
        (empty unless keep_all is True).
    """
    # Get the initial noise for generation and the number of samples
    num_samples = initial_noise.shape[0]

    # The shape needed for expanding the time encodings
    expanded_shape = [-1] + [1] * (initial_noise.dim() - 1)

    all_stages = []
    step_size = 1 / n_steps

    # The initial variables needed for the loop (start at diffusion time t=1)
    noisy_data = initial_noise
    diff_times = T.ones(num_samples, device=model.device)
    next_signal_rates, next_noise_rates = diff_sched(diff_times.view(expanded_shape))
    for step in tqdm(range(n_steps), "DDIM-sampling", leave=False):
        # Update with the previous 'next' step
        signal_rates = next_signal_rates
        noise_rates = next_noise_rates

        # Keep track of the diffusion evolution
        if keep_all:
            all_stages.append(noisy_data)

        # Apply the denoise step to get X_0 and expected noise
        pred_noises = model(noisy_data, diff_times, mask, ctxt)
        pred_data = ddim_predict(noisy_data, pred_noises, signal_rates, noise_rates)

        # Get the next predicted components using the next signal and noise rates
        diff_times = diff_times - step_size
        next_signal_rates, next_noise_rates = diff_sched(
            diff_times.view(expanded_shape)
        )

        # Clamp the predicted X_0 for stability
        if clip_predictions is not None:
            pred_data.clamp_(*clip_predictions)

        # Remix the predicted components to go from estimated X_0 -> X_{t-1}
        noisy_data = next_signal_rates * pred_data + next_noise_rates * pred_noises

    return pred_data, all_stages
@T.no_grad()
def euler_maruyama_sampler(
    model,
    diff_sched: VPDiffusionSchedule,
    initial_noise: T.Tensor,
    n_steps: int = 50,
    keep_all: bool = False,
    mask: Optional[T.Tensor] = None,
    ctxt: Optional[T.BoolTensor] = None,
    clip_predictions: Optional[tuple] = None,
) -> Tuple[T.Tensor, list]:
    """Generate a batch of samples from noise by integrating the reverse SDE
    with the Euler-Maruyama method.

    Args:
        model: A denoising diffusion model
            Requires: device attribute, forward() method that outputs pred noise
        diff_sched: A diffusion schedule providing signal/noise rates and betas
        initial_noise: The initial noise to pass through the process
        n_steps: The number of integration steps from t=1 down to t=0
        keep_all: Return all stages of diffusion process
            Can be memory heavy for large batches
        mask: The mask for the output point clouds
        ctxt: The context tensor for the output point clouds
        clip_predictions: Can stabalise generation by clipping the outputs

    Returns:
        The final sample tensor and the list of intermediate stages
        (empty unless keep_all is True).
    """
    # Get the number of samples and the shape for expanding the time encodings
    num_samples = initial_noise.shape[0]
    expanded_shape = [-1] + [1] * (initial_noise.dim() - 1)

    all_stages = []
    delta_t = 1 / n_steps

    # Clone so the in-place updates below never modify the caller's tensor
    x_t = initial_noise.clone()
    t = T.ones(num_samples, device=model.device)

    for _ in tqdm(range(n_steps), "Euler-Maruyama-sampling", leave=False):
        # Use the model to get the expected noise, then convert it to the score
        pred_noises = model(x_t, t, mask, ctxt)
        _, noise_rates = diff_sched(t.view(expanded_shape))
        s = -pred_noises / noise_rates

        # One EM step: deterministic drift plus a random diffusion term
        betas = diff_sched.get_betas(t.view(expanded_shape))
        x_t += 0.5 * betas * (x_t + 2 * s) * delta_t
        x_t += (betas * delta_t).sqrt() * T.randn_like(x_t)
        t -= delta_t

        # Snapshot a copy: x_t is updated in place, so appending it directly
        # would make every stored stage alias the same final tensor
        if keep_all:
            all_stages.append(x_t.clone())

        # Clamp the denoised data for stability
        if clip_predictions is not None:
            x_t.clamp_(*clip_predictions)

    return x_t, all_stages
@T.no_grad()
def euler_sampler(
    model,
    diff_sched: VPDiffusionSchedule,
    initial_noise: T.Tensor,
    n_steps: int = 50,
    keep_all: bool = False,
    mask: Optional[T.Tensor] = None,
    ctxt: Optional[T.BoolTensor] = None,
    clip_predictions: Optional[tuple] = None,
) -> Tuple[T.Tensor, list]:
    """Generate a batch of samples by integrating the probability-flow ODE
    with the forward Euler method.

    Args:
        model: A denoising diffusion model
            Requires: device attribute, forward() method that outputs pred noise
        diff_sched: A diffusion schedule providing signal/noise rates and betas
        initial_noise: The initial noise to pass through the process
        n_steps: The number of integration steps from t=1 down to t=0
        keep_all: Return all stages of diffusion process
            Can be memory heavy for large batches
        mask: The mask for the output point clouds
        ctxt: The context tensor for the output point clouds
        clip_predictions: Can stabalise generation by clipping the outputs

    Returns:
        The final sample tensor and the list of intermediate stages
        (empty unless keep_all is True).
    """
    # Get the number of samples and the shape for expanding the time encodings
    num_samples = initial_noise.shape[0]
    expanded_shape = [-1] + [1] * (initial_noise.dim() - 1)

    all_stages = []
    delta_t = 1 / n_steps

    # Scale the starting noise by the combined rates at t=1
    # NOTE(review): scaling by (signal + noise) rather than just the noise
    # rate looks deliberate but is undocumented — confirm against the paper
    t = T.ones(num_samples, device=model.device)
    signal_rates, noise_rates = diff_sched(t.view(expanded_shape))
    x_t = initial_noise * (signal_rates + noise_rates)

    for _ in tqdm(range(n_steps), "Euler-sampling", leave=False):
        # Take a step using the euler method and the gradient calculated by the ode
        x_t += get_ode_gradient(model, diff_sched, x_t, t, mask, ctxt) * delta_t
        t -= delta_t

        # Snapshot a copy: x_t is updated in place, so appending it directly
        # would make every stored stage alias the same final tensor
        if keep_all:
            all_stages.append(x_t.clone())

        # Clamp the denoised data for stability
        if clip_predictions is not None:
            x_t.clamp_(*clip_predictions)

    return x_t, all_stages
@T.no_grad()
def runge_kutta_sampler(
    model,
    diff_sched: VPDiffusionSchedule,
    initial_noise: T.Tensor,
    n_steps: int = 50,
    keep_all: bool = False,
    mask: Optional[T.Tensor] = None,
    ctxt: Optional[T.BoolTensor] = None,
    clip_predictions: Optional[tuple] = None,
) -> Tuple[T.Tensor, list]:
    """Generate a batch of samples by integrating the probability-flow ODE
    with the classic fourth-order Runge-Kutta method.

    Args:
        model: A denoising diffusion model
            Requires: device attribute, forward() method that outputs pred noise
        diff_sched: A diffusion schedule providing signal/noise rates and betas
        initial_noise: The initial noise to pass through the process
        n_steps: The number of integration steps from t=1 down to t=0
        keep_all: Return all stages of diffusion process
            Can be memory heavy for large batches
        mask: The mask for the output point clouds
        ctxt: The context tensor for the output point clouds
        clip_predictions: Can stabalise generation by clipping the outputs

    Returns:
        The final sample tensor and the list of intermediate stages
        (empty unless keep_all is True).
    """
    num_samples = initial_noise.shape[0]

    all_stages = []
    delta_t = 1 / n_steps

    # Wrap the ode gradient in a helper depending only on t and x_t
    def ode_grad(t: T.Tensor, x_t: T.Tensor) -> T.Tensor:
        return get_ode_gradient(model, diff_sched, x_t, t, mask, ctxt)

    # Clone so the in-place updates below never modify the caller's tensor
    x_t = initial_noise.clone()
    t = T.ones(num_samples, device=model.device)

    for _ in tqdm(range(n_steps), "Runge-Kutta-sampling", leave=False):
        # Standard RK4 update, stepping backwards in diffusion time;
        # the final time is clamped at zero so it never goes negative
        k1 = delta_t * (ode_grad(t, x_t))
        k2 = delta_t * (ode_grad((t - delta_t / 2), (x_t + k1 / 2)))
        k3 = delta_t * (ode_grad((t - delta_t / 2), (x_t + k2 / 2)))
        k4 = delta_t * (ode_grad((T.clamp_min(t - delta_t, 0)), (x_t + k3)))
        x_t += (k1 + 2 * k2 + 2 * k3 + k4) / 6
        t -= delta_t

        # Snapshot a copy: x_t is updated in place, so appending it directly
        # would make every stored stage alias the same final tensor
        if keep_all:
            all_stages.append(x_t.clone())

        # Clamp the denoised data for stability
        if clip_predictions is not None:
            x_t.clamp_(*clip_predictions)

    return x_t, all_stages
def get_ode_gradient(
    model,
    diff_sched: VPDiffusionSchedule,
    x_t: T.Tensor,
    t: T.Tensor,
    mask: Optional[T.BoolTensor] = None,
    ctxt: Optional[T.Tensor] = None,
) -> T.Tensor:
    """Gradient of the probability-flow ODE: 0.5 * beta * (x - eps / sigma),
    where eps is the model's noise prediction and sigma the noise rate."""
    # Broadcast the per-sample times across the remaining data dimensions
    time_shape = [-1] + [1] * (x_t.dim() - 1)
    t_expanded = t.view(time_shape)

    _, noise_rates = diff_sched(t_expanded)
    betas = diff_sched.get_betas(t_expanded)

    return 0.5 * betas * (x_t - model(x_t, t, mask, ctxt) / noise_rates)
def run_sampler(sampler: str, *args, **kwargs) -> Tuple[T.Tensor, list]:
    """Dispatch to one of the sampling functions defined above by name."""
    if sampler == "ddim":
        return ddim_sampler(*args, **kwargs)
    if sampler == "rk":
        return runge_kutta_sampler(*args, **kwargs)
    if sampler == "euler":
        return euler_sampler(*args, **kwargs)
    if sampler == "em":
        return euler_maruyama_sampler(*args, **kwargs)
    raise RuntimeError(f"Unknown sampler: {sampler}")
| 11,263 | 33.873065 | 86 | py |
PC-JeDi | PC-JeDi-main/src/models/transformers.py | """Some classes to describe transformer architectures."""
import math
from typing import Mapping, Optional, Union
import torch as T
import torch.nn as nn
from torch.nn.functional import dropout, softmax
from .modules import DenseNetwork
def merge_masks(
    q_mask: Union[T.BoolTensor, None],
    kv_mask: Union[T.BoolTensor, None],
    attn_mask: Union[T.BoolTensor, None],
    q_shape: T.Size,
    k_shape: T.Size,
    device: T.device,
) -> Union[None, T.BoolTensor]:
    """Combine the padding masks of both sequences and an optional attention
    mask into one full boolean mask, or None when no mask applies."""
    merged = None

    # Combine the two padding masks into a (batch, q_seq, kv_seq) grid,
    # treating a missing padding mask as all-True
    if not (q_mask is None and kv_mask is None):
        if q_mask is None:
            q_mask = T.full(q_shape[:-1], True, device=device)
        if kv_mask is None:
            kv_mask = T.full(k_shape[:-1], True, device=device)
        merged = q_mask.unsqueeze(-1) & kv_mask.unsqueeze(-2)

    # AND in the explicit attention mask when given
    if attn_mask is not None:
        merged = attn_mask if merged is None else attn_mask & merged

    return merged
def attention(
    query: T.Tensor,
    key: T.Tensor,
    value: T.Tensor,
    dim_key: int,
    attn_mask: Optional[T.BoolTensor] = None,
    attn_bias: Optional[T.Tensor] = None,
    drp: float = 0.0,
    training: bool = True,
) -> T.Tensor:
    """Scaled dot-product attention between key, query and value tensors.

    The attention scores are ordered recv x send. Padded key/value elements
    are removed with masked_fill(-inf); this introduces nans for fully padded
    query rows after the softmax, which are zeroed out explicitly.

    Args:
        query: Batched query sequence of tensors (b, h, s, f)
        key: Batched key sequence of tensors (b, h, s, f)
        value: Batched value sequence of tensors (b, h, s, f)
        dim_key: The dimension of the key features, used to scale the dot product
        attn_mask: The attention mask, used to blind certain combinations of k,q pairs
        attn_bias: Extra weights to combine with attention weights
        drp: Dropout probability
        training: If the model is in training mode, effects the dropout applied
    """
    # Scaled dot product between queries and keys: (b, h, q_seq, kv_seq)
    weights = T.matmul(query, key.transpose(-2, -1)) / math.sqrt(dim_key)

    # Optional additive bias, whose head dimension is stored last
    if attn_bias is not None:
        weights = weights + attn_bias.permute(0, 3, 1, 2)

    # Blind the scores between invalid elements in the sequences
    if attn_mask is not None:
        weights = weights.masked_fill(~attn_mask.unsqueeze(-3), -T.inf)

    # Normalise per receiving element, then kill the nans that the fully
    # masked (padded) query rows produce
    weights = softmax(weights, dim=-1)
    weights = T.nan_to_num(weights, 0)

    # Attention dropout, then weight the values with the final scores
    weights = dropout(weights, p=drp, training=training)
    return T.matmul(weights, value)
class MultiHeadedAttentionBlock(nn.Module):
    """Generic Multiheaded Attention.

    Takes in three sequences with dim: (batch, sequence, features)
    - q: The primary sequence queries (determines output sequence length)
    - k: The attending sequence keys (determines incoming information)
    - v: The attending sequence values

    In a message passing sense you can think of q as your receiver nodes, v and k
    are the information coming from the sender nodes.

    When q == k(and v) this is a SELF attention operation
    When q != k(and v) this is a CROSS attention operation

    ===

    Block operations:

    1) Uses three linear layers to project the sequences.
    - q = q_linear * q
    - k = k_linear * k
    - v = v_linear * v

    2) Outputs are reshaped to add a head dimension, and transposed for matmul.
    - features = model_dim = head_dim * num_heads
    - dim becomes: batch, num_heads, sequence, head_dim

    3) Passes these through to the attention module (message passing)
    - In standard transformers this is the scaled dot product attention
    - Also takes additional dropout layer to mask the attention

    4) Flatten out the head dimension and pass through final linear layer
    - results are same as if attention was done separately for each head and concat
    - dim: batch, q_seq, head_dim * num_heads
    """

    def __init__(
        self,
        model_dim: int,
        num_heads: int = 1,
        drp: float = 0,
    ) -> None:
        """
        Args:
            model_dim: The dimension of the model
            num_heads: The number of different attention heads to process in parallel
                - Must allow integer division into model_dim
            drp: The dropout probability used in the MHA operation
        """
        super().__init__()

        # Define model base attributes
        self.model_dim = model_dim
        self.num_heads = num_heads
        self.head_dim = model_dim // num_heads

        # Check that the dimension of each head makes internal sense
        if self.head_dim * num_heads != model_dim:
            raise ValueError("Model dimension must be divisible by number of heads!")

        # Initialise the weight matrices (separate projections for q, k, v
        # plus the final output projection)
        self.q_linear = nn.Linear(model_dim, model_dim)
        self.k_linear = nn.Linear(model_dim, model_dim)
        self.v_linear = nn.Linear(model_dim, model_dim)
        self.out_linear = nn.Linear(model_dim, model_dim)
        self.drp = drp

    def forward(
        self,
        q: T.Tensor,
        k: Optional[T.Tensor] = None,
        v: Optional[T.Tensor] = None,
        q_mask: Optional[T.BoolTensor] = None,
        kv_mask: Optional[T.BoolTensor] = None,
        attn_mask: Optional[T.BoolTensor] = None,
        attn_bias: Optional[T.Tensor] = None,
    ) -> T.Tensor:
        """
        Args:
            q: The main sequence queries (determines the output length)
            k: The incoming information keys
            v: The incoming information values
            q_mask: Shows which elements of the main sequence are real
            kv_mask: Shows which elements of the attn sequence are real
            attn_mask: Extra mask for the attention matrix (eg: look ahead)
            attn_bias: Extra bias term for the attention matrix (eg: edge features)
        """
        # If only q and q_mask are provided then we automatically apply self attention
        if k is None:
            k = q
            if kv_mask is None:
                kv_mask = q_mask
        v = v if v is not None else k

        # Store the batch size, useful for reshaping
        b_size, seq, feat = q.shape

        # Work out the masking situation, with padding, no peaking etc
        attn_mask = merge_masks(q_mask, kv_mask, attn_mask, q.shape, k.shape, q.device)

        # Generate the q, k, v projections, break final head dimension in 2
        # -> (batch, seq, num_heads, head_dim)
        shape = (b_size, -1, self.num_heads, self.head_dim)
        q = self.q_linear(q).view(shape)
        k = self.k_linear(k).view(shape)
        v = self.v_linear(v).view(shape)

        # Transpose to get dimensions: B,H,Seq,HD (required for matmul)
        q = q.transpose(1, 2)
        k = k.transpose(1, 2)
        v = v.transpose(1, 2)

        # Calculate the new sequence values, for memory reasons overwrite q
        q = attention(
            q,
            k,
            v,
            self.head_dim,
            attn_mask=attn_mask,
            attn_bias=attn_bias,
            drp=self.drp,
            training=self.training,
        )  # Returned shape is B,H,Q_seq,HD

        # Concatenate the all of the heads together to get shape: B,Seq,F
        q = q.transpose(1, 2).contiguous().view(b_size, -1, self.model_dim)

        # Pass through final linear layer
        q = self.out_linear(q)

        return q
class TransformerEncoderLayer(nn.Module):
    """A transformer encoder layer based on the GPT-2+Normformer style
    architecture.

    We choose Normformer as it has often proved to be the most stable to train
    https://arxiv.org/abs/2210.06423
    https://arxiv.org/abs/2110.09456

    It contains:
    - Multihead(self)Attention block
    - A dense network

    Layernorm is applied before each operation
    Residual connections are used to bypass each operation
    """

    def __init__(
        self,
        model_dim: int,
        mha_config: Optional[Mapping] = None,
        dense_config: Optional[Mapping] = None,
        ctxt_dim: int = 0,
    ) -> None:
        """
        Args:
            model_dim: The embedding dimension of the transformer block
            mha_config: Keyword arguments for multiheaded-attention block
            dense_config: Keyword arguments for feed forward network
            ctxt_dim: Context dimension, passed through to the dense network
        """
        super().__init__()
        mha_config = mha_config or {}
        dense_config = dense_config or {}
        self.model_dim = model_dim
        self.ctxt_dim = ctxt_dim

        # The basic blocks
        self.self_attn = MultiHeadedAttentionBlock(model_dim, **mha_config)
        self.dense = DenseNetwork(
            model_dim, outp_dim=model_dim, ctxt_dim=ctxt_dim, **dense_config
        )

        # The normalisation layers (lots from NormFormer)
        self.norm1 = nn.LayerNorm(model_dim)
        self.norm2 = nn.LayerNorm(model_dim)
        self.norm3 = nn.LayerNorm(model_dim)

    def forward(
        self,
        x: T.Tensor,
        mask: Optional[T.BoolTensor] = None,
        ctxt: Optional[T.Tensor] = None,
        attn_bias: Optional[T.Tensor] = None,
        attn_mask: Optional[T.BoolTensor] = None,
    ) -> T.Tensor:
        "Pass through the layer using residual connections and layer normalisation"
        # Pre-norm self attention with a post-norm on the residual branch
        x = x + self.norm2(
            self.self_attn(
                self.norm1(x), q_mask=mask, attn_mask=attn_mask, attn_bias=attn_bias
            )
        )
        # Pre-norm feed forward, conditioned on the optional context
        x = x + self.dense(self.norm3(x), ctxt)
        return x
class TransformerEncoder(nn.Module):
    """A stack of N transformer encoder layers followed by a final
    normalisation step.

    Sequence -> Sequence
    """

    def __init__(
        self,
        model_dim: int = 64,
        num_layers: int = 3,
        mha_config: Optional[Mapping] = None,
        dense_config: Optional[Mapping] = None,
        ctxt_dim: int = 0,
    ) -> None:
        """
        Args:
            model_dim: Feature size for input, output, and all intermediate layers
            num_layers: Number of encoder layers used
            mha_config: Keyword arguments for the mha block
            dense_config: Keyword arguments for the dense network in each layer
            ctxt_dim: Dimension of the context inputs
        """
        super().__init__()
        self.model_dim = model_dim
        self.num_layers = num_layers

        # Build the stack: identical configs but independent weights per layer
        stack = [
            TransformerEncoderLayer(model_dim, mha_config, dense_config, ctxt_dim)
            for _ in range(num_layers)
        ]
        self.layers = nn.ModuleList(stack)
        self.final_norm = nn.LayerNorm(model_dim)

    def forward(self, x: T.Tensor, **kwargs) -> T.Tensor:
        """Pass the input through all layers sequentially."""
        for enc_layer in self.layers:
            x = enc_layer(x, **kwargs)
        return self.final_norm(x)
class FullTransformerEncoder(nn.Module):
    """A transformer encoder with added input and output embedding networks.

    Sequence -> Sequence
    """

    def __init__(
        self,
        inpt_dim: int,
        outp_dim: int,
        edge_dim: int = 0,
        ctxt_dim: int = 0,
        te_config: Optional[Mapping] = None,
        node_embd_config: Optional[Mapping] = None,
        outp_embd_config: Optional[Mapping] = None,
        edge_embd_config: Optional[Mapping] = None,
        ctxt_embd_config: Optional[Mapping] = None,
    ) -> None:
        """
        Args:
            inpt_dim: Dim. of each element of the sequence
            outp_dim: Dim. of the final output vector
            edge_dim: Dim. of the input edge features
            ctxt_dim: Dim. of the context vector to pass to the embedding nets
            te_config: Keyword arguments to pass to the TransformerEncoder constructor
            node_embd_config: Keyword arguments for node dense embedder
            outp_embd_config: Keyword arguments for output dense embedder
            edge_embd_config: Keyword arguments for edge dense embedder
            ctxt_embd_config: Keyword arguments for context dense embedder
        """
        super().__init__()
        self.inpt_dim = inpt_dim
        self.outp_dim = outp_dim
        self.ctxt_dim = ctxt_dim
        self.edge_dim = edge_dim
        te_config = te_config or {}
        node_embd_config = node_embd_config or {}
        outp_embd_config = outp_embd_config or {}
        edge_embd_config = edge_embd_config or {}
        # BUGFIX: ctxt_embd_config was the only config not defaulted to an empty
        # dict, so ctxt_dim > 0 without an explicit config crashed on **None below.
        ctxt_embd_config = ctxt_embd_config or {}

        # Initialise the context embedding network (optional)
        if self.ctxt_dim:
            # NOTE: attribute name "ctxt_emdb" (sic) kept for checkpoint compat
            self.ctxt_emdb = DenseNetwork(
                inpt_dim=self.ctxt_dim,
                **ctxt_embd_config,
            )
            self.ctxt_out = self.ctxt_emdb.outp_dim
        else:
            self.ctxt_out = 0

        # Initialise the transformer encoder, the main part of this network
        self.te = TransformerEncoder(**te_config, ctxt_dim=self.ctxt_out)
        self.model_dim = self.te.model_dim

        # Initialise all embedding networks
        self.node_embd = DenseNetwork(
            inpt_dim=self.inpt_dim,
            outp_dim=self.model_dim,
            ctxt_dim=self.ctxt_out,
            **node_embd_config,
        )
        self.outp_embd = DenseNetwork(
            inpt_dim=self.model_dim,
            outp_dim=self.outp_dim,
            ctxt_dim=self.ctxt_out,
            **outp_embd_config,
        )

        # Initialise the edge embedding network (optional); its output size
        # matches the attention head count so it can serve as an attention bias
        if self.edge_dim:
            self.edge_embd = DenseNetwork(
                inpt_dim=self.edge_dim,
                outp_dim=self.te.layers[0].self_attn.num_heads,
                ctxt_dim=self.ctxt_out,
                **edge_embd_config,
            )

    def forward(
        self,
        x: T.Tensor,
        mask: Optional[T.BoolTensor] = None,
        ctxt: Optional[T.Tensor] = None,
        attn_bias: Optional[T.Tensor] = None,
        attn_mask: Optional[T.BoolTensor] = None,
    ) -> T.Tensor:
        """Embed the nodes (and context/edges), run the encoder, project out."""
        if self.ctxt_dim:
            ctxt = self.ctxt_emdb(ctxt)
        if self.edge_dim:
            attn_bias = self.edge_embd(attn_bias, ctxt)
        x = self.node_embd(x, ctxt)
        x = self.te(x, mask=mask, ctxt=ctxt, attn_bias=attn_bias, attn_mask=attn_mask)
        x = self.outp_embd(x, ctxt)
        return x
| 15,049 | 33.837963 | 87 | py |
PC-JeDi | PC-JeDi-main/src/models/schedulers.py | from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
class WarmupToConstant(_LRScheduler):
    """Linearly ramps each learning rate from zero up to its base value.

    After ``num_steps`` scheduler steps the learning rate is held constant
    at the optimizer's configured base value.
    """

    def __init__(self, optimizer: Optimizer, num_steps: int = 100) -> None:
        """
        args:
            optimizer (Optimizer): Wrapped optimizer.
            num_steps: target learning rate is reached at num_steps.
        """
        self.num_steps = num_steps
        self.finished = False  # retained for interface compatibility (unused)
        super().__init__(optimizer)

    def get_lr(self) -> list[float]:
        # Past the warmup window: hold every base learning rate constant
        if self.last_epoch > self.num_steps:
            return list(self.base_lrs)
        # Inside the warmup window: scale linearly with the step count
        return [(lr / self.num_steps) * self.last_epoch for lr in self.base_lrs]
| 793 | 32.083333 | 85 | py |
PC-JeDi | PC-JeDi-main/src/models/modules.py | """Collection of pytorch modules that make up the networks."""
import math
from typing import Optional, Union
import torch as T
import torch.nn as nn
def get_act(name: str) -> nn.Module:
    """Return a new pytorch activation module given its name.

    Note: a fresh module instance is created on every call so activations
    are never shared between layers.

    Args:
        name: One of relu/lrlu/silu/swish/selu/softmax/gelu/tanh/sigmoid.

    Raises:
        ValueError: If the name is not recognised.
    """
    # Factory map replaces the original if-chain, which also contained a
    # duplicate (dead) "softmax" branch.
    activations = {
        "relu": nn.ReLU,
        "lrlu": lambda: nn.LeakyReLU(0.1),  # fixed negative slope of 0.1
        "silu": nn.SiLU,
        "swish": nn.SiLU,  # swish is an alias for silu
        "selu": nn.SELU,
        "softmax": nn.Softmax,
        "gelu": nn.GELU,
        "tanh": nn.Tanh,
        "sigmoid": nn.Sigmoid,
    }
    if name not in activations:
        raise ValueError("No activation function with name: ", name)
    return activations[name]()
def get_nrm(name: str, outp_dim: int) -> nn.Module:
    """Return a 1D pytorch normalisation layer given a name and an output size.

    Returns a None object if name is "none"; raises ValueError otherwise.
    """
    builders = {"batch": nn.BatchNorm1d, "layer": nn.LayerNorm}
    if name == "none":
        return None
    if name in builders:
        return builders[name](outp_dim)
    raise ValueError("No normalistation with name: ", name)
class MLPBlock(nn.Module):
    """A simple MLP block that makes up a dense network.

    Made up of several layers containing:
    - linear map
    - activation function [Optional]
    - layer normalisation [Optional]
    - dropout [Optional]

    Only the input of the block is concatentated with context information.
    For residual blocks, the input is added to the output of the final layer.
    """

    def __init__(
        self,
        inpt_dim: int,
        outp_dim: int,
        ctxt_dim: int = 0,
        n_layers: int = 1,
        act: str = "lrlu",
        nrm: str = "none",
        drp: float = 0,
        do_res: bool = False,
    ) -> None:
        """Init method for MLPBlock.

        Parameters
        ----------
        inpt_dim : int
            The number of features for the input layer
        outp_dim : int
            The number of output features
        ctxt_dim : int, optional
            The number of contextual features to concat to the inputs, by default 0
        n_layers : int, optional
            The number of transform layers in this block, by default 1
        act : str, optional
            A string indicating the name of the activation function, by default "lrlu"
        nrm : str, optional
            A string indicating the name of the normalisation, by default "none"
        drp : float, optional
            The dropout probability, 0 implies no dropout, by default 0
        do_res : bool, optional
            Add input to the output, only if dim does not change, by default False
        """
        super().__init__()

        # Save the input and output dimensions of the module
        self.inpt_dim = inpt_dim
        self.outp_dim = outp_dim
        self.ctxt_dim = ctxt_dim

        # If this layer includes an additive residual connection
        # (silently disabled when input and output sizes differ)
        self.do_res = do_res and (inpt_dim == outp_dim)

        # Initialise the block layers as a module list
        self.block = nn.ModuleList()
        for n in range(n_layers):

            # Increase the input dimension of the first layer to include context
            lyr_in = inpt_dim + ctxt_dim if n == 0 else outp_dim

            # Linear transform, activation, normalisation, dropout
            self.block.append(nn.Linear(lyr_in, outp_dim))
            if act != "none":
                self.block.append(get_act(act))
            if nrm != "none":
                self.block.append(get_nrm(nrm, outp_dim))
            if drp > 0:
                self.block.append(nn.Dropout(drp))

    def forward(self, inpt: T.Tensor, ctxt: Optional[T.Tensor] = None) -> T.Tensor:
        """Pass a tensor (with optional context) through every layer in order.

        args:
            inpt: Pytorch tensor to pass through the network
            ctxt: The conditioning tensor, can be ignored
        """
        # Concatenate the context information to the input of the block
        if self.ctxt_dim and ctxt is None:
            raise ValueError(
                "Was expecting contextual information but none has been provided!"
            )
        temp = T.cat([inpt, ctxt], dim=-1) if self.ctxt_dim else inpt

        # Pass through each transform in the block
        for layer in self.block:
            temp = layer(temp)

        # Add the original inputs again for the residual connection
        if self.do_res:
            temp = temp + inpt

        return temp

    def __repr__(self) -> str:
        """Generate a one line string summing up the components of the
        block."""
        string = str(self.inpt_dim)
        if self.ctxt_dim:
            string += f"({self.ctxt_dim})"
        string += "->"
        # Each sub-module contributes its class name, e.g. "Linear->ReLU"
        string += "->".join([str(b).split("(", 1)[0] for b in self.block])
        string += "->" + str(self.outp_dim)
        if self.do_res:
            string += "(add)"
        return string
class DenseNetwork(nn.Module):
    """A dense neural network made from a series of consecutive MLP blocks and
    context injection layers."""

    def __init__(
        self,
        inpt_dim: int,
        outp_dim: int = 0,
        ctxt_dim: int = 0,
        hddn_dim: Union[int, list] = 32,
        num_blocks: int = 1,
        n_lyr_pbk: int = 1,
        act_h: str = "lrlu",
        act_o: str = "none",
        do_out: bool = True,
        nrm: str = "none",
        drp: float = 0,
        do_res: bool = False,
        ctxt_in_inpt: bool = True,
        ctxt_in_hddn: bool = False,
    ) -> None:
        """Initialise the DenseNetwork.

        Parameters
        ----------
        inpt_dim : int
            The number of input neurons
        outp_dim : int, optional
            The number of output neurons. If none it will take from inpt or hddn,
            by default 0
        ctxt_dim : int, optional
            The number of context features. The context feature use is determined by
            ctxt_type, by default 0
        hddn_dim : Union[int, list], optional
            The width of each hidden block. If a list it overides depth, by default 32
        num_blocks : int, optional
            The number of hidden blocks, can be overwritten by hddn_dim, by default 1
        n_lyr_pbk : int, optional
            The number of transform layers per hidden block, by default 1
        act_h : str, optional
            The name of the activation function to apply in the hidden blocks,
            by default "lrlu"
        act_o : str, optional
            The name of the activation function to apply to the outputs,
            by default "none"
        do_out : bool, optional
            If the network has a dedicated output block, by default True
        nrm : str, optional
            Type of normalisation (layer or batch) in each hidden block, by default "none"
        drp : float, optional
            Dropout probability for hidden layers (0 means no dropout), by default 0
        do_res : bool, optional
            Use resisdual-connections between hidden blocks (only if same size),
            by default False
        ctxt_in_inpt : bool, optional
            Include the ctxt tensor in the input block, by default True
        ctxt_in_hddn : bool, optional
            Include the ctxt tensor in the hidden blocks, by default False

        Raises
        ------
        ValueError
            If the network was given a context input but both ctxt_in_inpt and
            ctxt_in_hddn were False
        """
        super().__init__()

        # Check that the context is used somewhere
        if ctxt_dim:
            if not ctxt_in_hddn and not ctxt_in_inpt:
                raise ValueError("Network has context inputs but nowhere to use them!")

        # We store the input, hddn (list), output, and ctxt dims to query them later
        self.inpt_dim = inpt_dim
        if not isinstance(hddn_dim, int):
            self.hddn_dim = hddn_dim
        else:
            self.hddn_dim = num_blocks * [hddn_dim]
        # NOTE(review): precedence here is `outp_dim or (inpt_dim if do_out else
        # hddn[-1])`, so with do_out=False a supplied outp_dim is still reported
        # even though no output block exists — confirm this is intended
        self.outp_dim = outp_dim or inpt_dim if do_out else self.hddn_dim[-1]
        self.num_blocks = len(self.hddn_dim)
        self.ctxt_dim = ctxt_dim
        self.do_out = do_out

        # Necc for this module to work with the nflows package
        self.hidden_features = self.hddn_dim[-1]

        # Input MLP block
        self.input_block = MLPBlock(
            inpt_dim=self.inpt_dim,
            outp_dim=self.hddn_dim[0],
            ctxt_dim=self.ctxt_dim if ctxt_in_inpt else 0,
            act=act_h,
            nrm=nrm,
            drp=drp,
        )

        # All hidden blocks as a single module list
        # (stays a plain empty list when there is only one block)
        self.hidden_blocks = []
        if self.num_blocks > 1:
            self.hidden_blocks = nn.ModuleList()
            for h_1, h_2 in zip(self.hddn_dim[:-1], self.hddn_dim[1:]):
                self.hidden_blocks.append(
                    MLPBlock(
                        inpt_dim=h_1,
                        outp_dim=h_2,
                        ctxt_dim=self.ctxt_dim if ctxt_in_hddn else 0,
                        n_layers=n_lyr_pbk,
                        act=act_h,
                        nrm=nrm,
                        drp=drp,
                        do_res=do_res,
                    )
                )

        # Output block (optional and there is no normalisation, dropout or context)
        if do_out:
            self.output_block = MLPBlock(
                inpt_dim=self.hddn_dim[-1],
                outp_dim=self.outp_dim,
                act=act_o,
            )

    def forward(self, inputs: T.Tensor, ctxt: Optional[T.Tensor] = None) -> T.Tensor:
        """Pass through all layers of the dense network."""

        # Reshape the context if it is available: add singleton dims then
        # broadcast so it matches the batch/sequence shape of the inputs
        if ctxt is not None:
            dim_diff = inputs.dim() - ctxt.dim()
            if dim_diff > 0:
                ctxt = ctxt.view(ctxt.shape[0], *dim_diff * (1,), *ctxt.shape[1:])
                ctxt = ctxt.expand(*inputs.shape[:-1], -1)

        # Pass through the input block
        inputs = self.input_block(inputs, ctxt)

        # Pass through each hidden block
        for h_block in self.hidden_blocks:  # Context tensor will only be used if
            inputs = h_block(inputs, ctxt)  # block was initialised with a ctxt dim

        # Pass through the output block
        if self.do_out:
            inputs = self.output_block(inputs)

        return inputs

    def __repr__(self):
        """Multi-line summary listing the input, hidden, and output blocks."""
        string = ""
        string += "\n (inp): " + repr(self.input_block) + "\n"
        for i, h_block in enumerate(self.hidden_blocks):
            string += f" (h-{i+1}): " + repr(h_block) + "\n"
        if self.do_out:
            string += " (out): " + repr(self.output_block)
        return string

    def one_line_string(self):
        """Return a one line string that sums up the network structure."""
        string = str(self.inpt_dim)
        if self.ctxt_dim:
            string += f"({self.ctxt_dim})"
        string += ">"
        string += str(self.input_block.outp_dim) + ">"
        if self.num_blocks > 1:
            string += ">".join(
                [
                    str(layer.out_features)
                    for hidden in self.hidden_blocks
                    for layer in hidden.block
                    if isinstance(layer, nn.Linear)
                ]
            )
            string += ">"
        if self.do_out:
            string += str(self.outp_dim)
        return string
class IterativeNormLayer(nn.Module):
    """A basic normalisation layer so it can be part of the model.

    Note! If a mask is provided in the forward pass, then this must be
    the dimension to apply over the masked inputs! For example: Graph
    nodes are usually batch x n_nodes x features so to normalise over
    the features one would typically give extra_dims as (0,) But nodes
    are always passed with the mask which flattens it to batch x
    features. Batch dimension is done automatically, so we dont pass any
    extra_dims!!!
    """

    def __init__(
        self,
        inpt_dim: Union[T.Tensor, tuple, int],
        means: Optional[T.Tensor] = None,
        vars: Optional[T.Tensor] = None,
        n: int = 0,
        max_n: int = 5_00_000,
        extra_dims: Union[tuple, int] = (),
    ) -> None:
        """Init method for Normalisatiion module.

        Args:
            inpt_dim: Shape of the input tensor, required for reloading
            means: Calculated means for the mapping. Defaults to None.
            vars: Calculated variances for the mapping. Defaults to None.
            n: Number of samples used to make the mapping. Defaults to None.
            max_n: Maximum number of iterations before the means and vars are frozen
            extra_dims: The extra dimension(s) over which to calculate the stats
                Will always calculate over the batch dimension
        """
        super().__init__()

        # Fail if only one of means or vars is provided
        if (means is None) ^ (vars is None):  # XOR
            raise ValueError(
                """Only one of 'means' and 'vars' is defined. Either both or
                neither must be defined"""
            )

        # Allow interger inpt_dim and n arguments
        if isinstance(inpt_dim, int):
            inpt_dim = (inpt_dim,)
        if isinstance(n, int):
            n = T.tensor(n)

        # The dimensions over which to apply the normalisation, make positive!
        if isinstance(extra_dims, int):  # Ensure it is a list
            extra_dims = [extra_dims]
        else:
            extra_dims = list(extra_dims)
        if any([abs(e) > len(inpt_dim) for e in extra_dims]):  # Check size
            raise ValueError("extra_dims argument lists dimensions outside input range")
        for d in range(len(extra_dims)):
            if extra_dims[d] < 0:  # make positive
                extra_dims[d] = len(inpt_dim) + extra_dims[d]
            extra_dims[d] += 1  # Add one because we are inserting a batch dimension
        self.extra_dims = extra_dims

        # Calculate the input and output shapes
        self.max_n = max_n
        self.inpt_dim = list(inpt_dim)
        self.stat_dim = [1] + list(inpt_dim)  # Add batch dimension
        for d in range(len(self.stat_dim)):
            if d in self.extra_dims:
                self.stat_dim[d] = 1

        # Buffers are needed for saving/loading the layer
        self.register_buffer(
            "means", T.zeros(self.stat_dim) if means is None else means
        )
        self.register_buffer("vars", T.ones(self.stat_dim) if vars is None else vars)
        self.register_buffer("n", n)

        # For the welford algorithm it is useful to have another variable m2
        # (the running sum of squared deviations, so that vars = m2 / n)
        self.register_buffer("m2", T.ones(self.stat_dim) if vars is None else vars)

        # If the means are set here then the model is "frozen" and not updated
        self.frozen = means is not None

    def _mask(self, inpt: T.Tensor, mask: Optional[T.BoolTensor] = None) -> T.Tensor:
        # Boolean-mask selection flattens the masked dims into one (see class note)
        if mask is None:
            return inpt
        return inpt[mask]

    def _check_attributes(self) -> None:
        # Guard used before applying the mapping when stats may be missing
        if self.means is None or self.vars is None:
            raise ValueError(
                "Stats for have not been initialised or fit() has not been run!"
            )

    def fit(
        self, inpt: T.Tensor, mask: Optional[T.BoolTensor] = None, freeze: bool = True
    ) -> None:
        """Set the stats given a population of data."""
        inpt = self._mask(inpt, mask)
        self.vars, self.means = T.var_mean(
            inpt, dim=(0, *self.extra_dims), keepdim=True
        )
        self.n = T.tensor(len(inpt), device=self.means.device)
        self.m2 = self.vars * self.n
        self.frozen = freeze

    def forward(self, inpt: T.Tensor, mask: Optional[T.BoolTensor] = None) -> T.Tensor:
        """Applies the standardisation to a batch of inputs, also uses the
        inputs to update the running stats if in training mode."""
        with T.no_grad():
            sel_inpt = self._mask(inpt, mask)
            if not self.frozen and self.training:
                self.update(sel_inpt)

            # Apply the mapping
            # NOTE(review): the 1e-8 epsilon here is not mirrored in reverse(),
            # so forward followed by reverse is only approximately the identity
            normed_inpt = (sel_inpt - self.means) / (self.vars.sqrt() + 1e-8)

            # Undo the masking
            if mask is not None:
                inpt = inpt.clone()  # prevents inplace operation, bad for autograd
                inpt[mask] = normed_inpt
                return inpt

            return normed_inpt

    def reverse(self, inpt: T.Tensor, mask: Optional[T.BoolTensor] = None) -> T.Tensor:
        """Unnormalises the inputs given the recorded stats."""
        sel_inpt = self._mask(inpt, mask)
        unnormed_inpt = sel_inpt * self.vars.sqrt() + self.means

        # Undo the masking
        if mask is not None:
            inpt = inpt.clone()  # prevents inplace operation, bad for autograd
            inpt[mask] = unnormed_inpt
            return inpt

        return unnormed_inpt

    def update(self, inpt: T.Tensor, mask: Optional[T.BoolTensor] = None) -> None:
        """Update the running stats using a batch of data."""
        inpt = self._mask(inpt, mask)

        # For first iteration
        if self.n == 0:
            self.fit(inpt, freeze=False)
            return

        # later iterations based on batched welford algorithm
        with T.no_grad():
            self.n += len(inpt)
            delta = inpt - self.means
            self.means += (delta / self.n).mean(
                dim=(0, *self.extra_dims), keepdim=True
            ) * len(inpt)
            delta2 = inpt - self.means
            self.m2 += (delta * delta2).mean(
                dim=(0, *self.extra_dims), keepdim=True
            ) * len(inpt)
            self.vars = self.m2 / self.n

        # Freeze the model if we exceed the requested stats
        self.frozen = self.n >= self.max_n
class CosineEncoding:
    """Callable wrapper that stores fixed settings for ``cosine_encoding``."""

    def __init__(
        self,
        outp_dim: int = 32,
        min_value: float = 0.0,
        max_value: float = 1.0,
        frequency_scaling: str = "exponential",
    ) -> None:
        self.outp_dim = outp_dim
        self.min_value = min_value
        self.max_value = max_value
        self.frequency_scaling = frequency_scaling

    def __call__(self, inpt: T.Tensor) -> T.Tensor:
        # Delegate to the functional form with the stored configuration
        return cosine_encoding(
            inpt,
            outp_dim=self.outp_dim,
            min_value=self.min_value,
            max_value=self.max_value,
            frequency_scaling=self.frequency_scaling,
        )
def cosine_encoding(
    x: T.Tensor,
    outp_dim: int = 32,
    min_value: float = 0.0,
    max_value: float = 1.0,
    frequency_scaling: str = "exponential",
) -> T.Tensor:
    """Computes a positional cosine encodings with an increasing series of
    frequencies.

    The frequencies either increase linearly or exponentially (default).
    The latter is good for when max_value is large and extremely high sensitivity to the
    input is required.
    If inputs greater than the max value are provided, the outputs become degenerate.
    If inputs smaller than the min value are provided, the inputs the the cosine will
    be both positive and negative, which may lead degenerate outputs.
    Always make sure that the min and max bounds are not exceeded!

    Args:
        x: The input, the final dimension is encoded. If 1D then it will be unqueezed
        outp_dim: The dimension of the output encoding
        min_value: Added to x (and max) as cosine embedding works with positive inputs
        max_value: The maximum expected value, sets the scale of the lowest frequency
        frequency_scaling: Either 'linear' or 'exponential'

    Returns:
        The cosine embeddings of the input using (out_dim) many frequencies
    """

    # Unsqueeze if final dimension is flat
    if x.shape[-1] != 1 or x.dim() == 1:
        x = x.unsqueeze(-1)

    # Check the the bounds are obeyed
    if T.any(x > max_value):
        print("Warning! Passing values to cosine_encoding encoding that exceed max!")
    if T.any(x < min_value):
        print("Warning! Passing values to cosine_encoding encoding below min!")

    # Calculate the various frequencies
    if frequency_scaling == "exponential":
        # BUGFIX: arange over ints yields an int64 tensor and torch.exp is not
        # implemented for Long, so the exponential path crashed; build floats
        freqs = T.arange(outp_dim, device=x.device, dtype=T.float32).exp()
    elif frequency_scaling == "linear":
        freqs = T.arange(1, outp_dim + 1, device=x.device)
    else:
        raise RuntimeError(f"Unrecognised frequency scaling: {frequency_scaling}")

    return T.cos((x + min_value) * freqs * math.pi / (max_value + min_value))
| 20,518 | 35.575758 | 90 | py |
PC-JeDi | PC-JeDi-main/src/models/pc_jedi.py | import copy
from functools import partial
from typing import Mapping, Optional, Tuple
import numpy as np
import pytorch_lightning as pl
import torch as T
import wandb
from jetnet.evaluation import w1efp, w1m, w1p
from src.models.diffusion import VPDiffusionSchedule, run_sampler
from src.models.modules import CosineEncoding, IterativeNormLayer
from src.models.schedulers import WarmupToConstant
from src.models.transformers import FullTransformerEncoder
from src.numpy_utils import undo_log_squash
from src.plotting import plot_mpgan_marginals
from src.torch_utils import get_loss_fn, to_np
class TransformerDiffusionGenerator(pl.LightningModule):
    """A generative model which uses the diffusion process on a point cloud."""

    def __init__(
        self,
        *,
        pc_dim: list,
        ctxt_dim: int,
        n_nodes: int,
        cosine_config: Mapping,
        diff_config: Mapping,
        normaliser_config: Mapping,
        trans_enc_config: Mapping,
        optimizer: partial,
        loss_name: str = "mse",
        mle_loss_weight: float = 0.0,
        ema_sync: float = 0.999,
        sampler_name: str = "em",
        sampler_steps: int = 100,
    ) -> None:
        """
        Args:
            pc_dim: The dimension of the point cloud
            ctxt_dim: The size of the context vector for the point cloud
            n_nodes: Max number of nodes used to train this model
            cosine_config: For defining the cosine embedding arguments
            normaliser_config: For defining the iterative normalisation layer
            diff_config: The diffusion scheduler config, defines signal/noise rates
            trans_enc_config: Keyword arguments for the TransformerEncoder network
            optimizer: Partially initialised optimizer
            ema_sync: How fast the ema network syncs with the given one
            loss_name: Name of the loss function to use for noise estimation
            mle_loss_weight: Relative weight of the Maximum-Liklihood loss term
            sampler_name: Name of O/SDE solver, does not effect training.
            sampler_steps: Steps used in generation, does not effect training.
        """
        super().__init__()
        self.save_hyperparameters(logger=False)

        # Class attributes
        self.pc_dim = pc_dim
        self.ctxt_dim = ctxt_dim
        self.n_nodes = n_nodes
        self.loss_fn = get_loss_fn(loss_name)
        self.mle_loss_weight = mle_loss_weight
        self.ema_sync = ema_sync

        # The encoder and scheduler needed for diffusion
        self.diff_sched = VPDiffusionSchedule(**diff_config)
        self.time_encoder = CosineEncoding(**cosine_config)

        # The layer which normalises the input point cloud data
        self.normaliser = IterativeNormLayer((pc_dim,), **normaliser_config)
        if self.ctxt_dim:
            self.ctxt_normaliser = IterativeNormLayer((ctxt_dim,), **normaliser_config)

        # The denoising transformer
        self.net = FullTransformerEncoder(
            inpt_dim=pc_dim,
            outp_dim=pc_dim,
            ctxt_dim=ctxt_dim + self.time_encoder.outp_dim,
            **trans_enc_config,
        )

        # A copy of the network which will sync with an exponential moving average
        self.ema_net = copy.deepcopy(self.net)

        # Sampler to run in the validation/testing loop
        self.sampler_name = sampler_name
        self.sampler_steps = sampler_steps

        # Record of the outputs of the validation step
        self.val_outs = []

    def forward(
        self,
        noisy_data: T.Tensor,
        diffusion_times: T.Tensor,
        mask: T.BoolTensor,
        ctxt: Optional[T.Tensor] = None,
    ) -> T.Tensor:
        """Pass through the model and get an estimate of the noise added to the
        input."""

        # Use the appropriate network for training or validation
        # (the EMA copy is used for all evaluation passes)
        if self.training:
            network = self.net
        else:
            network = self.ema_net

        # Encode the times and combine with existing context info
        context = self.time_encoder(diffusion_times)
        if self.ctxt_dim:
            context = T.cat([context, ctxt], dim=-1)

        # Use the selected network to esitmate the noise present in the data
        return network(noisy_data, mask=mask, ctxt=context)

    def _shared_step(self, sample: tuple) -> Tuple[T.Tensor, T.Tensor]:
        """Shared step used in both training and validaiton."""

        # Unpack the sample tuple
        nodes, mask, ctxt = sample

        # Pass through the normalisers
        nodes = self.normaliser(nodes, mask)
        if self.ctxt_dim:
            ctxt = self.ctxt_normaliser(ctxt)

        # Sample from the gaussian latent space to perturb the point clouds
        noises = T.randn_like(nodes) * mask.unsqueeze(-1)

        # Sample uniform random diffusion times and get the rates
        diffusion_times = T.rand(size=(len(nodes), 1), device=self.device)
        signal_rates, noise_rates = self.diff_sched(diffusion_times.view(-1, 1, 1))

        # Mix the signal and noise according to the diffusion equation
        noisy_nodes = signal_rates * nodes + noise_rates * noises

        # Predict the noise using the network
        pred_noises = self.forward(noisy_nodes, diffusion_times, mask, ctxt)

        # Simple noise loss is for "perceptual quality"
        simple_loss = self.loss_fn(noises[mask], pred_noises[mask])

        # MLE loss is for maximum liklihood training
        if self.mle_loss_weight:
            betas = self.diff_sched.get_betas(diffusion_times.view(-1, 1, 1))
            mle_weights = betas / noise_rates
            mle_loss = mle_weights * simple_loss
        else:
            mle_loss = T.zeros_like(simple_loss)

        return simple_loss.mean(), mle_loss.mean()

    def training_step(self, sample: tuple, _batch_idx: int) -> T.Tensor:
        """Single training step: combined loss plus an EMA sync."""
        simple_loss, mle_loss = self._shared_step(sample)
        total_loss = simple_loss + self.mle_loss_weight * mle_loss

        self.log("train/simple_loss", simple_loss)
        self.log("train/mle_loss", mle_loss)
        self.log("train/total_loss", total_loss)
        self._sync_ema_network()

        return total_loss

    def validation_step(self, sample: tuple, batch_idx: int) -> None:
        """Single validation step: losses plus a full generation pass."""
        simple_loss, mle_loss = self._shared_step(sample)
        total_loss = simple_loss + self.mle_loss_weight * mle_loss

        self.log("valid/simple_loss", simple_loss)
        self.log("valid/mle_loss", mle_loss)
        self.log("valid/total_loss", total_loss)

        # Run the full generation of the sample during a validation step
        outputs = self.full_generation(
            self.sampler_name,
            self.sampler_steps,
            mask=sample[1],
            ctxt=sample[2],
        )

        # Add to the collection of the validaiton outputs
        self.val_outs.append((to_np(outputs), to_np(sample)))

    def on_validation_epoch_end(self) -> None:
        """At the end of the validation epoch, calculate and log the metrics
        and plot the histograms.

        This function right now only works with MPGAN configs
        """

        # Combine all outputs
        gen_nodes = np.vstack([v[0] for v in self.val_outs])
        real_nodes = np.vstack([v[1][0] for v in self.val_outs])
        mask = np.vstack([v[1][1] for v in self.val_outs])
        high = np.vstack([v[1][2] for v in self.val_outs])

        # Change the data from log(pt+1) into pt fraction (needed for metrics)
        if self.trainer.datamodule.hparams.data_conf.log_squash_pt:
            gen_nodes[..., -1] = undo_log_squash(gen_nodes[..., -1]) / high[..., 0:1]
            real_nodes[..., -1] = undo_log_squash(real_nodes[..., -1]) / high[..., 0:1]

        # Apply clipping (eta/phi to +-0.5, pt fraction to [0, 1])
        gen_nodes = np.nan_to_num(gen_nodes)
        gen_nodes[..., 0] = np.clip(gen_nodes[..., 0], -0.5, 0.5)
        gen_nodes[..., 1] = np.clip(gen_nodes[..., 1], -0.5, 0.5)
        gen_nodes[..., 2] = np.clip(gen_nodes[..., 2], 0, 1)
        real_nodes = np.nan_to_num(real_nodes)
        real_nodes[..., 0] = np.clip(real_nodes[..., 0], -0.5, 0.5)
        real_nodes[..., 1] = np.clip(real_nodes[..., 1], -0.5, 0.5)
        real_nodes[..., 2] = np.clip(real_nodes[..., 2], 0, 1)

        # Calculate and log the Wasserstein discriminants
        bootstrap = {
            "num_eval_samples": 10000,
            "num_batches": 10,
        }
        w1m_val, w1m_err = w1m(real_nodes, gen_nodes, **bootstrap)
        w1p_val, w1p_err = w1p(real_nodes, gen_nodes, **bootstrap)
        w1efp_val, w1efp_err = w1efp(real_nodes, gen_nodes, efp_jobs=1, **bootstrap)
        self.log("valid/w1m", w1m_val)
        self.log("valid/w1m_err", w1m_err)
        self.log("valid/w1p", w1p_val.mean())
        self.log("valid/w1p_err", w1p_err.mean())
        self.log("valid/w1efp", w1efp_val.mean())
        self.log("valid/w1efp_err", w1efp_err.mean())

        # Plot the MPGAN-like marginals
        plot_mpgan_marginals(gen_nodes, real_nodes, mask, self.trainer.current_epoch)
        self.val_outs.clear()

    def _sync_ema_network(self) -> None:
        """Updates the Exponential Moving Average Network."""
        with T.no_grad():
            for params, ema_params in zip(
                self.net.parameters(), self.ema_net.parameters()
            ):
                ema_params.data.copy_(
                    self.ema_sync * ema_params.data
                    + (1.0 - self.ema_sync) * params.data
                )

    def on_fit_start(self, *_args) -> None:
        """Function to run at the start of training."""

        # Define the metrics for wandb (otherwise the min wont be stored!)
        if wandb.run is not None:
            wandb.define_metric("train/simple_loss", summary="min")
            wandb.define_metric("train/mle_loss", summary="min")
            wandb.define_metric("train/total_loss", summary="min")
            wandb.define_metric("valid/simple_loss", summary="min")
            wandb.define_metric("valid/mle_loss", summary="min")
            wandb.define_metric("valid/total_loss", summary="min")
            wandb.define_metric("valid/w1m", summary="min")
            wandb.define_metric("valid/w1p", summary="min")
            wandb.define_metric("valid/w1efp", summary="min")

    def set_sampler(
        self, sampler_name: Optional[str] = None, sampler_steps: Optional[int] = None
    ) -> None:
        """Replaces the sampler list with a new one."""
        if sampler_name is not None:
            self.sampler_name = sampler_name
        if sampler_steps is not None:
            self.sampler_steps = sampler_steps

    def full_generation(
        self,
        sampler: str,
        steps: int,
        mask: Optional[T.BoolTensor] = None,
        ctxt: Optional[T.Tensor] = None,
        initial_noise: Optional[T.Tensor] = None,
    ) -> T.Tensor:
        """Fully generate a batch of data from noise, given context information
        and a mask."""

        # Either a mask or initial noise must be defined or we dont know how
        # many samples to generate and with what cardinality
        if mask is None and initial_noise is None:
            raise ValueError("Please provide either a mask or noise to generate from")
        if mask is None:
            mask = T.full(initial_noise.shape[:-1], True, device=self.device)
        if initial_noise is None:
            initial_noise = T.randn((*mask.shape, self.pc_dim), device=self.device)

        # Normalise the context
        if self.ctxt_dim:
            ctxt = self.ctxt_normaliser(ctxt)
            assert len(ctxt) == len(initial_noise)

        # Run the sampling method
        outputs, _ = run_sampler(
            sampler,
            self,
            self.diff_sched,
            initial_noise=initial_noise * mask.unsqueeze(-1),
            n_steps=steps,
            mask=mask,
            ctxt=ctxt,
            clip_predictions=(-25, 25),
        )

        # Ensure that the output adheres to the mask
        outputs[~mask] = 0

        # Return the normalisation of the generated point cloud
        return self.normaliser.reverse(outputs, mask=mask)

    def configure_optimizers(self) -> dict:
        """Configure the optimisers and learning rate sheduler for this
        model."""

        # Finish initialising the optimiser and create the scheduler
        opt = self.hparams.optimizer(params=self.parameters())
        sched = WarmupToConstant(opt, num_steps=10_000)

        # Return the dict for the lightning trainer
        return {
            "optimizer": opt,
            "lr_scheduler": {
                "scheduler": sched,
                "interval": "step",
                "frequency": 1,
            },
        }
| 12,805 | 38.403077 | 87 | py |
PC-JeDi | PC-JeDi-main/src/models/__init__.py | 0 | 0 | 0 | py |
|
PC-JeDi | PC-JeDi-main/scripts/train.py | import pyrootutils
root = pyrootutils.setup_root(search_from=__file__, pythonpath=True)
import logging
import hydra
import pytorch_lightning as pl
from omegaconf import DictConfig
from src.hydra_utils import (
instantiate_collection,
log_hyperparameters,
print_config,
reload_original_config,
save_config,
)
log = logging.getLogger(__name__)
@hydra.main(
    version_base=None, config_path=str(root / "configs"), config_name="train.yaml"
)
def main(cfg: DictConfig) -> None:
    """Entry point: build the datamodule, model, callbacks, loggers and
    trainer from the hydra config, then launch (or resume) training."""
    log.info("Setting up full job config")
    # When resuming, discard the fresh config and reload the one saved by the
    # original run so the job continues with identical settings.
    if cfg.full_resume:
        cfg = reload_original_config(cfg)
    print_config(cfg)
    if cfg.seed:
        log.info(f"Setting seed to: {cfg.seed}")
        pl.seed_everything(cfg.seed, workers=True)
    log.info("Instantiating the data module")
    datamodule = hydra.utils.instantiate(cfg.datamodule)
    log.info("Instantiating the model")
    # The model's dimensions are taken from the data, not the config
    model = hydra.utils.instantiate(
        cfg.model,
        pc_dim=datamodule.dim,
        n_nodes=datamodule.n_nodes,
        ctxt_dim=datamodule.ctxt_dim,
    )
    log.info(model)
    log.info("Instantiating all callbacks")
    callbacks = instantiate_collection(cfg.callbacks)
    log.info("Instantiating the loggers")
    loggers = instantiate_collection(cfg.loggers)
    log.info("Instantiating the trainer")
    trainer = hydra.utils.instantiate(cfg.trainer, callbacks=callbacks, logger=loggers)
    if loggers:
        log.info("Logging all hyperparameters")
        log_hyperparameters(cfg, model, trainer)
    log.info("Saving config so job can be resumed")
    save_config(cfg)
    log.info("Starting training!")
    trainer.fit(model, datamodule, ckpt_path=cfg.ckpt_path)
if __name__ == "__main__":
    main()
| 1,731 | 23.742857 | 87 | py |
trees_from_transformers | trees_from_transformers-master/run.py | import argparse
import datetime
import logging
import os
import pickle
from tqdm import tqdm
import torch
from transformers import *
from data.dataset import Dataset
from utils.measure import Measure
from utils.parser import not_coo_parser, parser
from utils.tools import set_seed, select_indices, group_indices
from utils.yk import get_actions, get_nonbinary_spans
# One entry per evaluated language model:
# (model class, tokenizer class, config class, pretrained checkpoint name).
MODELS = [(BertModel, BertTokenizer, BertConfig, 'bert-base-cased'),
          (BertModel, BertTokenizer, BertConfig, 'bert-large-cased'),
          (GPT2Model, GPT2Tokenizer, GPT2Config, 'gpt2'),
          (GPT2Model, GPT2Tokenizer, GPT2Config, 'gpt2-medium'),
          (RobertaModel, RobertaTokenizer, RobertaConfig, 'roberta-base'),
          (RobertaModel, RobertaTokenizer, RobertaConfig, 'roberta-large'),
          (XLNetModel, XLNetTokenizer, XLNetConfig, 'xlnet-base-cased'),
          (XLNetModel, XLNetTokenizer, XLNetConfig, 'xlnet-large-cased')]
def evaluate(args):
    """Run unsupervised constituency parsing over every model in MODELS.

    For each pretrained (or from-scratch) transformer, derives syntactic
    distances from hidden states and attention maps, parses each sentence
    with the distance-based parser, scores the trees against the gold spans,
    and writes a per-model report file. Returns {model_name: Score dict}.
    """
    scores = dict()
    for model_class, tokenizer_class, model_config, pretrained_weights in MODELS:
        tokenizer = tokenizer_class.from_pretrained(
            pretrained_weights, cache_dir=args.lm_cache_path)
        if args.from_scratch:
            # Randomly initialised weights, but the pretrained architecture
            config = model_config.from_pretrained(pretrained_weights)
            config.output_hidden_states = True
            config.output_attentions = True
            model = model_class(config).to(args.device)
        else:
            model = model_class.from_pretrained(
                pretrained_weights,
                cache_dir=args.lm_cache_path,
                output_hidden_states=True,
                output_attentions=True).to(args.device)
        # Probe with a dummy sentence to discover layer/head/hidden sizes
        with torch.no_grad():
            test_sent = tokenizer.encode('test', add_special_tokens=False)
            token_ids = torch.tensor([test_sent]).to(args.device)
            all_hidden, all_att = model(token_ids)[-2:]
            n_layers = len(all_att)
            n_att = all_att[0].size(1)
            n_hidden = all_hidden[0].size(-1)
        measure = Measure(n_layers, n_att)
        data = Dataset(path=args.data_path, tokenizer=tokenizer)
        for idx, s in tqdm(enumerate(data.sents), total=len(data.sents),
                           desc=pretrained_weights, ncols=70):
            raw_tokens = data.raw_tokens[idx]
            tokens = data.tokens[idx]
            # Single-word sentences have no spans to predict
            if len(raw_tokens) < 2:
                data.cnt -= 1
                continue
            token_ids = tokenizer.encode(s, add_special_tokens=False)
            token_ids_tensor = torch.tensor([token_ids]).to(args.device)
            with torch.no_grad():
                all_hidden, all_att = model(token_ids_tensor)[-2:]
                # Drop the embedding layer output; keep all attention maps
                all_hidden, all_att = list(all_hidden[1:]), list(all_att)
            # (n_layers, seq_len, hidden_dim)
            all_hidden = torch.cat([all_hidden[n] for n in range(n_layers)], dim=0)
            # (n_layers, n_att, seq_len, seq_len)
            all_att = torch.cat([all_att[n] for n in range(n_layers)], dim=0)
            # Collapse sub-word pieces back to word-level positions
            if len(tokens) > len(raw_tokens):
                th = args.token_heuristic
                if th == 'first' or th == 'last':
                    # Keep a single sub-word position per word
                    mask = select_indices(tokens, raw_tokens, pretrained_weights, th)
                    assert len(mask) == len(raw_tokens)
                    all_hidden = all_hidden[:, mask]
                    all_att = all_att[:, :, mask, :]
                    all_att = all_att[:, :, :, mask]
                else:
                    # mask = torch.tensor(data.masks[idx])
                    # 'mean' heuristic: average hidden states over each word's
                    # pieces; sum incoming attention, then average outgoing
                    mask = group_indices(tokens, raw_tokens, pretrained_weights)
                    raw_seq_len = len(raw_tokens)
                    all_hidden = torch.stack(
                        [all_hidden[:, mask == i].mean(dim=1)
                         for i in range(raw_seq_len)], dim=1)
                    all_att = torch.stack(
                        [all_att[:, :, :, mask == i].sum(dim=3)
                         for i in range(raw_seq_len)], dim=3)
                    all_att = torch.stack(
                        [all_att[:, :, mask == i].mean(dim=2)
                         for i in range(raw_seq_len)], dim=2)
            # Pairs of adjacent words -> syntactic distances per measure
            l_hidden, r_hidden = all_hidden[:, :-1], all_hidden[:, 1:]
            l_att, r_att = all_att[:, :, :-1], all_att[:, :, 1:]
            syn_dists = measure.derive_dists(l_hidden, r_hidden, l_att, r_att)
            gold_spans = data.gold_spans[idx]
            gold_tags = data.gold_tags[idx]
            assert len(gold_spans) == len(gold_tags)
            for m, d in syn_dists.items():
                pred_spans = []
                for i in range(measure.scores[m].n):
                    dist = syn_dists[m][i].tolist()
                    if len(dist) > 1:
                        # Linearly decaying right-branching bias (lambda)
                        bias_base = (sum(dist) / len(dist)) * args.bias
                        bias = [bias_base * (1 - (1 / (len(dist) - 1)) * x)
                                for x in range(len(dist))]
                        dist = [dist[i] + bias[i] for i in range(len(dist))]
                    if args.use_not_coo_parser:
                        pred_tree = not_coo_parser(dist, raw_tokens)
                    else:
                        pred_tree = parser(dist, raw_tokens)
                    ps = get_nonbinary_spans(get_actions(pred_tree))[0]
                    pred_spans.append(ps)
                measure.scores[m].update(pred_spans, gold_spans, gold_tags)
        measure.derive_final_score()
        scores[pretrained_weights] = measure.scores
        # Write a human-readable per-layer / per-head report for this model
        if not os.path.exists(args.result_path):
            os.makedirs(args.result_path)
        with open(f'{args.result_path}/{pretrained_weights}.txt', 'w') as f:
            print('Model name:', pretrained_weights, file=f)
            print('Experiment time:', args.time, file=f)
            print('# of layers:', n_layers, file=f)
            print('# of attentions:', n_att, file=f)
            print('# of hidden dimensions:', n_hidden, file=f)
            print('# of processed sents:', data.cnt, file=f)
            max_corpus_f1, max_sent_f1 = 0, 0
            for n in range(n_layers):
                print(f'[Layer {n + 1}]', file=f)
                print('-' * (119 + measure.max_m_len), file=f)
                for m, s in measure.scores.items():
                    if m in measure.h_measures + measure.a_avg_measures:
                        # One row per layer for hidden-state / averaged-head
                        # measures
                        print(
                            f'| {m.upper()} {" " * (measure.max_m_len - len(m))} '
                            f'| Corpus F1: {s.corpus_f1[n] * 100:.2f} '
                            f'| Sent F1: {s.sent_f1[n] * 100:.2f} ',
                            end='', file=f)
                        for z in range(len(s.label_recalls[0])):
                            print(
                                f'| {s.labels[z]}: '
                                f'{s.label_recalls[n][z] * 100:.2f} ',
                                end='', file=f)
                        print('|', file=f)
                        if s.sent_f1[n] > max_sent_f1:
                            max_corpus_f1 = s.corpus_f1[n]
                            max_sent_f1 = s.sent_f1[n]
                            max_measure = m
                            max_layer = n + 1
                    else:
                        # Per-head measures: one row per attention head
                        for i in range(n_att):
                            m_att = str(i) if i > 9 else '0' + str(i)
                            m_att = m + m_att + " " * (
                                    measure.max_m_len - len(m))
                            i_att = n_att * n + i
                            print(
                                f'| {m_att.upper()}'
                                f'| Corpus F1: {s.corpus_f1[i_att] * 100:.2f} '
                                f'| Sent F1: {s.sent_f1[i_att] * 100:.2f} ',
                                end='', file=f)
                            for z in range(len(s.label_recalls[0])):
                                print(f'| {s.labels[z]}: '
                                      f'{s.label_recalls[i_att][z] * 100:.2f} ',
                                      end='', file=f)
                            print('|', file=f)
                            if s.sent_f1[i_att] > max_sent_f1:
                                max_corpus_f1 = s.corpus_f1[i_att]
                                max_sent_f1 = s.sent_f1[i_att]
                                max_measure = m_att
                                max_layer = n + 1
                print('-' * (119 + measure.max_m_len), file=f)
            print(f'[MAX]: | Layer: {max_layer} '
                  f'| {max_measure.upper()} '
                  f'| Corpus F1: {max_corpus_f1 * 100:.2f} '
                  f'| Sent F1: {max_sent_f1 * 100:.2f} |')
            print(f'[MAX]: | Layer: {max_layer} '
                  f'| {max_measure.upper()} '
                  f'| Corpus F1: {max_corpus_f1 * 100:.2f} '
                  f'| Sent F1: {max_sent_f1 * 100:.2f} |', file=f)
    return scores
def main():
    """Parse the CLI arguments, run `evaluate` over all models, and pickle
    the resulting score dictionary under the derived result path."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-path',
                        default='.data/PTB/ptb-test.txt', type=str)
    parser.add_argument('--result-path', default='outputs', type=str)
    parser.add_argument('--lm-cache-path',
                        default='/data/transformers', type=str)
    parser.add_argument('--from-scratch', default=False, action='store_true')
    parser.add_argument('--gpu', default=0, type=int)
    parser.add_argument('--bias', default=0.0, type=float,
                        help='the right-branching bias hyperparameter lambda')
    parser.add_argument('--seed', default=1234, type=int)
    parser.add_argument('--token-heuristic', default='mean', type=str,
                        help='Available options: mean, first, last')
    parser.add_argument('--use-not-coo-parser', default=False,
                        action='store_true',
                        help='Turning on this option will allow you to exploit '
                             'the NOT-COO parser (named by Dyer et al. 2019), '
                             'which has been broadly adopted by recent methods '
                             'for unsupervised parsing. As this parser utilizes'
                             ' the right-branching bias in its inner workings, '
                             'it may give rise to some unexpected gains or '
                             'latent issues for the resulting trees. For more '
                             'details, see https://arxiv.org/abs/1909.09428.')
    args = parser.parse_args()
    setattr(args, 'device', f'cuda:{args.gpu}'
            if torch.cuda.is_available() and args.gpu >= 0 else 'cpu')
    setattr(args, 'time', datetime.datetime.now().strftime('%Y%m%d-%H:%M:%S'))
    dataset_name = args.data_path.split('/')[-1].split('.')[0]
    # Bug fix: this suffix string previously reused (and shadowed) the
    # `parser` variable holding the ArgumentParser; renamed for clarity.
    parser_suffix = '-w-not-coo-parser' if args.use_not_coo_parser else ''
    pretrained = 'scratch' if args.from_scratch else 'pretrained'
    # Result directory encodes dataset, heuristic, init mode, bias and parser
    result_path = f'{args.result_path}/{dataset_name}-{args.token_heuristic}'
    result_path += f'-{pretrained}-{args.bias}{parser_suffix}'
    setattr(args, 'result_path', result_path)
    set_seed(args.seed)
    # Silence the (very chatty) transformers warnings
    logging.disable(logging.WARNING)
    print('[List of arguments]')
    for a in args.__dict__:
        print(f'{a}: {args.__dict__[a]}')
    scores = evaluate(args)
    with open(f'{args.result_path}/scores.pickle', 'wb') as f:
        pickle.dump(scores, f)
if __name__ == '__main__':
    main()
| 11,441 | 45.893443 | 85 | py |
trees_from_transformers | trees_from_transformers-master/utils/yk.py | """
The functions in this file are originated from the code for
Compound Probabilistic Context-Free Grammars for Grammar Induction,
Y. Kim et al., ACL 2019.
For more details, visit https://github.com/harvardnlp/compound-pcfg.
"""
import re
def clean_number(w):
    """Replace every number-like token (digit runs, optionally separated by
    ',' or '.') in *w* with the placeholder character 'N'."""
    return re.sub('[0-9]{1,}([,.]?[0-9]*)*', 'N', w)
def get_stats(span1, span2):
    """Count true positives, false positives and false negatives of the
    predicted spans *span1* against the gold spans *span2*.

    Returns:
        (tp, fp, fn) tuple of ints.
    """
    tp = sum(1 for span in span1 if span in span2)
    fp = len(span1) - tp
    fn = sum(1 for span in span2 if span not in span1)
    return tp, fp, fn
def get_nonbinary_spans(actions, SHIFT=0, REDUCE=1):
    """Replay a SHIFT/NT(...)/REDUCE action sequence on a stack and recover
    the constituent spans.

    Returns:
        spans: list of (start, end) word-index spans (width > 1 only).
        tags: the non-terminal label for each span (prefix before '-').
        binary_actions: equivalent binarised SHIFT/REDUCE sequence.
        nonbinary_actions: SHIFT/REDUCE codes of the original sequence.
    """
    spans = []
    tags = []
    stack = []
    pointer = 0
    binary_actions = []
    nonbinary_actions = []
    num_shift = 0
    num_reduce = 0
    for action in actions:
        # print(action, stack)
        if action == "SHIFT":
            # Push a single-word span and advance the word pointer
            nonbinary_actions.append(SHIFT)
            stack.append((pointer, pointer))
            pointer += 1
            binary_actions.append(SHIFT)
            num_shift += 1
        elif action[:3] == 'NT(':
            # stack.append('(')
            # Push the label only (e.g. 'NP' from 'NT(NP-SBJ)')
            stack.append(action[3:-1].split('-')[0])
        elif action == "REDUCE":
            nonbinary_actions.append(REDUCE)
            right = stack.pop()
            left = right
            n = 1
            # while stack[-1] is not '(':
            # Pop children (tuples) until the label string is on top
            while type(stack[-1]) is tuple:
                left = stack.pop()
                n += 1
            span = (left[0], right[1])
            tag = stack.pop()
            # Only record spans covering more than one word
            if left[0] != right[1]:
                spans.append(span)
                tags.append(tag)
            stack.append(span)
            # A reduce over n children corresponds to n-1 binary reduces
            while n > 1:
                n -= 1
                binary_actions.append(REDUCE)
                num_reduce += 1
        else:
            assert False
    assert (len(stack) == 1)
    assert (num_shift == num_reduce + 1)
    return spans, tags, binary_actions, nonbinary_actions
def get_actions(line):
    """Convert a bracketed tree string into a SHIFT/NT(...)/REDUCE action
    sequence by scanning its parentheses left to right."""
    output_actions = []
    line_strip = line.rstrip()
    i = 0
    max_idx = (len(line_strip) - 1)
    while i <= max_idx:
        assert line_strip[i] == '(' or line_strip[i] == ')'
        if line_strip[i] == '(':
            if is_next_open_bracket(line_strip, i):  # open non-terminal
                curr_NT = get_nonterminal(line_strip, i)
                output_actions.append('NT(' + curr_NT + ')')
                i += 1
                # get the next open bracket,
                # which may be a terminal or another non-terminal
                while line_strip[i] != '(':
                    i += 1
            else:  # it's a terminal symbol
                output_actions.append('SHIFT')
                # skip over '(tag word)'
                while line_strip[i] != ')':
                    i += 1
                i += 1
                while line_strip[i] != ')' and line_strip[i] != '(':
                    i += 1
        else:
            output_actions.append('REDUCE')
            if i == max_idx:
                break
            i += 1
            while line_strip[i] != ')' and line_strip[i] != '(':
                i += 1
    assert i == max_idx
    return output_actions
def is_next_open_bracket(line, start_idx):
    """Return True if the first bracket after *start_idx* is '(' (i.e. the
    bracket at start_idx opens a non-terminal), False if it is ')'."""
    for ch in line[start_idx + 1:]:
        if ch == '(':
            return True
        if ch == ')':
            return False
    raise IndexError('Bracket possibly not balanced, '
                     'open bracket not followed by closed bracket')
def get_nonterminal(line, start_idx):
    """Read the non-terminal label that starts just after the '(' at
    *start_idx*, i.e. the characters up to the first space."""
    assert line[start_idx] == '('  # make sure it's an open bracket
    label_chars = []
    for ch in line[start_idx + 1:]:
        if ch == ' ':
            break
        # labels never contain brackets
        assert not (ch == '(') and not (ch == ')')
        label_chars.append(ch)
    return ''.join(label_chars)
def get_tags_tokens_lowercase(line):
    """Extract the terminals of a bracketed tree string.

    Returns:
        [tags, tokens, lowercase_tokens]: three parallel lists, one entry per
        terminal '(TAG word)' pair in the tree.
    """
    output = []
    line_strip = line.rstrip()
    for i in range(len(line_strip)):
        if i == 0:
            assert line_strip[i] == '('
        # fulfilling this condition means this is a terminal symbol
        if line_strip[i] == '(' and not (is_next_open_bracket(line_strip, i)):
            output.append(get_between_brackets(line_strip, i))
    # each collected entry is the 'TAG word' text between the brackets
    output_tags = []
    output_tokens = []
    output_lowercase = []
    for terminal in output:
        terminal_split = terminal.split()
        # print(terminal, terminal_split)
        assert len(
            terminal_split) == 2  # each terminal contains a POS tag and word
        output_tags.append(terminal_split[0])
        output_tokens.append(terminal_split[1])
        output_lowercase.append(terminal_split[1].lower())
    return [output_tags, output_tokens, output_lowercase]
def get_between_brackets(line, start_idx):
    """Return the text between the '(' at *start_idx* and the next ')'."""
    collected = []
    for ch in line[start_idx + 1:]:
        if ch == ')':
            break
        # nested brackets are not expected inside a terminal
        assert not (ch == '(')
        collected.append(ch)
    return ''.join(collected)
| 4,935 | 29.097561 | 78 | py |
trees_from_transformers | trees_from_transformers-master/utils/parser.py | import numpy as np
def not_coo_parser(score, sent):
    """Recursively build a binary tree from syntactic distances using the
    NOT-COO split rule: the word *after* the argmax gap is attached as the
    left child of the right subtree (right-branching bias in the split).

    Args:
        score: distances between adjacent words, len(score) == len(sent) - 1.
        sent: list of word strings.

    Returns:
        The tree as a bracketed string with unlabeled 'T' nodes.
    """
    assert len(score) == len(sent) - 1
    if len(score) == 0:
        parse_tree = f'(T {sent[0]} )'
    elif len(score) == 1:
        parse_tree = f'(T (T {sent[0]} ) (T {sent[1]} ) )'
    else:
        idx_max = np.argmax(score)
        l_len = len(sent[:idx_max + 1])
        # NOTE: sent[idx_max + 1] is held out and re-attached below, so the
        # right side starts at idx_max + 2 (unlike the plain `parser`)
        r_len = len(sent[idx_max + 2:])
        if l_len > 0 and r_len > 0:
            l_tree = not_coo_parser(score[:idx_max], sent[:idx_max + 1])
            r_tree = not_coo_parser(score[idx_max + 2:], sent[idx_max + 2:])
            r_tree = f'(T (T {sent[idx_max +1]} ) {r_tree} )'
            parse_tree = f'(T {l_tree} {r_tree} )'
        else:
            if l_len == 0:
                r_tree = not_coo_parser(score[idx_max + 2:], sent[idx_max + 2:])
                r_tree = f'(T (T {sent[idx_max +1]} ) {r_tree} )'
                parse_tree = r_tree
            else:
                # held-out word is the last one: attach it as the right child
                l_tree = not_coo_parser(score[:idx_max], sent[:idx_max + 1])
                parse_tree = f'(T {l_tree} (T {sent[idx_max + 1]} ) )'
    return parse_tree
def parser(score, sent):
    """Greedy top-down parser: split the sentence at the position with the
    largest syntactic distance and recurse on both halves.

    Args:
        score: distances between adjacent words, len(score) == len(sent) - 1.
        sent: list of word strings.

    Returns:
        The binary tree as a bracketed string with unlabeled 'T' nodes.
    """
    assert len(score) == len(sent) - 1
    if len(score) == 0:
        return f'(T {sent[0]} )'
    if len(score) == 1:
        return f'(T (T {sent[0]} ) (T {sent[1]} ) )'
    split = np.argmax(score)
    left_words = sent[:split + 1]
    right_words = sent[split + 1:]
    if len(left_words) > 0 and len(right_words) > 0:
        left_tree = parser(score[:split], left_words)
        right_tree = parser(score[split + 1:], right_words)
        return f'(T {left_tree} {right_tree} )'
    if len(left_words) == 0:
        return parser(score[split + 1:], right_words)
    return parser(score[:split], left_words)
| 1,936 | 33.589286 | 80 | py |
trees_from_transformers | trees_from_transformers-master/utils/score.py | import numpy as np
import torch
from utils.yk import get_stats
class Score(object):
    """Accumulates parsing metrics for `n` parallel predictors (one per
    layer, or per layer*head): corpus-level F1, sentence-level F1, and
    per-label span recall for six phrase types."""
    def __init__(self, n):
        # Per-predictor (tp, fp, fn); turned into an F1 per predictor by
        # derive_final_score()
        self.corpus_f1 = torch.zeros(n, 3, dtype=torch.float)
        self.sent_f1 = torch.zeros(n, dtype=torch.float)
        self.n = n
        self.cnt = 0  # number of sentences scored so far
        self.labels = ['SBAR', 'NP', 'VP', 'PP', 'ADJP', 'ADVP']
        self.label_recalls = np.zeros((n, 6), dtype=float)
        self.label_cnts = np.zeros(6, dtype=float)
    def update(self, pred_spans, gold_spans, gold_tags):
        """Score one sentence. `[:-1]` drops the trivial whole-sentence span
        from both prediction and gold before F1 computation."""
        pred_sets = [set(ps[:-1]) for ps in pred_spans]
        gold_set = set(gold_spans[:-1])
        self.update_corpus_f1(pred_sets, gold_set)
        self.update_sentence_f1(pred_sets, gold_set)
        self.update_label_recalls(pred_spans, gold_spans, gold_tags)
        self.cnt += 1
    def update_label_recalls(self, pred, gold, tags):
        """Count, per predictor, how many gold spans of each tracked label
        were recovered in the predicted span list."""
        for i, tag in enumerate(tags):
            if tag not in self.labels:
                continue
            tag_idx = self.labels.index(tag)
            self.label_cnts[tag_idx] += 1
            for z in range(len(pred)):
                if gold[i] in pred[z]:
                    self.label_recalls[z][tag_idx] += 1
    def update_corpus_f1(self, pred, gold):
        # Accumulate raw (tp, fp, fn) for every predictor at once
        stats = torch.tensor([get_stats(pred[i], gold) for i in range(self.n)],
                             dtype=torch.float)
        self.corpus_f1 += stats
    def update_sentence_f1(self, pred, gold):
        # sent-level F1 is based on L83-89 from
        # https://github.com/yikangshen/PRPN/test_phrase_grammar.py
        for i in range(self.n):
            model_out, std_out = pred[i], gold
            overlap = model_out.intersection(std_out)
            prec = float(len(overlap)) / (len(model_out) + 1e-8)
            reca = float(len(overlap)) / (len(std_out) + 1e-8)
            # Empty predictions/golds count as perfect on that side
            if len(std_out) == 0:
                reca = 1.
            if len(model_out) == 0:
                prec = 1.
            f1 = 2 * prec * reca / (prec + reca + 1e-8)
            self.sent_f1[i] += f1
    def derive_final_score(self):
        """Convert accumulated counts to final scores (call exactly once).
        NOTE(review): divides by label_cnts / (tp+fp) without a zero guard —
        labels never seen in the data yield NaN entries."""
        tp = self.corpus_f1[:, 0]
        fp = self.corpus_f1[:, 1]
        fn = self.corpus_f1[:, 2]
        prec = tp / (tp + fp)
        recall = tp / (tp + fn)
        epsilon = 1e-8
        self.corpus_f1 = 2 * prec * recall / (prec + recall + epsilon)
        self.sent_f1 /= self.cnt
        for i in range(len(self.label_recalls)):
            for j in range(len(self.label_recalls[0])):
                self.label_recalls[i][j] /= self.label_cnts[j]
| 2,521 | 35.550725 | 79 | py |
trees_from_transformers | trees_from_transformers-master/utils/tools.py | import logging
import random
import torch
# Sub-word marker character used by each tokenizer family (stripped/matched
# when re-aligning word pieces to whole words below).
specials = {'bert': '#', 'gpt2': 'Ġ', 'xlnet': '▁', 'roberta': 'Ġ'}
def set_seed(seed):
    """Seed every RNG the experiments rely on: torch CPU, torch CUDA, and
    Python's `random` module."""
    for seeder in (torch.manual_seed, torch.cuda.manual_seed, random.seed):
        seeder(seed)
def select_indices(tokens, raw_tokens, model, mode):
    """For each whole word, pick one sub-word position: the 'first' or the
    'last' piece of that word.

    Args:
        tokens: sub-word tokens produced by the model's tokenizer.
        raw_tokens: the original whole words.
        model: pretrained-weights name; its family prefix selects the
            sub-word marker character from `specials`.
        mode: 'first' or 'last'.

    Returns:
        List of selected sub-word indices, one per word in raw_tokens.

    Raises:
        NotImplementedError: for any other mode.
        Exception: if the sub-word pieces cannot be re-assembled into the
            raw words.
    """
    mask = []
    raw_i = 0
    collapsed = ''
    model = model.split('-')[0]
    special = specials[model]
    for i in range(len(tokens)):
        token = tokens[i]
        # Strip the family-specific sub-word marker prefix
        while len(token) > 0 and token[0] == special:
            token = token[1:]
        # A non-empty piece starting a new word marks the word's first index
        if collapsed == '' and len(token) > 0:
            start_idx = i
        collapsed += token
        # Pieces have re-assembled the current raw word
        if collapsed == raw_tokens[raw_i]:
            if mode == 'first':
                mask.append(start_idx)
            elif mode == 'last':
                mask.append(i)
            else:
                raise NotImplementedError
            raw_i += 1
            collapsed = ''
    if raw_i != len(raw_tokens):
        raise Exception(f'Token mismatch: \n{tokens}\n{raw_tokens}')
    return mask
def group_indices(tokens, raw_tokens, model):
    """Map every sub-word token to the index of the whole word it belongs to.

    Returns:
        LongTensor of length len(tokens), where entry i is the raw-word index
        of sub-word i (used to mean/sum-pool piece representations).

    Raises:
        Exception: if the pieces cannot be re-assembled into the raw words.
    """
    mask = []
    raw_i = 0
    collapsed = ''
    model = model.split('-')[0]
    special = specials[model]
    for i in range(len(tokens)):
        token = tokens[i]
        # Strip the family-specific sub-word marker prefix
        while len(token) > 0 and token[0] == special:
            token = token[1:]
        collapsed += token
        mask.append(raw_i)
        # Once the current word is complete, move on to the next one
        if collapsed == raw_tokens[raw_i]:
            raw_i += 1
            collapsed = ''
    if raw_i != len(raw_tokens):
        raise Exception(f'Token mismatch: \n{tokens}\n{raw_tokens}')
    return torch.tensor(mask)
| 1,612 | 24.603175 | 68 | py |
trees_from_transformers | trees_from_transformers-master/utils/extractor.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Extractor(nn.Module):
    """Scores each adjacent pair of hidden states with a single linear layer
    and trains it with a pairwise margin ranking loss."""
    def __init__(self, n_hidden):
        super(Extractor, self).__init__()
        self.linear = nn.Linear(n_hidden * 2, 1)
        # Small uniform init keeps initial distances near zero
        nn.init.uniform_(self.linear.weight, -0.01, 0.01)
        nn.init.uniform_(self.linear.bias, 0)
    def forward(self, l, r):
        """Concatenate left/right states and project each pair to a scalar.
        Returns a tensor of shape (seq_len-1)."""
        pair = torch.cat([l, r], dim=-1)
        return self.linear(pair).squeeze(-1)
    def loss(self, d, gold):
        """Hinge loss over all index pairs, encouraging the predicted
        distances `d` to follow the ordering of the `gold` distances."""
        assert len(d) == len(gold)
        gold = d.new_tensor(gold)
        total = 0
        for a in range(len(d)):
            for b in range(a + 1, len(d)):
                total += F.relu(1 - torch.sign(gold[a] - gold[b]) * (d[a] - d[b]))
        return total
| 752 | 27.961538 | 77 | py |
trees_from_transformers | trees_from_transformers-master/utils/measure.py | import math
import torch
import torch.nn.functional as F
from utils.score import Score
class Measure(object):
    """Computes syntactic distances between adjacent positions from hidden
    states (cos/l1/l2) and attention distributions (Hellinger/JSD, plus
    head-averaged variants), keeping one Score tracker per measure."""
    def __init__(self, n_layers, n_att):
        self.h_measures = ['cos', 'l1', 'l2']
        self.a_measures = ['hellinger', 'jsd']
        self.a_avg_measures = ['avg_hellinger', 'avg_jsd']
        self.measures = self.h_measures + self.a_measures + self.a_avg_measures
        self.max_m_len = max([len(m) for m in self.measures]) + 2
        # Hidden-state and head-averaged measures: one predictor per layer;
        # per-head attention measures: one predictor per layer*head
        self.scores = {m: Score(n_layers) for m in self.h_measures}
        for m in self.a_measures:
            self.scores[m] = Score(n_layers * n_att)
        for m in self.a_avg_measures:
            self.scores[m] = Score(n_layers)
    def derive_dists(self, l_hidden, r_hidden, l_att, r_att):
        """Compute every distance measure for the given adjacent-position
        tensors and return {measure_name: (n_predictors, seq_len-1)}."""
        syn_dists = {}
        for m in self.h_measures:
            syn_dists[m] = getattr(self, m)(l_hidden, r_hidden)
        for m in self.a_measures:
            syn_dists[m] = getattr(self, m)(l_att, r_att)
            # Flatten (n_layers, n_att, ...) into one row per head
            syn_dists[m] = syn_dists[m].view(-1, syn_dists[m].size(-1))
        for m in self.a_avg_measures:
            syn_dists[m] = getattr(self, m)(l_att, r_att)
        return syn_dists
    def derive_final_score(self):
        for m in self.scores.keys():
            self.scores[m].derive_final_score()
    @staticmethod
    def cos(l_hidden, r_hidden):
        # (n_layers, seq_len-1, hidden_dim) * 2 -> (n_layers, seq_len-1)
        # shifted to [0, 1] so larger = more similar
        return (F.cosine_similarity(l_hidden, r_hidden, dim=-1) + 1) / 2
    @staticmethod
    def l1(l_hidden, r_hidden):
        # (n_layers, seq_len-1, hidden_dim) * 2 -> (n_layers, seq_len-1)
        return torch.norm(l_hidden - r_hidden, p=1, dim=-1)
    @staticmethod
    def l2(l_hidden, r_hidden):
        # (n_layers, seq_len-1, hidden_dim) * 2 -> (n_layers, seq_len-1)
        return torch.norm(l_hidden - r_hidden, p=2, dim=-1)
    @staticmethod
    def kl(p, q):
        # KL(p || q) over the last dim, with epsilon-smoothing + renorm
        eps = 1e-30
        p, q = p + eps, q + eps
        p, q = p / p.sum(dim=-1, keepdim=True), q / q.sum(dim=-1, keepdim=True)
        kl = F.kl_div(torch.log(q), p, reduction='none').sum(dim=-1)
        # kl = (p * (torch.log(p) - torch.log(q))).sum(dim=-1)
        # To deal with the numerical instability of the KL-div function in PyTorch
        if (kl < 0).sum() > 0:
            kl = kl * (1 - (kl < 0).float())
        assert torch.isinf(kl).sum() == 0
        assert torch.isnan(kl).sum() == 0
        return kl
    @staticmethod
    def jsd(l_att, r_att):
        # Jensen-Shannon distance (sqrt of the divergence)
        m = (l_att + r_att) / 2
        l_kl = Measure.kl(l_att, m)
        r_kl = Measure.kl(r_att, m)
        d = torch.sqrt((l_kl + r_kl) / 2)
        assert (d < 0).sum() == 0
        assert torch.isnan(d).sum() == 0
        return d
    @staticmethod
    def hellinger(l_att, r_att):
        # Hellinger distance, normalised to [0, 1] by 1/sqrt(2)
        d = (((l_att.sqrt() - r_att.sqrt()) ** 2).sum(dim=-1)).sqrt()
        d /= math.sqrt(2)
        return d
    @staticmethod
    def avg_hellinger(l_att, r_att):
        # Average the per-head distances over the head dimension
        d = Measure.hellinger(l_att, r_att)
        return d.mean(dim=1)
    @staticmethod
    def avg_jsd(l_att, r_att):
        # Average the per-head distances over the head dimension
        d = Measure.jsd(l_att, r_att)
        return d.mean(dim=1)
trees_from_transformers | trees_from_transformers-master/data/dataset.py | from utils.yk import get_actions, get_nonbinary_spans, get_tags_tokens_lowercase
class Dataset(object):
    """Loads a PTB-style treebank file (one bracketed tree per line) and
    precomputes, per sentence: the raw words, the tokenizer's sub-word
    pieces, a piece-to-word index mask, and the gold (span, tag) pairs."""
    def __init__(self, path, tokenizer):
        self.path = path
        self.tokenizer = tokenizer
        self.cnt = 0  # number of sentences loaded
        self.sents = []
        self.raw_tokens = []
        self.tokens = []
        self.masks = []
        self.gold_spans = []
        self.gold_tags = []
        self.gold_trees = []
        flatten = lambda l: [item for sublist in l for item in sublist]
        with open(path, 'r') as f:
            lines = f.readlines()
        for line in lines:
            raw_tokens = get_tags_tokens_lowercase(line)[1]
            sent = ' '.join(raw_tokens)
            actions = get_actions(line)
            self.cnt += 1
            self.sents.append(sent)
            self.raw_tokens.append(raw_tokens)
            self.tokens.append(self.tokenizer.tokenize(sent))
            # Per word, repeat its index once per sub-word piece, then flatten
            mask = [len(self.tokenizer.tokenize(w)) * [i]
                    for i, w in enumerate(sent.split())]
            self.masks.append(flatten(mask))
            # Gold spans/tags recovered by replaying the tree's actions
            gold_spans, gold_tags, _, _ = get_nonbinary_spans(actions)
            self.gold_spans.append(gold_spans)
            self.gold_tags.append(gold_tags)
            self.gold_trees.append(line.strip())
| 1,271 | 32.473684 | 80 | py |
pi-peps | pi-peps-master/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'pi-peps'
copyright = '2019, Juraj Hasik, Alberto Sartori'
author = 'Juraj Hasik, Alberto Sartori'
# The short X.Y version (intentionally left empty; fill in when versioned)
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pi-pepsdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'pi-peps.tex', 'pi-peps Documentation',
     'Juraj Hasik, Alberto Sartori', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pi-peps', 'pi-peps Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'pi-peps', 'pi-peps Documentation',
     author, 'pi-peps', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 5,615 | 28.557895 | 79 | py |
SSTAP | SSTAP-main/post_processing.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import json
import multiprocessing as mp
from utils import iou_with_anchors
def load_json(file):
    """Parse the JSON file at *file* and return the decoded object."""
    with open(file) as fp:
        return json.load(fp)
def getDatasetDict(opt):
    """Build {video_name: info} for every *validation* video.

    Reads the video-info CSV (opt["video_info"]) and the annotation JSON
    (opt["video_anno"]); each entry keeps the duration/feature-frame fields
    and the raw annotations of the corresponding video.
    """
    info_df = pd.read_csv(opt["video_info"])
    database = load_json(opt["video_anno"])
    video_dict = {}
    for row in range(len(info_df)):
        name = info_df.video.values[row]
        src = database[name]
        entry = {
            'duration_frame': src['duration_frame'],
            'duration_second': src['duration_second'],
            'feature_frame': src['feature_frame'],
            'annotations': src['annotations'],
        }
        if info_df.subset.values[row] == 'validation':
            video_dict[name] = entry
    return video_dict
def soft_nms(df, alpha, t1, t2):
    '''
    Soft non-maximum suppression over temporal proposals.

    df: proposals generated by network (columns: score, xmin, xmax);
    alpha: alpha value of Gaussian decaying function;
    t1, t2: low/high IoU thresholds for soft nms (the effective threshold
            is interpolated by the width of the currently selected proposal).

    Returns a new DataFrame with up to 101 kept proposals.
    '''
    df = df.sort_values(by="score", ascending=False)
    tstart = list(df.xmin.values[:])
    tend = list(df.xmax.values[:])
    tscore = list(df.score.values[:])
    rstart = []
    rend = []
    rscore = []
    # Repeatedly select the best-scoring proposal and decay its overlaps
    while len(tscore) > 1 and len(rscore) < 101:
        max_index = tscore.index(max(tscore))
        tmp_iou_list = iou_with_anchors(
            np.array(tstart),
            np.array(tend), tstart[max_index], tend[max_index])
        for idx in range(0, len(tscore)):
            if idx != max_index:
                tmp_iou = tmp_iou_list[idx]
                tmp_width = tend[max_index] - tstart[max_index]
                # Width-dependent threshold between t1 and t2; Gaussian decay
                if tmp_iou > t1 + (t2 - t1) * tmp_width:
                    tscore[idx] = tscore[idx] * np.exp(-np.square(tmp_iou) /
                                                       alpha)
        rstart.append(tstart[max_index])
        rend.append(tend[max_index])
        rscore.append(tscore[max_index])
        tstart.pop(max_index)
        tend.pop(max_index)
        tscore.pop(max_index)
    newDf = pd.DataFrame()
    newDf['score'] = rscore
    newDf['xmin'] = rstart
    newDf['xmax'] = rend
    return newDf
def video_post_process(opt, video_list, video_dict):
    """Worker for BMN_post_processing: soft-NMS each video's proposal CSV
    and store its top-100 proposals (rescaled to seconds) into the shared
    global `result_dict` (a multiprocessing Manager dict)."""
    for video_name in video_list:
        df = pd.read_csv("./output/BMN_results/" + video_name + ".csv")
        if len(df) > 1:
            snms_alpha = opt["soft_nms_alpha"]  # 0.4
            snms_t1 = opt["soft_nms_low_thres"]  # 0.5
            snms_t2 = opt["soft_nms_high_thres"]  # 0.9
            df = soft_nms(df, snms_alpha, snms_t1, snms_t2)
        df = df.sort_values(by="score", ascending=False)
        video_info = video_dict[video_name]
        # Feature extraction uses 16-frame units; rescale to true seconds
        video_duration = float(video_info["duration_frame"] // 16 * 16) / video_info["duration_frame"] * video_info[
            "duration_second"]
        proposal_list = []
        for j in range(min(100, len(df))):
            tmp_proposal = {}
            tmp_proposal["score"] = df.score.values[j]
            # Clamp normalised boundaries to [0, 1] before converting
            tmp_proposal["segment"] = [max(0, df.xmin.values[j]) * video_duration,
                                       min(1, df.xmax.values[j]) * video_duration]
            proposal_list.append(tmp_proposal)
        # video_name[2:] strips the 'v_' prefix expected by the evaluator
        result_dict[video_name[2:]] = proposal_list
def BMN_post_processing(opt):
    """Run soft-NMS post-processing over all validation videos in parallel
    and dump the ActivityNet-format result JSON to opt["result_file"]."""
    video_dict = getDatasetDict(opt)
    video_list = list(video_dict.keys())  # [:100]
    # Shared across worker processes; written by video_post_process
    global result_dict
    result_dict = mp.Manager().dict()
    num_videos = len(video_list)
    num_videos_per_thread = num_videos // opt["post_process_thread"]
    processes = []
    for tid in range(opt["post_process_thread"] - 1):
        tmp_video_list = video_list[tid * num_videos_per_thread:(tid + 1) * num_videos_per_thread]
        p = mp.Process(target=video_post_process, args=(opt, tmp_video_list, video_dict))
        p.start()
        processes.append(p)
    # The last worker takes the remainder of the video list
    tmp_video_list = video_list[(opt["post_process_thread"] - 1) * num_videos_per_thread:]
    p = mp.Process(target=video_post_process, args=(opt, tmp_video_list, video_dict))
    p.start()
    processes.append(p)
    for p in processes:
        p.join()
    result_dict = dict(result_dict)
    output_dict = {"version": "VERSION 1.3", "results": result_dict, "external_data": {}}
    outfile = open(opt["result_file"], "w")
    json.dump(output_dict, outfile)
    outfile.close()
# opt = opts.parse_opt()
# opt = vars(opt)
# BSN_post_processing(opt)
| 4,633 | 34.106061 | 116 | py |
SSTAP | SSTAP-main/main.py | import sys
from dataset import VideoDataSet, VideoDataSet_unlabel
from loss_function import bmn_loss_func, get_mask
import os
import json
import torch
import torch.nn.parallel
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import numpy as np
import opts
from ipdb import set_trace
from models import BMN, TemporalShift, TemporalShift_random
import pandas as pd
import random
from post_processing import BMN_post_processing
from eval import evaluation_proposal
from ipdb import set_trace
# ---------------------------------------------------------------------------
# Reproducibility: seed every RNG (torch CPU/GPU, numpy, python) and force
# deterministic cuDNN kernels so runs are repeatable.
# ---------------------------------------------------------------------------
seed = 400
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed) # Numpy module.
random.seed(seed) # Python random module.
torch.manual_seed(seed)  # NOTE(review): duplicate of the call above; redundant but harmless.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
# Expose four GPUs; matches the device_ids=[0, 1, 2, 3] used by DataParallel below.
os.environ["CUDA_VISIBLE_DEVICES"] = '0,1,2,3'
# ANSI helper: wrap a string in blue escape codes for emphasis in console logs.
blue = lambda x: '\033[94m' + x + '\033[0m'
sys.dont_write_bytecode = True
# Module-level training state shared by the train/test helpers below.
global_step = 0         # total optimiser steps taken (drives the EMA schedule)
eval_loss = []          # per-epoch validation losses, dumped to JSON in test_BMN
consistency_rampup = 5  # epochs over which the consistency weight ramps up
consistency = 6 # 30 # 3 # None  -- max consistency-loss weight (earlier values tried)
def update_ema_variables(model, ema_model, alpha, global_step):
    """Update ``ema_model`` in place as an exponential moving average of
    ``model`` (mean-teacher style).

    Args:
        model: student network whose weights are tracked.
        ema_model: teacher network, updated in place.
        alpha: target EMA decay rate.
        global_step: current step; early on, the effective decay is capped at
            1 - 1/(step + 1) so the teacher follows the true average until the
            exponential average is more correct.
    """
    # Use the true average until the exponential average is more correct.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # ema = alpha * ema + (1 - alpha) * param.
        # Keyword `alpha=` form: the positional-scalar overload of Tensor.add_
        # is deprecated in modern PyTorch.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def softmax_mse_loss(input_logits, target_logits):
    """Mean-squared-error consistency loss between two logit tensors.

    NOTE: despite the name, no softmax is applied — the raw logits are
    compared directly and the element-wise mean is returned. Gradients flow
    to both arguments; callers are expected to detach the teacher side.
    """
    assert input_logits.size() == target_logits.size()
    return torch.mean((input_logits - target_logits) ** 2)
def softmax_kl_loss(input_logits, target_logits):
    """KL-divergence consistency loss between two logit tensors.

    NOTE: despite the name, no (log-)softmax is applied here — the tensors
    are handed to ``F.kl_div`` as-is, so the first argument is treated as
    log-probabilities and the second as probabilities. Returns the
    element-wise mean; gradients flow to the inputs but not the targets.
    """
    assert input_logits.size() == target_logits.size()
    divergence = F.kl_div(input_logits, target_logits, reduction='mean')
    return divergence
def Motion_MSEloss(output, clip_label, motion_mask=None):
    """Masked mean-squared-error between a reconstructed feature sequence and
    its target.

    Args:
        output: reconstructed tensor.
        clip_label: target tensor, broadcast-compatible with ``output``.
        motion_mask: optional per-position weights; defaults to an all-ones
            mask of length 100 on ``output``'s device.

    Returns:
        Scalar tensor: mean of ``motion_mask * (output - clip_label) ** 2``.

    Note: the original default ``torch.ones(100).cuda()`` was evaluated once
    at import time, which both required a CUDA device just to import the
    module and shared a single tensor across all calls; the lazy ``None``
    default fixes that while keeping the same behaviour on GPU inputs.
    """
    if motion_mask is None:
        motion_mask = torch.ones(100, device=output.device)
    squared_error = torch.pow(output - clip_label, 2)
    return torch.mean(motion_mask * squared_error)
def sigmoid_rampup(current, rampup_length):
    """Exponential rampup from https://arxiv.org/abs/1610.02242

    Returns a weight that rises sigmoidally from exp(-5) at ``current == 0``
    to 1.0 at ``current >= rampup_length``. A zero-length ramp means
    "no rampup": the weight is always 1.0.
    """
    if rampup_length == 0:
        return 1.0
    remaining = 1.0 - np.clip(current, 0.0, rampup_length) / rampup_length
    return float(np.exp(-5.0 * remaining ** 2))
def linear_rampup(current, rampup_length):
    """Linearly ramp from 0 to 1 over ``rampup_length`` steps, then hold at 1.

    Both arguments must be non-negative; a zero-length ramp always yields 1.0.
    """
    assert current >= 0 and rampup_length >= 0
    if current < rampup_length:
        return current / rampup_length
    return 1.0
def cosine_rampdown(current, rampdown_length):
    """Cosine rampdown from https://arxiv.org/abs/1608.03983

    Decays smoothly from 1.0 at ``current == 0`` to 0.0 at
    ``current == rampdown_length`` along half a cosine period.
    """
    assert 0 <= current <= rampdown_length
    fraction = current / rampdown_length
    return float((np.cos(np.pi * fraction) + 1) * .5)
def get_current_consistency_weight(epoch):
    """Consistency-loss weight for this epoch: the module-level
    ``consistency`` maximum scaled by a sigmoid ramp-up over
    ``consistency_rampup`` epochs (https://arxiv.org/abs/1610.02242).
    """
    ramp = sigmoid_rampup(epoch, consistency_rampup)
    return consistency * ramp
def train_BMN(data_loader, model, optimizer, epoch, bm_mask):
    """One supervised training epoch of BMN (no semi-supervised terms).

    Runs forward/backward on every labelled batch, then prints the running
    means of the TEM loss, the two PEM losses, and the total loss.
    """
    model.train()
    sum_pem_reg = 0
    sum_pem_cls = 0
    sum_tem = 0
    sum_total = 0
    for n_iter, (input_data, label_confidence, label_start, label_end) in enumerate(data_loader):
        input_data = input_data.cuda()
        label_start = label_start.cuda()
        label_end = label_end.cuda()
        label_confidence = label_confidence.cuda()
        # confidence_map: [B, 2, 100, 100]; start/end: [B, 100]
        confidence_map, start, end = model(input_data)
        # loss = (total, tem_loss, pem_reg_loss, pem_cls_loss),
        # with total = tem_loss + 10 * pem_reg_loss + pem_cls_loss
        loss = bmn_loss_func(confidence_map, start, end, label_confidence,
                             label_start, label_end, bm_mask.cuda())
        optimizer.zero_grad()
        loss[0].backward()
        optimizer.step()
        sum_pem_reg += loss[2].cpu().detach().numpy()
        sum_pem_cls += loss[3].cpu().detach().numpy()
        sum_tem += loss[1].cpu().detach().numpy()
        sum_total += loss[0].cpu().detach().numpy()
    print(
        "BMN training loss(epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, total_loss: %.03f" % (
            epoch, sum_tem / (n_iter + 1),
            sum_pem_cls / (n_iter + 1),
            sum_pem_reg / (n_iter + 1),
            sum_total / (n_iter + 1)))
def train_BMN_Semi(data_loader, train_loader_unlabel, model, model_ema, optimizer, epoch, bm_mask):
    """One semi-supervised training epoch (mean teacher + SSTAP pretext tasks).

    Per labelled batch, the total loss combines:
      * the supervised BMN loss on a temporally-shifted, dropped-out input;
      * a feature-reconstruction loss (labelled and unlabelled streams);
      * mean-teacher consistency (student vs. EMA teacher) on labelled data,
        on confidently-predicted unlabelled data, and on temporally flipped
        versions of both;
      * a clip-order classification pretext loss.
    The EMA teacher is updated after every optimizer step.
    """
    global global_step
    model.train()
    epoch_pemreg_loss = 0
    epoch_pemclr_loss = 0
    epoch_tem_loss = 0
    epoch_loss = 0
    consistency_loss_all = 0
    consistency_loss_ema_all = 0
    consistency_criterion = softmax_mse_loss  # softmax_kl_loss
    temporal_perb = TemporalShift_random(400, 64)
    order_clip_criterion = nn.CrossEntropyLoss()
    # Feature flags for the individual auxiliary losses / perturbations.
    # NOTE(review): this local `consistency = True` flag shadows the
    # module-level `consistency` weight, but get_current_consistency_weight
    # still reads the global — confirm this shadowing is intended.
    consistency = True
    clip_order = True
    dropout2d = True
    temporal_re = True
    unlabeled_train_iter = iter(train_loader_unlabel)
    for n_iter, (input_data, label_confidence, label_start, label_end) in enumerate(data_loader):
        input_data = input_data.cuda()
        label_start = label_start.cuda()
        label_end = label_end.cuda()
        label_confidence = label_confidence.cuda()
        # --- Student forward on the labelled batch (temporal shift + dropout). ---
        input_data_student = temporal_perb(input_data)
        if dropout2d:
            input_data_student = F.dropout2d(input_data_student, 0.2)
        else:
            input_data_student = F.dropout(input_data_student, 0.2)
        confidence_map, start, end = model(input_data_student)  # [B, 2, 100, 100], [B,100],[B,100]
        loss = bmn_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask.cuda())
        confidence_map = confidence_map * bm_mask.cuda()
        # --- Feature-reconstruction pretext input for the labelled batch. ---
        if temporal_re:
            input_recons = F.dropout2d(input_data.permute(0,2,1), 0.2).permute(0,2,1)
        else:
            input_recons = F.dropout2d(input_data, 0.2)
        recons_feature = model(input_recons, recons=True)
        # --- Pull the next unlabelled batch, restarting the iterator when
        # exhausted.
        # NOTE(review): `.next()` relies on the old DataLoader-iterator alias
        # and the bare `except:` also swallows unrelated errors; Python 3
        # spelling would be next(unlabeled_train_iter) / except StopIteration.
        try:
            input_data_unlabel= unlabeled_train_iter.next()
            input_data_unlabel = input_data_unlabel.cuda()
        except:
            unlabeled_train_iter = iter(train_loader_unlabel)
            input_data_unlabel = unlabeled_train_iter.next()
            input_data_unlabel = input_data_unlabel.cuda()
        # --- Student forward on the unlabelled batch. ---
        input_data_unlabel_student = temporal_perb(input_data_unlabel)
        if dropout2d:
            input_data_unlabel_student = F.dropout2d(input_data_unlabel_student, 0.2)
        else:
            input_data_unlabel_student = F.dropout(input_data_unlabel_student, 0.2)
        confidence_map_unlabel_student, start_unlabel_student, end_unlabel_student = model(input_data_unlabel_student)
        confidence_map_unlabel_student = confidence_map_unlabel_student * bm_mask.cuda()
        # label
        # --- Student forward on the temporally flipped labelled input. ---
        input_data_label_student_flip = F.dropout2d(input_data.flip(2).contiguous(), 0.1)
        confidence_map_label_student_flip, start_label_student_flip, end_label_student_flip = model(
            input_data_label_student_flip)
        confidence_map_label_student_flip = confidence_map_label_student_flip * bm_mask.cuda()
        # unlabel
        # --- Student forward on the temporally flipped unlabelled input. ---
        input_data_unlabel_student_flip = F.dropout2d(input_data_unlabel.flip(2).contiguous(), 0.1)
        confidence_map_unlabel_student_flip, start_unlabel_student_flip, end_unlabel_student_flip = model(
            input_data_unlabel_student_flip)
        confidence_map_unlabel_student_flip = confidence_map_unlabel_student_flip * bm_mask.cuda()
        if temporal_re:
            recons_input_student = F.dropout2d(input_data_unlabel.permute(0,2,1), 0.2).permute(0,2,1)
        else:
            recons_input_student = F.dropout2d(input_data_unlabel, 0.2)
        recons_feature_unlabel_student = model(recons_input_student, recons=True)
        loss_recons = 0.0005 * (
                Motion_MSEloss(recons_feature, input_data) + Motion_MSEloss(recons_feature_unlabel_student,
                                                                            input_data_unlabel))  # 0.0001
        # --- Teacher (EMA model) forward passes; gradients are not needed. ---
        with torch.no_grad():
            # input_data_unlabel = input_data_unlabel.cuda()
            input_data_ema = F.dropout(input_data, 0.05)  # 0.3
            confidence_map_teacher, start_teacher, end_teacher = model_ema(input_data_ema)
            confidence_map_teacher = confidence_map_teacher * bm_mask.cuda()
            input_data_unlabel_teacher = F.dropout(input_data_unlabel, 0.05)  # 0.3
            confidence_map_unlabel_teacher, start_unlabel_teacher, end_unlabel_teacher = model_ema(
                input_data_unlabel_teacher)
            confidence_map_unlabel_teacher = confidence_map_unlabel_teacher * bm_mask.cuda()
            # Flipped-confidence target for the *unlabelled* teacher map.
            # Flipping the sequence maps a proposal (start, end) to
            # (T-end, T-start): on the BM map that is a flip along the last
            # axis plus a per-row (duration) shift.
            out = torch.zeros_like(confidence_map_unlabel_teacher)
            out_m = confidence_map_unlabel_teacher.flip(3).contiguous()
            for i in range(100):
                out[:, :, i, :100 - i] = out_m[:, :, i, i:]
            confidence_map_unlabel_teacher_flip = out
            # Same construction for the *labelled* teacher map.
            out = torch.zeros_like(confidence_map_teacher)
            out_m = confidence_map_teacher.flip(3).contiguous()
            for i in range(100):
                out[:, :, i, :100 - i] = out_m[:, :, i, i:]
            confidence_map_label_teacher_flip = out
            # start_unlabel_teacher_flip = start_unlabel_teacher.flip(1).contiguous()
            # end_unlabel_teacher_flip = end_unlabel_teacher.flip(1).contiguous()
            # Sharpen near-certain teacher boundary scores to hard 0/1 targets.
            start_unlabel_teacher[start_unlabel_teacher >= 0.9] = 1.0
            start_unlabel_teacher[start_unlabel_teacher <= 0.1] = 0.0  # 2_add
            end_unlabel_teacher[end_unlabel_teacher >= 0.9] = 1.0
            end_unlabel_teacher[end_unlabel_teacher <= 0.1] = 0.0
            # flip (label)
            start_label_teacher_flip = start_teacher.flip(1).contiguous()
            end_label_teacher_flip = end_teacher.flip(1).contiguous()
            # flip (unlabel)
            start_unlabel_teacher_flip = start_unlabel_teacher.flip(1).contiguous()
            end_unlabel_teacher_flip = end_unlabel_teacher.flip(1).contiguous()
            # Keep only unlabelled clips where the teacher is confident about
            # BOTH a start and an end boundary (max score > 0.6 each).
            mask = torch.eq(
                (start_unlabel_teacher.max(1)[0] > 0.6).float() + (end_unlabel_teacher.max(1)[0] > 0.6).float(), 2.)
            confidence_map_unlabel_teacher = confidence_map_unlabel_teacher[mask]
            start_unlabel_teacher = start_unlabel_teacher[mask]
            end_unlabel_teacher = end_unlabel_teacher[mask]
            # flip
            confidence_map_unlabel_teacher_flip = confidence_map_unlabel_teacher_flip[mask]
            start_unlabel_teacher_flip = start_unlabel_teacher_flip[mask]
            end_unlabel_teacher_flip = end_unlabel_teacher_flip[mask]
            # NOTE(review): the student-side selections below happen inside
            # torch.no_grad(), so the resulting tensors are detached and the
            # unlabelled consistency losses appear to send no gradient to the
            # student — confirm this is intended.
            confidence_map_unlabel_student = confidence_map_unlabel_student[mask]
            start_unlabel_student = start_unlabel_student[mask]
            end_unlabel_student = end_unlabel_student[mask]
            # flip add mask
            confidence_map_unlabel_student_flip = confidence_map_unlabel_student_flip[mask]
            start_unlabel_student_flip = start_unlabel_student_flip[mask]
            end_unlabel_student_flip = end_unlabel_student_flip[mask]
        if consistency:
            consistency_weight = get_current_consistency_weight(epoch)
            # meters.update('cons_weight', consistency_weight)
            # set_trace()
            # Student vs. teacher on the labelled batch.
            consistency_loss = consistency_weight * (consistency_criterion(confidence_map, confidence_map_teacher) +
                                                     consistency_criterion(start, start_teacher) +
                                                     consistency_criterion(end, end_teacher))
            # Student vs. teacher on the confidence-masked unlabelled batch.
            consistency_loss_ema = consistency_weight * (
                    consistency_criterion(confidence_map_unlabel_teacher, confidence_map_unlabel_student) +
                    consistency_criterion(start_unlabel_teacher, start_unlabel_student) +
                    consistency_criterion(end_unlabel_teacher, end_unlabel_student))
            # set_trace()
            # The confidence mask can select zero clips, making the mean NaN.
            if torch.isnan(consistency_loss_ema):
                consistency_loss_ema = torch.tensor(0.).cuda()
            consistency_loss_ema_flip = 0.1 * consistency_weight * (
                    consistency_criterion(confidence_map_unlabel_teacher_flip, confidence_map_unlabel_student_flip) +
                    consistency_criterion(start_unlabel_teacher_flip, start_unlabel_student_flip) +
                    consistency_criterion(end_unlabel_teacher_flip, end_unlabel_student_flip)) + 0.1 * consistency_weight * (
                    consistency_criterion(confidence_map_label_teacher_flip, confidence_map_label_student_flip) +
                    consistency_criterion(start_label_teacher_flip, start_label_student_flip) +
                    consistency_criterion(end_label_teacher_flip, end_label_student_flip))
            # meters.update('cons_loss', consistency_loss.item())
        else:
            consistency_loss = torch.tensor(0).cuda()
            consistency_loss_ema = torch.tensor(0).cuda()
            consistency_loss_ema_flip = torch.tensor(0).cuda()
            # meters.update('cons_loss', 0)
        # --- Clip-order pretext: half of the shuffled (labelled + unlabelled)
        # batch has its two temporal halves swapped; a binary classifier must
        # detect which clips were tampered with.
        # NOTE(review): if clip_order were False, loss_clip_order below would
        # be unbound — the flag is always True here.
        if clip_order:
            input_data_all = torch.cat([input_data, input_data_unlabel], 0)
            batch_size, C, T = input_data_all.size()
            idx = torch.randperm(batch_size)
            input_data_all_new = input_data_all[idx]
            forw_input = torch.cat(
                [input_data_all_new[:batch_size // 2, :, T // 2:], input_data_all_new[:batch_size // 2, :, :T // 2]], 2)
            back_input = input_data_all_new[batch_size // 2:, :, :]
            input_all = torch.cat([forw_input, back_input], 0)
            label_order = [0] * (batch_size // 2) + [1] * (batch_size - batch_size // 2)
            label_order = torch.tensor(label_order).long().cuda()
            out = model(input_all, clip_order=True)
            loss_clip_order = order_clip_criterion(out, label_order)
        loss_all = loss[0] + consistency_loss + consistency_loss_ema + loss_recons + 0.01 * loss_clip_order + consistency_loss_ema_flip
        optimizer.zero_grad()
        loss_all.backward()
        optimizer.step()
        global_step += 1
        update_ema_variables(model, model_ema, 0.999, float(global_step/20))  # //5 //25
        epoch_pemreg_loss += loss[2].cpu().detach().numpy()
        epoch_pemclr_loss += loss[3].cpu().detach().numpy()
        epoch_tem_loss += loss[1].cpu().detach().numpy()
        epoch_loss += loss[0].cpu().detach().numpy()
        consistency_loss_all += consistency_loss.cpu().detach().numpy()
        consistency_loss_ema_all += consistency_loss_ema.cpu().detach().numpy()
        if n_iter % 10 == 0:
            print(
                "training %d (epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, consistency_loss: %.05f, consistency_loss_ema: %.05f, total_loss: %.03f" % (global_step,
                    epoch, epoch_tem_loss / (n_iter + 1),
                    epoch_pemclr_loss / (n_iter + 1),
                    epoch_pemreg_loss / (n_iter + 1),
                    consistency_loss_all / (n_iter + 1),
                    consistency_loss_ema_all / (n_iter + 1),
                    epoch_loss / (n_iter + 1)))
    print(
        blue("BMN training loss(epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, total_loss: %.03f" % (
            epoch, epoch_tem_loss / (n_iter + 1),
            epoch_pemclr_loss / (n_iter + 1),
            epoch_pemreg_loss / (n_iter + 1),
            epoch_loss / (n_iter + 1))))
def train_BMN_Semi_Full(data_loader, model, model_ema, optimizer, epoch, bm_mask):
    """Mean-teacher training epoch for the fully-labelled case
    (opt['unlabel_percent'] == 0.).

    Mirrors train_BMN_Semi but with every unlabelled-stream branch disabled
    (the corresponding code is kept commented out): supervised BMN loss +
    teacher consistency + flip consistency + feature reconstruction +
    clip-order pretext, with the EMA teacher updated after every step.
    """
    global global_step
    model.train()
    epoch_pemreg_loss = 0
    epoch_pemclr_loss = 0
    epoch_tem_loss = 0
    epoch_loss = 0
    consistency_loss_all = 0
    consistency_loss_ema_all = 0
    consistency_criterion = softmax_mse_loss  # softmax_kl_loss
    # perturbance = nn.dropout(0.3)
    temporal_perb = TemporalShift_random(400, 64)  # TemporalShift(400, 8) 16
    order_clip_criterion = nn.CrossEntropyLoss()
    # Feature flags. NOTE(review): the local `consistency` flag shadows the
    # module-level weight of the same name; get_current_consistency_weight
    # still reads the global.
    consistency = True
    clip_order = True
    dropout2d = True
    temporal_re = True
    # unlabeled_train_iter = iter(train_loader_unlabel)
    for n_iter, (input_data, label_confidence, label_start, label_end) in enumerate(data_loader):
        input_data = input_data.cuda()
        label_start = label_start.cuda()
        label_end = label_end.cuda()
        label_confidence = label_confidence.cuda()
        # --- Student forward (temporal shift + dropout). ---
        input_data_student = temporal_perb(input_data)
        if dropout2d:
            input_data_student = F.dropout2d(input_data_student, 0.2)
        else:
            input_data_student = F.dropout(input_data_student, 0.2)
        confidence_map, start, end = model(input_data_student)  # [B, 2, 100, 100], [B,100],[B,100]
        loss = bmn_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask.cuda())
        confidence_map = confidence_map * bm_mask.cuda()
        # --- Feature-reconstruction pretext. ---
        if temporal_re:
            input_recons = F.dropout2d(input_data.permute(0, 2, 1), 0.2).permute(0, 2, 1)
        else:
            input_recons = F.dropout2d(input_data, 0.2)
        recons_feature = model(input_recons, recons=True)
        # try:
        #     input_data_unlabel= unlabeled_train_iter.next()
        #     input_data_unlabel = input_data_unlabel.cuda()
        # except:
        #     unlabeled_train_iter = iter(train_loader_unlabel)
        #     input_data_unlabel = unlabeled_train_iter.next()
        #     input_data_unlabel = input_data_unlabel.cuda()
        # input_data_unlabel = F.dropout2d(input_data_unlabel.cuda(), 0.2)
        # input_data_unlabel_student = temporal_perb(input_data_unlabel)
        # if dropout2d:
        #     input_data_unlabel_student = F.dropout2d(input_data_unlabel_student, 0.2)
        # else:
        #     input_data_unlabel_student = F.dropout(input_data_unlabel_student, 0.2)
        # confidence_map_unlabel_student, start_unlabel_student, end_unlabel_student = model(input_data_unlabel_student)
        # confidence_map_unlabel_student = confidence_map_unlabel_student * bm_mask.cuda()
        # --- Student forward on the temporally flipped input. ---
        input_data_label_student_flip = F.dropout2d(input_data.flip(2).contiguous(), 0.1)
        confidence_map_label_student_flip, start_label_student_flip, end_label_student_flip = model(
            input_data_label_student_flip)
        confidence_map_label_student_flip = confidence_map_label_student_flip * bm_mask.cuda()
        # recons_input_student = F.dropout2d(input_data_unlabel.cuda(), 0.2)
        # recons_feature_unlabel_student = model(recons_input_student, recons=True)
        # set_trace()
        loss_recons = 0.0005 * (
                Motion_MSEloss(recons_feature, input_data))  # 0.0001
        # --- Teacher (EMA model) forward; gradients are not needed. ---
        with torch.no_grad():
            # input_data_unlabel = input_data_unlabel.cuda()
            input_data_ema = F.dropout(input_data, 0.05)  # 0.3
            confidence_map_teacher, start_teacher, end_teacher = model_ema(input_data_ema)
            confidence_map_teacher = confidence_map_teacher * bm_mask.cuda()
            # input_data_unlabel_teacher = F.dropout(input_data_unlabel, 0.05)  # 0.3
            # confidence_map_unlabel_teacher, start_unlabel_teacher, end_unlabel_teacher = model_ema(
            #     input_data_unlabel_teacher)
            # confidence_map_unlabel_teacher = confidence_map_unlabel_teacher * bm_mask.cuda()
            # Flipped-confidence target: flipping the sequence maps a proposal
            # (start, end) to (T-end, T-start), which on the BM map is a flip
            # along the last axis plus a per-row (duration) shift.
            out = torch.zeros_like(confidence_map_teacher)
            out_m = confidence_map_teacher.flip(3).contiguous()
            for i in range(100):
                out[:, :, i, :100 - i] = out_m[:, :, i, i:]
            confidence_map_label_teacher = out
            # start_unlabel_teacher_flip = start_unlabel_teacher.flip(1).contiguous()
            # end_unlabel_teacher_flip = end_unlabel_teacher.flip(1).contiguous()
            # add mask
            # start_label_teacher[start_label_teacher >= 0.9] = 1.0
            # start_label_teacher[start_label_teacher <= 0.1] = 0.0  # 2_add
            # end_unlabel_teacher[end_unlabel_teacher >= 0.9] = 1.0
            # end_unlabel_teacher[end_unlabel_teacher <= 0.1] = 0.0
            # Flip targets for the boundary curves come from the ground-truth
            # labels rather than teacher predictions.
            start_label_teacher_flip = label_start.flip(1).contiguous()
            end_label_teacher_flip = label_end.flip(1).contiguous()
            # mask = torch.eq(
            #     (start_unlabel_teacher.max(1)[0] > 0.6).float() + (end_unlabel_teacher.max(1)[0] > 0.6).float(), 2.)
            # confidence_map_unlabel_teacher = confidence_map_unlabel_teacher[mask]
            # start_unlabel_teacher = start_unlabel_teacher[mask]
            # end_unlabel_teacher = end_unlabel_teacher[mask]
            # flip
            # confidence_map_unlabel_teacher_flip = confidence_map_unlabel_teacher_flip[mask]
            # start_unlabel_teacher_flip = start_unlabel_teacher_flip[mask]
            # end_unlabel_teacher_flip = end_unlabel_teacher_flip[mask]
            # add mask
            # confidence_map_unlabel_student = confidence_map_unlabel_student[mask]
            # start_unlabel_student = start_unlabel_student[mask]
            # end_unlabel_student = end_unlabel_student[mask]
            # flip add mask
            # confidence_map_unlabel_student_flip = confidence_map_label_student_flip[mask]
            # start_unlabel_student_flip = start_label_student_flip[mask]
            # end_unlabel_student_flip = end_label_student_flip[mask]
        if consistency:
            consistency_weight = get_current_consistency_weight(epoch)
            # meters.update('cons_weight', consistency_weight)
            # set_trace()
            # Student vs. teacher on the (unperturbed-target) labelled batch.
            consistency_loss = consistency_weight * (consistency_criterion(confidence_map, confidence_map_teacher) +
                                                     consistency_criterion(start, start_teacher) +
                                                     consistency_criterion(end, end_teacher))
            # Flipped student vs. flipped teacher/label targets.
            consistency_loss_ema_flip = 0.1 * consistency_weight * (
                    consistency_criterion(confidence_map_label_student_flip, confidence_map_label_teacher) +
                    consistency_criterion(start_label_student_flip, start_label_teacher_flip) +
                    consistency_criterion(end_label_student_flip, end_label_teacher_flip))
            # consistency_loss_ema_flip = 0.1 * consistency_weight * (
            #     consistency_criterion(confidence_map_label_teacher, confidence_map_label_student_flip) +
            #     consistency_criterion(start_label_teacher_flip, start_label_student_flip) +
            #     consistency_criterion(end_label_teacher_flip, end_label_student_flip))
            # meters.update('cons_loss', consistency_loss.item())
        else:
            consistency_loss = torch.tensor(0).cuda()
            consistency_loss_ema = torch.tensor(0).cuda()
            consistency_loss_ema_flip = torch.tensor(0).cuda()
            # meters.update('cons_loss', 0)
        # --- Clip-order pretext: half of the shuffled batch has its two
        # temporal halves swapped; a binary classifier must detect which.
        if clip_order:
            input_data_all = input_data  # torch.cat([input_data, input_data_unlabel], 0)
            batch_size, C, T = input_data_all.size()
            idx = torch.randperm(batch_size)
            input_data_all_new = input_data_all[idx]
            forw_input = torch.cat(
                [input_data_all_new[:batch_size // 2, :, T // 2:], input_data_all_new[:batch_size // 2, :, :T // 2]], 2)
            back_input = input_data_all_new[batch_size // 2:, :, :]
            input_all = torch.cat([forw_input, back_input], 0)
            label_order = [0] * (batch_size // 2) + [1] * (batch_size - batch_size // 2)
            label_order = torch.tensor(label_order).long().cuda()
            out = model(input_all, clip_order=True)
            loss_clip_order = order_clip_criterion(out, label_order)
        loss_all = loss[0] + consistency_loss + loss_recons + 0.01 * loss_clip_order + consistency_loss_ema_flip
        optimizer.zero_grad()
        loss_all.backward()
        optimizer.step()
        global_step += 1
        update_ema_variables(model, model_ema, 0.999, float(global_step/20))  # //5 //25
        epoch_pemreg_loss += loss[2].cpu().detach().numpy()
        epoch_pemclr_loss += loss[3].cpu().detach().numpy()
        epoch_tem_loss += loss[1].cpu().detach().numpy()
        epoch_loss += loss[0].cpu().detach().numpy()
        consistency_loss_all += consistency_loss.cpu().detach().numpy()
        # consistency_loss_ema_all += consistency_loss_ema.cpu().detach().numpy()
        if n_iter % 10 == 0:
            print(
                "training %d (epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, consistency_loss: %.05f, total_loss: %.03f" % (global_step,
                    epoch, epoch_tem_loss / (n_iter + 1),
                    epoch_pemclr_loss / (n_iter + 1),
                    epoch_pemreg_loss / (n_iter + 1),
                    consistency_loss_all / (n_iter + 1),
                    # consistency_loss_ema_all / (n_iter + 1),
                    epoch_loss / (n_iter + 1)))
    print(
        blue("BMN training loss(epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, total_loss: %.03f" % (
            epoch, epoch_tem_loss / (n_iter + 1),
            epoch_pemclr_loss / (n_iter + 1),
            epoch_pemreg_loss / (n_iter + 1),
            epoch_loss / (n_iter + 1))))
def test_BMN(data_loader, model, epoch, bm_mask):
    """Validate the student model for one epoch, log losses, and checkpoint.

    Side effects (paths come from the module-global ``opt``):
      * appends the epoch's mean total loss to the global ``eval_loss`` and
        rewrites ``output_eval_loss.json``;
      * always saves ``BMN_checkpoint.pth.tar``;
      * additionally saves ``BMN_best.pth.tar`` when the summed epoch loss
        improves on ``model.module.tem_best_loss``.
    """
    global eval_loss
    model.eval()
    epoch_pemreg_loss = 0
    epoch_pemclr_loss = 0
    epoch_tem_loss = 0
    epoch_loss = 0
    # Inference only: no_grad avoids building autograd graphs during validation.
    with torch.no_grad():
        for n_iter, (input_data, label_confidence, label_start, label_end) in enumerate(data_loader):
            input_data = input_data.cuda()
            label_start = label_start.cuda()
            label_end = label_end.cuda()
            label_confidence = label_confidence.cuda()
            confidence_map, start, end = model(input_data)
            loss = bmn_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask.cuda())
            epoch_pemreg_loss += loss[2].cpu().detach().numpy()
            epoch_pemclr_loss += loss[3].cpu().detach().numpy()
            epoch_tem_loss += loss[1].cpu().detach().numpy()
            epoch_loss += loss[0].cpu().detach().numpy()
    print(
        blue("BMN val loss(epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, total_loss: %.03f" % (
            epoch, epoch_tem_loss / (n_iter + 1),
            epoch_pemclr_loss / (n_iter + 1),
            epoch_pemreg_loss / (n_iter + 1),
            epoch_loss / (n_iter + 1))))
    eval_loss.append(epoch_loss / (n_iter + 1))
    state = {'epoch': epoch + 1,
             'state_dict': model.state_dict()}
    torch.save(state, opt["checkpoint_path"] + "/BMN_checkpoint.pth.tar")  # ./checkpoint
    # NOTE: compares the *summed* epoch loss (not the mean) against the stored best.
    if epoch_loss < model.module.tem_best_loss:
        model.module.tem_best_loss = epoch_loss
        torch.save(state, opt["checkpoint_path"] + "/BMN_best.pth.tar")
    # Context manager guarantees the file is closed even if json.dump raises.
    # (The unused `best_loss` local from the original has been removed.)
    with open(opt["checkpoint_path"] + "/output_eval_loss.json", "w") as opt_file:
        json.dump(eval_loss, opt_file)
def test_BMN_ema(data_loader, model, epoch, bm_mask):
    """Validate the EMA-teacher model for one epoch, log losses, and checkpoint.

    Side effects (paths come from the module-global ``opt``):
      * always saves ``BMN_checkpoint_ema.pth.tar``;
      * additionally saves ``BMN_best_ema.pth.tar`` when the summed epoch loss
        improves on ``model.module.tem_best_loss``.
    """
    model.eval()
    epoch_pemreg_loss = 0
    epoch_pemclr_loss = 0
    epoch_tem_loss = 0
    epoch_loss = 0
    # Inference only: no_grad avoids building autograd graphs during validation.
    with torch.no_grad():
        for n_iter, (input_data, label_confidence, label_start, label_end) in enumerate(data_loader):
            input_data = input_data.cuda()
            label_start = label_start.cuda()
            label_end = label_end.cuda()
            label_confidence = label_confidence.cuda()
            confidence_map, start, end = model(input_data)
            loss = bmn_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask.cuda())
            epoch_pemreg_loss += loss[2].cpu().detach().numpy()
            epoch_pemclr_loss += loss[3].cpu().detach().numpy()
            epoch_tem_loss += loss[1].cpu().detach().numpy()
            epoch_loss += loss[0].cpu().detach().numpy()
    print(
        blue("BMN val_ema loss(epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, total_loss: %.03f" % (
            epoch, epoch_tem_loss / (n_iter + 1),
            epoch_pemclr_loss / (n_iter + 1),
            epoch_pemreg_loss / (n_iter + 1),
            epoch_loss / (n_iter + 1))))
    state = {'epoch': epoch + 1,
             'state_dict': model.state_dict()}
    torch.save(state, opt["checkpoint_path"] + "/BMN_checkpoint_ema.pth.tar")  # ./checkpoint
    # NOTE: compares the *summed* epoch loss (not the mean) against the stored best.
    # (The unused `best_loss` local from the original has been removed.)
    if epoch_loss < model.module.tem_best_loss:
        model.module.tem_best_loss = epoch_loss
        torch.save(state, opt["checkpoint_path"] + "/BMN_best_ema.pth.tar")
def BMN_Train(opt):
    """Build the student and EMA-teacher BMN models, the data loaders, and the
    LR schedule, then train for opt["train_epochs"] epochs.

    Regimes:
      * opt['use_semi'] and unlabel_percent > 0  -> train_BMN_Semi (mean
        teacher with a separate unlabelled loader);
      * opt['use_semi'] and unlabel_percent == 0 -> train_BMN_Semi_Full;
      * otherwise -> plain supervised train_BMN on the labelled subset.
    Both student and (where applicable) teacher are validated and
    checkpointed every epoch.
    """
    model = BMN(opt)
    model = torch.nn.DataParallel(model, device_ids=[0, 1, 2, 3]).cuda()
    model_ema = BMN(opt)
    model_ema = torch.nn.DataParallel(model_ema, device_ids=[0, 1, 2, 3]).cuda()
    # The teacher is never updated by the optimizer; its weights change only
    # through the EMA update, so detach its parameters from autograd.
    for param in model_ema.parameters():
        param.detach_()
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=opt["training_lr"],
                           weight_decay=opt["weight_decay"])  # 1e-4
    train_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset="train"),  # [16,400,100]
                                               batch_size=opt["batch_size"], shuffle=True, drop_last=True,
                                               num_workers=8, pin_memory=True)
    if opt['use_semi'] and opt['unlabel_percent'] > 0.:
        # Unlabelled batch size is scaled to preserve the labelled:unlabelled
        # ratio, rounded to a multiple of 4 and clamped to [4, 24].
        train_loader_unlabel = torch.utils.data.DataLoader(VideoDataSet_unlabel(opt, subset="unlabel"),  # [16,400,100]
                                                   batch_size=min(max(round(opt["batch_size"]*opt['unlabel_percent']/(4*(1.-opt['unlabel_percent'])))*4, 4), 24), shuffle=True,drop_last=True,
                                                   num_workers=8, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset="validation"),
                                              batch_size=opt["batch_size"], shuffle=False,
                                              num_workers=8, pin_memory=True)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opt["step_size"], gamma=opt["step_gamma"])  # 7 0.1
    bm_mask = get_mask(opt["temporal_scale"])
    use_semi = opt['use_semi']
    print('use {} label for training!!!'.format(1-opt['unlabel_percent']))
    print('training batchsize : {}'.format(opt["batch_size"]))
    print('unlabel_training batchsize : {}'.format(min(max(round(opt["batch_size"]*opt['unlabel_percent']/(4*(1.-opt['unlabel_percent'])))*4, 4), 24)))
    for epoch in range(opt["train_epochs"]):  # 9
        # scheduler.step()
        if use_semi:
            if opt['unlabel_percent'] == 0.:
                print('use Semi !!! use all label !!!')
                train_BMN_Semi_Full(train_loader, model, model_ema, optimizer, epoch, bm_mask)
                test_BMN(test_loader, model, epoch, bm_mask)
                test_BMN_ema(test_loader, model_ema, epoch, bm_mask)
            else:
                print('use Semi !!!')
                train_BMN_Semi(train_loader, train_loader_unlabel, model, model_ema, optimizer, epoch, bm_mask)
                test_BMN(test_loader, model, epoch, bm_mask)
                test_BMN_ema(test_loader, model_ema, epoch, bm_mask)
        else:
            print('use Fewer label !!!')
            train_BMN(train_loader, model, optimizer, epoch, bm_mask)
            test_BMN(test_loader, model, epoch, bm_mask)
        # LR decay once per epoch (stepped after training, per current API).
        scheduler.step()
def BMN_inference(opt, eval_name):
    """Run the checkpoint ``opt["checkpoint_path"] + eval_name`` over the
    validation set and write one CSV of candidate proposals per video to
    ./output/BMN_results/.

    For each video: mark start/end candidates (local peaks of the boundary
    curves, or scores above half the curve maximum), enumerate all valid
    (start, end) pairs, and score each pair as
    start_score * end_score * clr_confidence * reg_confidence.
    """
    model = BMN(opt)
    model = torch.nn.DataParallel(model, device_ids=[0, 1, 2, 3]).cuda()
    model_checkpoint_dir = opt["checkpoint_path"] + eval_name  # BMN_checkpoint.pth.tar BMN_best.pth.tar
    checkpoint = torch.load(model_checkpoint_dir)  # BMN_best.pth.tar
    print('load :', model_checkpoint_dir, ' OK !')
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    test_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset="validation"),
                                              batch_size=8, shuffle=False,
                                              num_workers=8, pin_memory=True, drop_last=False)
    tscale = opt["temporal_scale"]
    with torch.no_grad():
        # NOTE(review): the batch-index tensor `idx` is shadowed by the inner
        # `for idx in range(...)` loops below; harmless only because it is
        # re-bound at the top of every loader iteration.
        for idx, input_data in test_loader:
            # set_trace()
            length = idx.shape[0]
            # for ii in range(length):
            # Map dataset indices back to video names for the output filenames.
            video_name = []
            for ii in range(length):
                video_name_video = test_loader.dataset.video_list[idx[ii]]
                video_name.append(video_name_video)
            input_data = input_data.cuda()
            confidence_map, start, end = model(input_data)
            # set_trace()
            for ii in range(length):
                start_scores = start[ii].detach().cpu().numpy()
                end_scores = end[ii].detach().cpu().numpy()
                clr_confidence = (confidence_map[ii][1]).detach().cpu().numpy()
                reg_confidence = (confidence_map[ii][0]).detach().cpu().numpy()
                max_start = max(start_scores)
                max_end = max(end_scores)
                ####################################################################################################
                # generate the set of start points and end points
                start_bins = np.zeros(len(start_scores))
                start_bins[0] = 1  # [1,0,0...,0,1]
                # A snippet is a start candidate if it is a local maximum of the
                # start curve or scores above half the global maximum.
                for idx in range(1, tscale - 1):
                    if start_scores[idx] > start_scores[idx + 1] and start_scores[idx] > start_scores[idx - 1]:
                        start_bins[idx] = 1
                    elif start_scores[idx] > (0.5 * max_start):
                        start_bins[idx] = 1
                end_bins = np.zeros(len(end_scores))
                end_bins[-1] = 1
                for idx in range(1, tscale - 1):
                    if end_scores[idx] > end_scores[idx + 1] and end_scores[idx] > end_scores[idx - 1]:
                        end_bins[idx] = 1
                    elif end_scores[idx] > (0.5 * max_end):
                        end_bins[idx] = 1
                ########################################################################################################
                #########################################################################
                #
                # Enumerate candidate (start, end) pairs: on the BM map, row idx
                # encodes the duration (idx + 1 snippets) and column jdx the start.
                new_props = []
                for idx in range(tscale):
                    for jdx in range(tscale):
                        start_index = jdx
                        end_index = start_index + idx+1
                        if end_index < tscale and start_bins[start_index] == 1 and end_bins[end_index] == 1:
                            xmin = start_index/tscale
                            xmax = end_index/tscale
                            xmin_score = start_scores[start_index]
                            xmax_score = end_scores[end_index]
                            clr_score = clr_confidence[idx, jdx]
                            reg_score = reg_confidence[idx, jdx]
                            score = xmin_score * xmax_score * clr_score*reg_score
                            new_props.append([xmin, xmax, xmin_score, xmax_score, clr_score, reg_score, score])
                new_props = np.stack(new_props)
                #########################################################################
                # NOTE: the "reg_socre" typo is kept — downstream readers may
                # rely on the exact CSV header.
                col_name = ["xmin", "xmax", "xmin_score", "xmax_score", "clr_score", "reg_socre", "score"]
                new_df = pd.DataFrame(new_props, columns=col_name)
                new_df.to_csv("./output/BMN_results/" + video_name[ii] + ".csv", index=False)
def BMN_inference_ema(opt, eval_name):
    """Run an EMA-teacher checkpoint over the validation set and write one CSV
    of candidate proposals per video to ./output/BMN_results/.

    NOTE(review): this is an exact duplicate of BMN_inference — only the
    checkpoint names passed in by the caller differ; the two could be merged.
    """
    model = BMN(opt)
    model = torch.nn.DataParallel(model, device_ids=[0, 1, 2, 3]).cuda()
    model_checkpoint_dir = opt["checkpoint_path"] + eval_name  # BMN_checkpoint.pth.tar BMN_best.pth.tar
    checkpoint = torch.load(model_checkpoint_dir)  # BMN_best.pth.tar
    print('load :', model_checkpoint_dir, ' OK !')
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    test_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset="validation"),
                                              batch_size=8, shuffle=False,
                                              num_workers=8, pin_memory=True, drop_last=False)
    tscale = opt["temporal_scale"]
    with torch.no_grad():
        # NOTE(review): the batch-index tensor `idx` is shadowed by the inner
        # `for idx in range(...)` loops below; harmless only because it is
        # re-bound at the top of every loader iteration.
        for idx, input_data in test_loader:
            # set_trace()
            length = idx.shape[0]
            # for ii in range(length):
            # Map dataset indices back to video names for the output filenames.
            video_name = []
            for ii in range(length):
                video_name_video = test_loader.dataset.video_list[idx[ii]]
                video_name.append(video_name_video)
            input_data = input_data.cuda()
            confidence_map, start, end = model(input_data)
            # set_trace()
            for ii in range(length):
                start_scores = start[ii].detach().cpu().numpy()
                end_scores = end[ii].detach().cpu().numpy()
                clr_confidence = (confidence_map[ii][1]).detach().cpu().numpy()
                reg_confidence = (confidence_map[ii][0]).detach().cpu().numpy()
                max_start = max(start_scores)
                max_end = max(end_scores)
                ####################################################################################################
                # generate the set of start points and end points
                start_bins = np.zeros(len(start_scores))
                start_bins[0] = 1  # [1,0,0...,0,1]
                # A snippet is a start candidate if it is a local maximum of the
                # start curve or scores above half the global maximum.
                for idx in range(1, tscale - 1):
                    if start_scores[idx] > start_scores[idx + 1] and start_scores[idx] > start_scores[idx - 1]:
                        start_bins[idx] = 1
                    elif start_scores[idx] > (0.5 * max_start):
                        start_bins[idx] = 1
                end_bins = np.zeros(len(end_scores))
                end_bins[-1] = 1
                for idx in range(1, tscale - 1):
                    if end_scores[idx] > end_scores[idx + 1] and end_scores[idx] > end_scores[idx - 1]:
                        end_bins[idx] = 1
                    elif end_scores[idx] > (0.5 * max_end):
                        end_bins[idx] = 1
                ########################################################################################################
                #########################################################################
                # Enumerate candidate (start, end) pairs: on the BM map, row idx
                # encodes the duration (idx + 1 snippets) and column jdx the start.
                new_props = []
                for idx in range(tscale):
                    for jdx in range(tscale):
                        start_index = jdx
                        end_index = start_index + idx+1
                        if end_index < tscale and start_bins[start_index] == 1 and end_bins[end_index] == 1:
                            xmin = start_index/tscale
                            xmax = end_index/tscale
                            xmin_score = start_scores[start_index]
                            xmax_score = end_scores[end_index]
                            clr_score = clr_confidence[idx, jdx]
                            reg_score = reg_confidence[idx, jdx]
                            score = xmin_score * xmax_score * clr_score*reg_score
                            new_props.append([xmin, xmax, xmin_score, xmax_score, clr_score, reg_score, score])
                new_props = np.stack(new_props)
                #########################################################################
                # NOTE: the "reg_socre" typo is kept — downstream readers may
                # rely on the exact CSV header.
                col_name = ["xmin", "xmax", "xmin_score", "xmax_score", "clr_score", "reg_socre", "score"]
                new_df = pd.DataFrame(new_props, columns=col_name)
                new_df.to_csv("./output/BMN_results/" + video_name[ii] + ".csv", index=False)
def main(opt):
    """Entry point: train, or run inference + post-processing + evaluation.

    In inference mode both the student model and its EMA teacher are
    evaluated, each from its regular and its best checkpoint.
    """
    if opt["mode"] == "train":
        BMN_Train(opt)
        return
    if opt["mode"] != "inference":
        return
    if not os.path.exists("output/BMN_results"):
        os.makedirs("output/BMN_results")
    print('unlabel percent: ', opt['unlabel_percent'])
    # (banner, checkpoint names, inference function) per model flavour.
    eval_plans = [
        ('eval student model !!',
         ['/BMN_checkpoint.pth.tar', '/BMN_best.pth.tar'], BMN_inference),
        ('eval teacher model !!',
         ['/BMN_checkpoint_ema.pth.tar', '/BMN_best_ema.pth.tar'], BMN_inference_ema),
    ]
    for banner, checkpoints, infer in eval_plans:
        print(banner)
        for ckpt_name in checkpoints:
            infer(opt, ckpt_name)
            print("Post processing start")
            BMN_post_processing(opt)
            print("Post processing finished")
            evaluation_proposal(opt)
if __name__ == '__main__':
    # Parse CLI options into a plain dict so they can be JSON-serialized.
    opt = vars(opts.parse_opt())
    for needed_dir in (opt["checkpoint_path"], './output'):
        if not os.path.exists(needed_dir):
            os.makedirs(needed_dir)
    # Persist the run configuration next to the checkpoints.
    with open(opt["checkpoint_path"] + "/opts.json", "w") as opt_file:
        json.dump(opt, opt_file)
    main(opt)
| 42,436 | 48.173812 | 190 | py |
SSTAP | SSTAP-main/gen_unlabel_videos.py | import numpy as np
import pandas as pd
import json
import random
def load_json(file):
    """Read *file* and return its parsed JSON content."""
    with open(file) as json_file:
        return json.load(json_file)
# Partition the ActivityNet training split into labeled / unlabeled subsets
# for several unlabel ratios, writing one video-info csv per ratio.
anno_df = pd.read_csv("./data/activitynet_annotations/video_info_new.csv")
anno_database = load_json("./data/activitynet_annotations/anet_anno_action.json")
subset = 'training'
training_video = []
action_dict = {}      # action label -> list of training video names
action_dict_num = {}  # action label -> number of training videos
# Group every training video under its (first) action label.
for i in range(len(anno_df)):
    video_name = anno_df.video.values[i]
    video_info = anno_database[video_name]
    video_subset = anno_df.subset.values[i]
    if subset not in video_subset:
        continue
    training_video.append(video_name)
    label = video_info["annotations"][0]['label']
    action_dict.setdefault(label, []).append(video_name)
for label_name in action_dict:
    action_dict_num[label_name] = len(action_dict[label_name])
# Dump the per-class video counts for reference.
with open("./data/activitynet_annotations/per_label_num.json", "w") as opt_file:
    json.dump(action_dict_num, opt_file)
# For each target unlabeled ratio, sample that fraction of videos per class
# (keeping the unlabeled pool class-balanced) and rewrite the video-info csv
# with those rows re-tagged as 'training_unlabel'.
label_percent = np.linspace(0.1, 0.9, 9)
for percent in label_percent:
    unlabeled_video = []
    new_props = []
    for label_name in action_dict:
        class_videos = action_dict[label_name]
        unlabeled_video.extend(
            random.sample(class_videos, round(percent * len(class_videos))))
    for i in range(len(anno_df)):
        video_name = anno_df.video.values[i]
        numFrame = anno_df.numFrame.values[i]
        seconds = anno_df.seconds.values[i]
        fps = anno_df.fps.values[i]
        rfps = anno_df.rfps.values[i]
        featureFrame = anno_df.featureFrame.values[i]
        video_info = anno_database[video_name]
        video_subset = anno_df.subset.values[i]
        row_subset = 'training_unlabel' if video_name in unlabeled_video else video_subset
        new_props.append([video_name, numFrame, seconds, fps, rfps, row_subset, featureFrame])
    new_props = np.stack(new_props)
    col_name = ["video", "numFrame", "seconds", "fps", "rfps", "subset", "featureFrame"]
    new_df = pd.DataFrame(new_props, columns=col_name)
    new_df.to_csv("./data/activitynet_annotations/video_info_new_{}.csv".format(round(percent, 1)), index=False)
SSTAP | SSTAP-main/opts.py | import argparse
def parse_opt():
    """Build and parse the command-line options for SSTAP training/inference.

    Returns:
        argparse.Namespace with all settings; callers typically convert it
        to a dict with ``vars()``.
    """
    parser = argparse.ArgumentParser()
    # Overall settings
    parser.add_argument(
        '--mode',
        type=str,
        default='train')
    parser.add_argument(
        '--checkpoint_path',
        type=str,
        default='./checkpoint')
    # NOTE(review): argparse `type=bool` converts any non-empty string (even
    # "False") to True, so this flag cannot be disabled from the CLI; it is
    # effectively a code-level switch.
    parser.add_argument(
        '--use_semi',
        type=bool,
        default=True)
    parser.add_argument(
        '--training_lr',
        type=float,
        default=0.001)
    parser.add_argument(
        '--unlabel_percent',
        type=float,
        default=0.5)  # fraction of training videos treated as unlabeled
    parser.add_argument(
        '--weight_decay',
        type=float,
        default=1e-4)
    parser.add_argument(
        '--train_epochs',
        type=int,
        default=10)
    parser.add_argument(
        '--batch_size',
        type=int,
        default=16)  # 16
    parser.add_argument(
        '--step_size',
        type=int,
        default=7)
    parser.add_argument(
        '--step_gamma',
        type=float,
        default=0.1)

    # Overall Dataset settings
    parser.add_argument(
        '--video_info',
        type=str,
        default="./data/activitynet_annotations/video_info_new.csv")
    parser.add_argument(
        '--video_anno',
        type=str,
        default="./data/activitynet_annotations/anet_anno_action.json")
    parser.add_argument(
        '--temporal_scale',
        type=int,
        default=100)
    parser.add_argument(
        '--feature_path',
        type=str,
        default="../BSN/data/activitynet_feature_cuhk/")

    parser.add_argument(
        '--num_sample',
        type=int,
        default=32)
    parser.add_argument(
        '--num_sample_perbin',
        type=int,
        default=3)
    # BUGFIX: was `type=int`, which crashes on CLI values such as "0.5"
    # (int('0.5') raises) and would truncate any integer-coercible value;
    # the ratio is fractional (default 0.5), so parse it as float.
    parser.add_argument(
        '--prop_boundary_ratio',
        type=float,
        default=0.5)

    parser.add_argument(
        '--feat_dim',
        type=int,
        default=400)

    # Post processing
    parser.add_argument(
        '--post_process_thread',
        type=int,
        default=8)
    parser.add_argument(
        '--soft_nms_alpha',
        type=float,
        default=0.4)
    parser.add_argument(
        '--soft_nms_low_thres',
        type=float,
        default=0.5)
    parser.add_argument(
        '--soft_nms_high_thres',
        type=float,
        default=0.9)
    parser.add_argument(
        '--result_file',
        type=str,
        default="./output/result_proposal.json")
    parser.add_argument(
        '--save_fig_path',
        type=str,
        default="./output/evaluation_result.jpg")

    args = parser.parse_args()
    return args
| 2,615 | 21.747826 | 71 | py |
SSTAP | SSTAP-main/utils.py | import numpy as np
def ioa_with_anchors(anchors_min, anchors_max, box_min, box_max):
    """Intersection-over-anchor: the fraction of each anchor covered by the box.

    Used as the boundary supervision signal. Inputs may be scalars or numpy
    arrays (broadcast together); anchors are assumed non-degenerate.
    """
    overlap = np.maximum(
        np.minimum(anchors_max, box_max) - np.maximum(anchors_min, box_min), 0.)
    return np.divide(overlap, anchors_max - anchors_min)
def iou_with_anchors(anchors_min, anchors_max, box_min, box_max):
    """Jaccard (IoU) overlap between each anchor and a single box.

    Inputs may be scalars or numpy arrays (broadcast together).
    """
    inter = np.maximum(
        np.minimum(anchors_max, box_max) - np.maximum(anchors_min, box_min), 0.)
    union = (anchors_max - anchors_min) + (box_max - box_min) - inter
    return np.divide(inter, union)
SSTAP | SSTAP-main/dataset.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import json
import torch.utils.data as data
import torch
from utils import ioa_with_anchors, iou_with_anchors
from ipdb import set_trace
def load_json(file):
    """Read *file* and return its parsed JSON content."""
    with open(file) as json_file:
        return json.load(json_file)
class VideoDataSet(data.Dataset):
    """Labeled ActivityNet feature dataset.

    Loads per-video feature csvs and, in training mode, builds the BMN
    supervision targets: the start/end match scores and the GT IoU
    confidence map over all (start, duration) proposals.
    """
    def __init__(self, opt, subset="train"):
        self.temporal_scale = opt["temporal_scale"]  # number of temporal snippets per video (100)
        self.temporal_gap = 1. / self.temporal_scale
        self.subset = subset
        self.mode = opt["mode"]
        self.feature_path = opt["feature_path"]
        # The video list depends on the chosen labeled/unlabeled split ratio.
        self.video_info_path = "./data/activitynet_annotations/video_info_new_{}.csv".format(opt['unlabel_percent'])
        self.video_anno_path = opt["video_anno"]
        self._getDatasetDict()
        self._get_match_map()
        # set_trace()

    def _getDatasetDict(self):
        """Collect annotations for every *labeled* video of the chosen subset."""
        anno_df = pd.read_csv(self.video_info_path)
        anno_database = load_json(self.video_anno_path)
        self.video_dict = {}
        for i in range(len(anno_df)):
            video_name = anno_df.video.values[i]
            video_info = anno_database[video_name]
            video_subset = anno_df.subset.values[i]
            if self.subset in video_subset:
                # Skip 'training_unlabel' rows: this dataset serves labeled videos only.
                if 'unlabel' not in video_subset:
                    self.video_dict[video_name] = video_info
        self.video_list = list(self.video_dict.keys())
        print("%s subset video numbers: %d" % (self.subset, len(self.video_list)))

    def __getitem__(self, index):
        """Return features (+ training targets in train mode) for one video."""
        video_data = self._load_file(index)
        if self.mode == "train":
            match_score_start, match_score_end, confidence_score = self._get_train_label(index, self.anchor_xmin,
                                                                                         self.anchor_xmax)
            return video_data,confidence_score, match_score_start, match_score_end  # [400,100],[100,100],[100],[100]
        else:
            return index, video_data

    def _get_match_map(self):
        """Precompute the (start, end) window for every BM-map cell and the per-snippet anchors."""
        match_map = []
        for idx in range(self.temporal_scale):
            tmp_match_window = []
            xmin = self.temporal_gap * idx
            for jdx in range(1, self.temporal_scale + 1):
                xmax = xmin + self.temporal_gap * jdx
                tmp_match_window.append([xmin, xmax])
            match_map.append(tmp_match_window)
        match_map = np.array(match_map)  # 100x100x2
        match_map = np.transpose(match_map, [1, 0, 2])  # [0,1] [1,2] [2,3].....[99,100]
        match_map = np.reshape(match_map, [-1, 2])  # [0,2] [1,3] [2,4].....[99,101]  # duration x start
        self.match_map = match_map  # duration is same in row, start is same in col [10000,2]
        # One +/- half-snippet anchor window around each snippet boundary.
        self.anchor_xmin = [self.temporal_gap * (i-0.5) for i in range(self.temporal_scale)]  # [-0.5/100,0.5/100,...98.5/100]
        self.anchor_xmax = [self.temporal_gap * (i+0.5) for i in range(1, self.temporal_scale + 1)]  # [1.5/100,...,100.5/100]

    def _load_file(self, index):
        """Load the [400, 100] feature matrix for the index-th video."""
        video_name = self.video_list[index]
        video_df = pd.read_csv(self.feature_path + "csv_mean_" + str(self.temporal_scale) + "/" + video_name + ".csv")
        video_data = video_df.values[:, :]
        video_data = torch.Tensor(video_data)
        video_data = torch.transpose(video_data, 0, 1)
        # NOTE(review): .float() is not in-place and its result is discarded;
        # this line has no effect.
        video_data.float()
        return video_data

    def _get_train_label(self, index, anchor_xmin, anchor_xmax):
        """Build start/end match scores and the GT IoU map for one video."""
        video_name = self.video_list[index]  # video_name
        video_info = self.video_dict[video_name]
        video_frame = video_info['duration_frame']
        video_second = video_info['duration_second']
        feature_frame = video_info['feature_frame']
        corrected_second = float(feature_frame) / video_frame * video_second  # there are some frames not used
        video_labels = video_info['annotations']  # the measurement is second, not frame

        ##############################################################################################
        # change the measurement from second to percentage
        gt_bbox = []
        gt_iou_map = []
        for j in range(len(video_labels)):
            tmp_info = video_labels[j]
            tmp_start = max(min(1, tmp_info['segment'][0] / corrected_second), 0)
            tmp_end = max(min(1, tmp_info['segment'][1] / corrected_second), 0)
            gt_bbox.append([tmp_start, tmp_end])  # gt_bbox in [0, 1]
            # IoU of this GT segment with every (duration, start) proposal cell.
            tmp_gt_iou_map = iou_with_anchors(
                self.match_map[:, 0], self.match_map[:, 1], tmp_start, tmp_end)  # [100*100]
            tmp_gt_iou_map = np.reshape(tmp_gt_iou_map,
                                        [self.temporal_scale, self.temporal_scale])
            gt_iou_map.append(tmp_gt_iou_map)
        gt_iou_map = np.array(gt_iou_map)  # one [100,100] map per GT segment
        gt_iou_map = np.max(gt_iou_map, axis=0)  # best IoU over all GT segments
        gt_iou_map = torch.Tensor(gt_iou_map)  # [100,100]
        ##############################################################################################

        ####################################################################################################
        # generate R_s and R_e: small windows around each GT start/end boundary
        gt_bbox = np.array(gt_bbox)  # per-GT [start, end]
        gt_xmins = gt_bbox[:, 0]
        gt_xmaxs = gt_bbox[:, 1]
        gt_lens = gt_xmaxs - gt_xmins
        gt_len_small = 3 * self.temporal_gap  # np.maximum(self.temporal_gap, self.boundary_ratio * gt_lens)
        gt_start_bboxs = np.stack((gt_xmins - gt_len_small / 2, gt_xmins + gt_len_small / 2), axis=1)
        gt_end_bboxs = np.stack((gt_xmaxs - gt_len_small / 2, gt_xmaxs + gt_len_small / 2), axis=1)
        #####################################################################################################

        ##########################################################################################################
        # calculate the ioa for all timestamp: per-snippet probability of being a start/end boundary
        match_score_start = []
        for jdx in range(len(anchor_xmin)):
            match_score_start.append(np.max(
                ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[jdx], gt_start_bboxs[:, 0], gt_start_bboxs[:, 1])))
        match_score_end = []
        for jdx in range(len(anchor_xmin)):
            match_score_end.append(np.max(
                ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[jdx], gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
        match_score_start = torch.Tensor(match_score_start)
        match_score_end = torch.Tensor(match_score_end)
        ############################################################################################################
        return match_score_start, match_score_end, gt_iou_map

    def __len__(self):
        return len(self.video_list)
class VideoDataSet_unlabel(data.Dataset):
    """Unlabeled ActivityNet feature dataset.

    Serves only the feature tensors of videos tagged 'training_unlabel';
    no annotations are available for them.
    """
    def __init__(self, opt, subset="unlabel"):
        self.temporal_scale = opt["temporal_scale"]  # number of temporal snippets per video (100)
        self.temporal_gap = 1. / self.temporal_scale
        self.subset = subset
        self.mode = opt["mode"]
        self.feature_path = opt["feature_path"]
        self.video_info_path = "./data/activitynet_annotations/video_info_new_{}.csv".format(opt['unlabel_percent'])
        self.video_anno_path = opt["video_anno"]
        self._getDatasetDict()
        self.unlabel_percent = opt['unlabel_percent']
        self._get_match_map()

    def _getDatasetDict(self):
        """Collect the names of all unlabeled videos (annotations are hidden)."""
        anno_df = pd.read_csv(self.video_info_path)
        anno_database = load_json(self.video_anno_path)
        self.video_dict = {}
        for i in range(len(anno_df)):
            video_name = anno_df.video.values[i]
            video_info = anno_database[video_name]
            video_subset = anno_df.subset.values[i]
            if self.subset in video_subset:
                # Annotations deliberately withheld: store a placeholder only.
                self.video_dict[video_name] = 'unseen'
        self.video_list = list(self.video_dict.keys())
        print("%s unlabeled subset video numbers: %d" % (self.subset, len(self.video_list)))

    def __getitem__(self, index):
        """Return the feature tensor (train) or (index, features) otherwise."""
        video_data = self._load_file(index)
        if self.mode == "train":
            # match_score_start, match_score_end, confidence_score = self._get_train_label(index, self.anchor_xmin,
            #                                                                              self.anchor_xmax)
            return video_data  # ,confidence_score, match_score_start, match_score_end # [400,100],[100,100],[100]
        else:
            return index, video_data

    def _get_match_map(self):
        """Precompute the (start, end) window for every BM-map cell and the per-snippet anchors."""
        match_map = []
        for idx in range(self.temporal_scale):
            tmp_match_window = []
            xmin = self.temporal_gap * idx
            for jdx in range(1, self.temporal_scale + 1):
                xmax = xmin + self.temporal_gap * jdx
                tmp_match_window.append([xmin, xmax])
            match_map.append(tmp_match_window)
        match_map = np.array(match_map)  # 100x100x2
        match_map = np.transpose(match_map, [1, 0, 2])  # [0,1] [1,2] [2,3].....[99,100]
        match_map = np.reshape(match_map, [-1, 2])  # [0,2] [1,3] [2,4].....[99,101]  # duration x start
        self.match_map = match_map  # duration is same in row, start is same in col [10000,2]
        self.anchor_xmin = [self.temporal_gap * (i-0.5) for i in range(self.temporal_scale)]  # [-0.5/100,0.5/100,...98.5/100]
        self.anchor_xmax = [self.temporal_gap * (i+0.5) for i in range(1, self.temporal_scale + 1)]  # [1.5/100,...,100.5/100]

    def _load_file(self, index):
        """Load the [400, 100] feature matrix for the index-th video."""
        video_name = self.video_list[index]
        video_df = pd.read_csv(self.feature_path + "csv_mean_" + str(self.temporal_scale) + "/" + video_name + ".csv")
        video_data = video_df.values[:, :]
        video_data = torch.Tensor(video_data)
        video_data = torch.transpose(video_data, 0, 1)
        # NOTE(review): .float() is not in-place and its result is discarded.
        video_data.float()
        return video_data

    def _get_train_label(self, index, anchor_xmin, anchor_xmax):
        """Build supervision targets (copied from VideoDataSet).

        NOTE(review): this appears to be dead code for the unlabeled split —
        self.video_dict stores the string 'unseen', so the dict lookups below
        would fail if this method were ever called.
        """
        video_name = self.video_list[index]  # video_name
        video_info = self.video_dict[video_name]
        video_frame = video_info['duration_frame']
        video_second = video_info['duration_second']
        feature_frame = video_info['feature_frame']
        corrected_second = float(feature_frame) / video_frame * video_second  # there are some frames not used
        video_labels = video_info['annotations']  # the measurement is second, not frame

        ##############################################################################################
        # change the measurement from second to percentage
        gt_bbox = []
        gt_iou_map = []
        for j in range(len(video_labels)):
            tmp_info = video_labels[j]
            tmp_start = max(min(1, tmp_info['segment'][0] / corrected_second), 0)
            tmp_end = max(min(1, tmp_info['segment'][1] / corrected_second), 0)
            gt_bbox.append([tmp_start, tmp_end])  # gt_bbox in [0, 1]
            tmp_gt_iou_map = iou_with_anchors(
                self.match_map[:, 0], self.match_map[:, 1], tmp_start, tmp_end)  # [100*100]
            tmp_gt_iou_map = np.reshape(tmp_gt_iou_map,
                                        [self.temporal_scale, self.temporal_scale])
            gt_iou_map.append(tmp_gt_iou_map)
        gt_iou_map = np.array(gt_iou_map)  # one [100,100] map per GT segment
        gt_iou_map = np.max(gt_iou_map, axis=0)
        gt_iou_map = torch.Tensor(gt_iou_map)  # [100,100]
        ##############################################################################################

        ####################################################################################################
        # generate R_s and R_e: small windows around each GT start/end boundary
        gt_bbox = np.array(gt_bbox)  # per-GT [start, end]
        gt_xmins = gt_bbox[:, 0]
        gt_xmaxs = gt_bbox[:, 1]
        gt_lens = gt_xmaxs - gt_xmins
        gt_len_small = 3 * self.temporal_gap  # np.maximum(self.temporal_gap, self.boundary_ratio * gt_lens)
        gt_start_bboxs = np.stack((gt_xmins - gt_len_small / 2, gt_xmins + gt_len_small / 2), axis=1)
        gt_end_bboxs = np.stack((gt_xmaxs - gt_len_small / 2, gt_xmaxs + gt_len_small / 2), axis=1)
        #####################################################################################################

        ##########################################################################################################
        # calculate the ioa for all timestamp
        match_score_start = []
        for jdx in range(len(anchor_xmin)):
            match_score_start.append(np.max(
                ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[jdx], gt_start_bboxs[:, 0], gt_start_bboxs[:, 1])))
        match_score_end = []
        for jdx in range(len(anchor_xmin)):
            match_score_end.append(np.max(
                ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[jdx], gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
        match_score_start = torch.Tensor(match_score_start)
        match_score_end = torch.Tensor(match_score_end)
        ############################################################################################################
        return match_score_start, match_score_end, gt_iou_map

    def __len__(self):
        return len(self.video_list)
if __name__ == '__main__':
    import opts
    # Smoke test: build the labeled training loader and inspect one batch.
    opt = vars(opts.parse_opt())
    train_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset="train"),
                                               batch_size=opt["batch_size"], shuffle=True,
                                               num_workers=8, pin_memory=True)
    for feats, iou_map, start_scores, end_scores in train_loader:
        set_trace()  # drop into the debugger to inspect the batch interactively
        # Expected shapes: [B,400,100] [B,100,100] [B,100] [B,100]
        print(feats.shape, iou_map.shape, start_scores.shape, end_scores.shape)
        break
SSTAP | SSTAP-main/loss_function.py | # -*- coding: utf-8 -*-
import torch
import numpy as np
import torch.nn.functional as F
def get_mask(tscale):
    """Return the [tscale, tscale] validity mask of the BM confidence map.

    Row i holds (tscale - i) ones followed by i zeros, so only proposals
    with start + duration <= tscale are kept.
    """
    rows = [[1] * (tscale - idx) + [0] * idx for idx in range(tscale)]
    return torch.Tensor(np.array(rows, dtype=np.float32))
''' [1, 1, 1, 1, 1]
[1, 1, 1, 1, 0]
[1, 1, 1, 0, 0]
[1, 1, 0, 0, 0]
[1, 0, 0, 0, 0]'''
def bmn_loss_func(pred_bm, pred_start, pred_end, gt_iou_map, gt_start, gt_end, bm_mask):
    """Total BMN loss = TEM + 10 * PEM regression + PEM classification.

    Returns (total, tem, pem_reg, pem_cls) so each term can be logged.
    """
    # Channel 0 of the confidence map is the regression head,
    # channel 1 the classification head.
    reg_map = pred_bm[:, 0].contiguous()
    cls_map = pred_bm[:, 1].contiguous()
    # Zero out invalid (start + duration > tscale) cells of the GT map:
    # [B,100,100] * [100,100] -> [B,100,100].
    masked_gt = gt_iou_map * bm_mask
    pem_reg_loss = pem_reg_loss_func(reg_map, masked_gt, bm_mask)
    pem_cls_loss = pem_cls_loss_func(cls_map, masked_gt, bm_mask)
    tem_loss = tem_loss_func(pred_start, pred_end, gt_start, gt_end)
    total = tem_loss + 10 * pem_reg_loss + pem_cls_loss
    return total, tem_loss, pem_reg_loss, pem_cls_loss
def tem_loss_func(pred_start, pred_end, gt_start, gt_end):
    """Temporal evaluation loss: class-balanced BCE on start and end probs."""
    def balanced_bce(scores, labels):
        scores = scores.view(-1)
        labels = labels.view(-1)
        pos = (labels > 0.5).float()
        n_total = len(pos)
        n_pos = torch.sum(pos)
        ratio = n_total / n_pos
        # Re-weight so positives and negatives contribute equally.
        w_neg = 0.5 * ratio / (ratio - 1)
        w_pos = 0.5 * ratio
        eps = 0.000001
        per_elem = (w_pos * torch.log(scores + eps) * pos
                    + w_neg * torch.log(1.0 - scores + eps) * (1.0 - pos))
        return -1 * torch.mean(per_elem)
    return balanced_bce(pred_start, gt_start) + balanced_bce(pred_end, gt_end)
def pem_reg_loss_func(pred_score, gt_iou_map, mask):
    """MSE loss for the PEM regression head on a balanced subsample of cells.

    Cells are bucketed by GT IoU (high > 0.7, medium 0.3-0.7, low 0-0.3);
    the medium and low buckets are randomly subsampled down to roughly the
    size of the high bucket before computing the loss.
    NOTE(review): requires CUDA (explicit .cuda() calls) and uses unseeded
    numpy RNG, so the loss value is stochastic per call.
    """
    u_hmask = (gt_iou_map > 0.7).float()
    u_mmask = ((gt_iou_map <= 0.7) & (gt_iou_map > 0.3)).float()
    u_lmask = ((gt_iou_map <= 0.3) & (gt_iou_map > 0.)).float()
    u_lmask = u_lmask * mask  # low bucket restricted to valid map cells

    num_h = torch.sum(u_hmask)
    num_m = torch.sum(u_mmask)
    num_l = torch.sum(u_lmask)

    # Keep each medium cell with probability num_h / num_m.
    r_m = num_h / num_m
    u_smmask = torch.Tensor(np.random.rand(*gt_iou_map.shape)).cuda()
    u_smmask = u_mmask * u_smmask
    u_smmask = (u_smmask > (1. - r_m)).float()

    # Keep each low cell with probability num_h / num_l.
    r_l = num_h / num_l
    u_slmask = torch.Tensor(np.random.rand(*gt_iou_map.shape)).cuda()
    u_slmask = u_lmask * u_slmask
    u_slmask = (u_slmask > (1. - r_l)).float()

    weights = u_hmask + u_smmask + u_slmask

    # F.mse_loss defaults to reduction='mean'; the sum over a ones tensor
    # below rescales that mean by the element count, so the final value is
    # 0.5 * sum(sq_err over all cells) / num_selected (weights are 0/1).
    loss = F.mse_loss(pred_score* weights, gt_iou_map* weights)
    loss = 0.5 * torch.sum(loss*torch.ones(*weights.shape).cuda()) / torch.sum(weights)

    return loss
def pem_cls_loss_func(pred_score, gt_iou_map, mask):
pmask = (gt_iou_map > 0.9).float()
nmask = (gt_iou_map <= 0.9).float()
nmask = nmask * mask
num_positive = torch.sum(pmask)
num_entries = num_positive + torch.sum(nmask)
ratio = num_entries / num_positive
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
epsilon = 0.000001
loss_pos = coef_1 * torch.log(pred_score + epsilon) * pmask
loss_neg = coef_0 * torch.log(1.0 - pred_score + epsilon) * nmask
loss = -1 * torch.sum(loss_pos + loss_neg) / num_entries
return loss
| 3,482 | 32.171429 | 90 | py |
SSTAP | SSTAP-main/eval.py | # -*- coding: utf-8 -*-
import sys
import warnings
warnings.filterwarnings('ignore')
sys.path.append('./Evaluation')
from eval_proposal import ANETproposal
import matplotlib.pyplot as plt
import numpy as np
def run_evaluation(ground_truth_filename, proposal_filename,
                   max_avg_nr_proposals=100,
                   tiou_thresholds=np.linspace(0.5, 0.95, 10),
                   subset='validation'):
    """Run the official ActivityNet proposal evaluator.

    Returns (avg #proposals per video, average recall curve,
    per-tIoU recall matrix).
    """
    evaluator = ANETproposal(ground_truth_filename, proposal_filename,
                             tiou_thresholds=tiou_thresholds,
                             max_avg_nr_proposals=max_avg_nr_proposals,
                             subset=subset, verbose=True, check_status=False)
    evaluator.evaluate()
    return (evaluator.proposals_per_video, evaluator.avg_recall, evaluator.recall)
def plot_metric(opt,average_nr_proposals, average_recall, recall, tiou_thresholds=np.linspace(0.5, 0.95, 10)):
    """Plot recall-vs-#proposals curves and save the figure to opt["save_fig_path"].

    One dashed curve per every second tIoU threshold, plus the solid average
    curve over all thresholds; legend labels carry the area under each curve.
    NOTE(review): `plt.grid(b=True, ...)` uses the `b` keyword that was
    removed in matplotlib 3.6 (renamed `visible`), and `plt.axes()` creates
    a new axes on recent matplotlib — confirm the pinned matplotlib version.
    """
    fn_size = 14
    plt.figure(num=None, figsize=(12, 8))
    ax = plt.subplot(1,1,1)
    colors = ['k', 'r', 'yellow', 'b', 'c', 'm', 'b', 'pink', 'lawngreen', 'indigo']
    # Area under each per-tIoU recall curve (w.r.t. avg #proposals).
    area_under_curve = np.zeros_like(tiou_thresholds)
    for i in range(recall.shape[0]):
        area_under_curve[i] = np.trapz(recall[i], average_nr_proposals)
    # Dashed curves for every second tIoU threshold.
    for idx, tiou in enumerate(tiou_thresholds[::2]):
        ax.plot(average_nr_proposals, recall[2*idx,:], color=colors[idx+1],
                label="tiou=[" + str(tiou) + "], area=" + str(int(area_under_curve[2*idx]*100)/100.),
                linewidth=4, linestyle='--', marker=None)
    # Plots Average Recall vs Average number of proposals.
    ax.plot(average_nr_proposals, average_recall, color=colors[0],
            label="tiou = 0.5:0.05:0.95," + " area=" + str(int(np.trapz(average_recall, average_nr_proposals)*100)/100.),
            linewidth=4, linestyle='-', marker=None)
    handles, labels = ax.get_legend_handles_labels()
    # Put the average-recall curve first in the legend.
    ax.legend([handles[-1]] + handles[:-1], [labels[-1]] + labels[:-1], loc='best')
    plt.ylabel('Average Recall', fontsize=fn_size)
    plt.xlabel('Average Number of Proposals per Video', fontsize=fn_size)
    plt.grid(b=True, which="both")
    plt.ylim([0, 1.0])
    plt.setp(plt.axes().get_xticklabels(), fontsize=fn_size)
    plt.setp(plt.axes().get_yticklabels(), fontsize=fn_size)
    #plt.show()
    plt.savefig(opt["save_fig_path"])
def evaluation_proposal(opt):
    """Evaluate generated proposals against ActivityNet v1.3 GT and report AR@k."""
    avg_nr_proposals, avg_recall, recall = run_evaluation(
        "./Evaluation/data/activity_net_1_3_new.json",  # filter_activity_net_1_3_new.json
        opt["result_file"],
        max_avg_nr_proposals=100,
        tiou_thresholds=np.linspace(0.5, 0.95, 10),
        subset='validation')
    plot_metric(opt, avg_nr_proposals, avg_recall, recall)
    # Columns of `recall` correspond to proposal budgets 1..100.
    for label, col in (("AR@1", 0), ("AR@5", 4), ("AR@10", 9), ("AR@100", -1)):
        print(label + " is \t", np.mean(recall[:, col]))
SSTAP | SSTAP-main/models.py | # -*- coding: utf-8 -*-
import math
import numpy as np
import torch
import torch.nn as nn
from ipdb import set_trace
import random
import torch.nn.functional as F
class TemporalShift(nn.Module):
    """Temporal Shift Module (TSM-style) for 1-D feature sequences.

    Shifts a fixed subset of channels one step forward/backward in time so
    temporal context is mixed with zero extra parameters. The 400-d input
    is treated as two 200-channel halves, and the shift is applied to the
    first channels of each half (the hard-coded 200 boundaries assume a
    400-channel feature).
    """
    def __init__(self, n_segment=3, n_div=8, inplace=False):
        super(TemporalShift, self).__init__()
        self.n_segment = n_segment
        self.fold_div = n_div
        self.inplace = inplace
        self.channels_range = list(range(400))  # feature channels
        if inplace:
            print('=> Using in-place shift...')

    def forward(self, x):
        # x: [B, C, T]
        return self.shift(x, self.n_segment, fold_div=self.fold_div,
                          inplace=self.inplace, channels_range=self.channels_range)

    @staticmethod
    def shift(x, n_segment, fold_div=8, inplace=False, channels_range=[1, 2]):
        """Shift `fold` channels left and `fold` channels right along time.

        x: [B, C, T]; returns a tensor of the same shape.  Time steps that
        fall off either end are filled with zeros.
        """
        x = x.permute(0, 2, 1)  # [B, C, T] -> [B, T, C]
        n_batch, T, c = x.size()
        # BUGFIX: was `c // 2*fold_div`, which parses as (c // 2) * fold_div
        # (= 1600 for c=400) and overflows the channel dimension, shifting the
        # whole tensor left. The intended per-direction fold size is
        # c // (2 * fold_div) channels per 200-channel half.
        fold = c // (2 * fold_div)
        if inplace:
            # Out-of-order issues under parallel execution; would need a CUDA kernel.
            raise NotImplementedError
        out = torch.zeros_like(x)
        # First 200-channel half.
        out[:, :-1, :fold] = x[:, 1:, :fold]  # shift left
        out[:, 1:, fold: 2 * fold] = x[:, :-1, fold: 2 * fold]  # shift right
        out[:, :, 2 * fold:200] = x[:, :, 2 * fold:200]  # not shifted
        # Second 200-channel half.
        out[:, :-1, 200:200 + fold] = x[:, 1:, 200:200 + fold]  # shift left
        out[:, 1:, 200 + fold: 200 + 2 * fold] = x[:, :-1, 200 + fold: 200 + 2 * fold]  # shift right
        out[:, :, 200 + 2 * fold:] = x[:, :, 200 + 2 * fold:]  # not shifted
        return out.permute(0, 2, 1)
class TemporalShift_random(nn.Module):
    """Temporal shift with a *randomly sampled* channel subset per call.

    Unlike TemporalShift's fixed channel layout, each forward pass draws
    2 * fold random channels: fold shift left, fold shift right, the rest
    are copied unchanged. Used as a stochastic perturbation, so outputs
    differ between calls.
    """
    def __init__(self, n_segment=3, n_div=8, inplace=False):
        super(TemporalShift_random, self).__init__()
        # self.net = net
        self.n_segment = n_segment
        self.fold_div = n_div
        self.inplace = inplace
        self.channels_range = list(range(400))  # feature_channels
        if inplace:
            print('=> Using in-place shift...')
        # print('=> Using fold div: {}'.format(self.fold_div))

    def forward(self, x):
        # x: [B, C, T]
        x = self.shift(x, self.n_segment, fold_div=self.fold_div, inplace=self.inplace, channels_range =self.channels_range)
        return x

    @staticmethod
    def shift(x, n_segment, fold_div=8, inplace=False, channels_range=[1,2]):
        """Randomly shift fold channels left and fold channels right in time."""
        x = x.permute(0, 2, 1)  # [B,C,T] --> [B, T, C]
        # set_trace()
        n_batch, T, c = x.size()
        # nt, c, h, w = x.size()
        # n_batch = nt // n_segment
        # x = x.view(n_batch, n_segment, c, h, w)
        # x = x.view(n_batch, T, c, h, w)
        fold = c // fold_div
        # NOTE(review): `all` shadows the builtin; holds the 2*fold sampled channels.
        all = random.sample(channels_range, fold*2)
        forward = sorted(all[:fold])   # channels shifted one step left in time
        backward = sorted(all[fold:])  # channels shifted one step right in time
        fixed = list(set(channels_range) - set(all))  # untouched channels
        # fold = c // fold_div
        if inplace:
            # Due to some out of order error when performing parallel computing.
            # May need to write a CUDA kernel.
            raise NotImplementedError
            # out = InplaceShift.apply(x, fold)
        else:
            # out = torch.zeros_like(x)
            # out[:, :-1, :fold] = x[:, 1:, :fold]  # shift left
            # out[:, 1:, fold: 2 * fold] = x[:, :-1, fold: 2 * fold]  # shift right
            # out[:, :, 2 * fold:] = x[:, :, 2 * fold:]  # not shift

            out = torch.zeros_like(x)
            out[:, :-1, forward] = x[:, 1:, forward]  # shift left
            out[:, 1:, backward] = x[:, :-1, backward]  # shift right
            out[:, :, fixed] = x[:, :, fixed]  # not shift

        # return out.view(nt, c, h, w)
        return out.permute(0, 2, 1)
class InplaceShift(torch.autograd.Function):
    """Memory-saving in-place temporal shift with a custom backward pass.

    NOTE(review): forward/backward unpack 5-D [N, T, C, H, W] tensors, which
    does not match the 3-D path used elsewhere in this file — appears unused
    here (TemporalShift*.shift keeps it commented out). Mutates its input
    via .data, so higher-order gradients are unsupported.
    """
    # Special thanks to @raoyongming for the help to this function
    @staticmethod
    def forward(ctx, input, fold):
        # not support higher order gradient
        # input = input.detach_()
        ctx.fold_ = fold
        n, t, c, h, w = input.size()
        # Reusable scratch buffer for the `fold` channels being moved.
        buffer = input.data.new(n, t, fold, h, w).zero_()
        buffer[:, :-1] = input.data[:, 1:, :fold]
        input.data[:, :, :fold] = buffer  # first fold channels shifted left
        buffer.zero_()
        buffer[:, 1:] = input.data[:, :-1, fold: 2 * fold]
        input.data[:, :, fold: 2 * fold] = buffer  # next fold channels shifted right
        return input

    @staticmethod
    def backward(ctx, grad_output):
        # grad_output = grad_output.detach_()
        fold = ctx.fold_
        # Gradients are shifted in the opposite temporal direction.
        n, t, c, h, w = grad_output.size()
        buffer = grad_output.data.new(n, t, fold, h, w).zero_()
        buffer[:, 1:] = grad_output.data[:, :-1, :fold]
        grad_output.data[:, :, :fold] = buffer
        buffer.zero_()
        buffer[:, :-1] = grad_output.data[:, 1:, fold: 2 * fold]
        grad_output.data[:, :, fold: 2 * fold] = buffer
        return grad_output, None
class BMN(nn.Module):
    """Boundary-Matching Network with SSTAP auxiliary heads.

    Heads on top of a shared 1-D base feature:
    - x_1d_s / x_1d_e: per-snippet start/end boundary probabilities (TEM);
    - x_1d_p -> BM layer -> x_3d_p -> x_2d_p: [B,2,T,T] proposal confidence
      map (PEM, regression + classification channels);
    - recons: feature-reconstruction head (self-supervised branch);
    - clip_order + linear: 2-way clip-order classification head.
    """
    def __init__(self, opt):
        super(BMN, self).__init__()
        self.tscale = opt["temporal_scale"]  # 100
        self.prop_boundary_ratio = opt["prop_boundary_ratio"]  # 0.5
        self.num_sample = opt["num_sample"]  # 32
        self.num_sample_perbin = opt["num_sample_perbin"]  # 3
        self.feat_dim=opt["feat_dim"]  # 400
        self.tem_best_loss = 10000000
        self.hidden_dim_1d = 256
        self.hidden_dim_2d = 128
        self.hidden_dim_3d = 512

        # Precomputes self.sample_mask used by _boundary_matching_layer.
        self._get_interp1d_mask()

        # Base Module
        self.x_1d_b = nn.Sequential(
            nn.Conv1d(self.feat_dim, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
            nn.ReLU(inplace=True),
            nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),  # 256
            nn.ReLU(inplace=True)
        )
        # Feature reconstruction head (no final activation: regresses raw features).
        self.recons = nn.Sequential(
            nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
            nn.ReLU(inplace=True),
            nn.Conv1d(self.hidden_dim_1d, self.feat_dim, kernel_size=3, padding=1, groups=4),  # 256
            # nn.ReLU(inplace=True)
        )
        # Clip-order head: squeeze channels to 1, then classify over T.
        self.clip_order = nn.Sequential(
            # nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
            # nn.ReLU(inplace=True),
            nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=3, padding=1),  # 256
            nn.ReLU(inplace=True)
        )
        self.clip_order_drop = nn.Dropout(0.5)
        self.clip_order_linear = nn.Linear(100, 2)

        # Temporal Evaluation Module
        self.x_1d_s = nn.Sequential(
            nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
            nn.ReLU(inplace=True),
            nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1),
            nn.Sigmoid()
        )
        self.x_1d_e = nn.Sequential(
            nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
            nn.ReLU(inplace=True),
            nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1),
            nn.Sigmoid()
        )

        # Proposal Evaluation Module
        self.x_1d_p = nn.Sequential(
            nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1),
            nn.ReLU(inplace=True)
        )
        self.x_3d_p = nn.Sequential(
            nn.Conv3d(self.hidden_dim_1d, self.hidden_dim_3d, kernel_size=(self.num_sample, 1, 1), stride=(self.num_sample, 1, 1)),  # 512
            nn.ReLU(inplace=True)
        )
        self.x_2d_p = nn.Sequential(
            nn.Conv2d(self.hidden_dim_3d, self.hidden_dim_2d, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(self.hidden_dim_2d, self.hidden_dim_2d, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(self.hidden_dim_2d, self.hidden_dim_2d, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(self.hidden_dim_2d, 2, kernel_size=1),
            nn.Sigmoid()
        )

    def forward(self, x, recons=False, clip_order=False):  # x: [B,400,100]
        """Main path returns (confidence_map, start, end); the `recons` /
        `clip_order` flags short-circuit to the respective auxiliary head."""
        base_feature = self.x_1d_b(x)  # [B,256,100]
        recons_feature = self.recons(base_feature)
        if recons:
            return recons_feature
        batch_size, C, T = base_feature.size()
        if clip_order:
            return self.clip_order_linear(self.clip_order_drop(self.clip_order(base_feature).view(batch_size, T)))
        start = self.x_1d_s(base_feature).squeeze(1)  # [B,1,100] -> [B,100], sigmoid
        end = self.x_1d_e(base_feature).squeeze(1)
        confidence_map = self.x_1d_p(base_feature)  # [B,256,100] -> [B,256,100] + relu
        confidence_map = self._boundary_matching_layer(confidence_map)  # [B, 256, 32, 100, 100]
        # set_trace()
        confidence_map = self.x_3d_p(confidence_map).squeeze(2)
        confidence_map = self.x_2d_p(confidence_map)  # [B, 2, 100, 100]
        return confidence_map, start, end  # [B, 2, 100, 100], [B,100], [B,100]

    def _boundary_matching_layer(self, x):
        """Sample 32 interpolated snippets per proposal via the fixed mask."""
        input_size = x.size()  # [B,256,100]
        out = torch.matmul(x, self.sample_mask).reshape(input_size[0],input_size[1],self.num_sample,self.tscale,self.tscale)
        return out  # sample_mask: [100, 320000]

    def _get_interp1d_bin_mask(self, seg_xmin, seg_xmax, tscale, num_sample, num_sample_perbin):
        """Build a [tscale, num_sample] linear-interpolation mask for one proposal window."""
        # generate sample mask for a boundary-matching pair
        plen = float(seg_xmax - seg_xmin)  # window length
        plen_sample = plen / (num_sample * num_sample_perbin - 1.0)
        total_samples = [
            seg_xmin + plen_sample * ii
            for ii in range(num_sample * num_sample_perbin)
        ]  # num_sample * num_sample_perbin evenly spaced positions
        p_mask = []
        for idx in range(num_sample):  # 32 bins
            bin_samples = total_samples[idx * num_sample_perbin:(idx + 1) * num_sample_perbin]
            bin_vector = np.zeros([tscale])
            # Each fractional position contributes to its two neighbouring
            # snippets with linear-interpolation weights.
            for sample in bin_samples:
                sample_upper = math.ceil(sample)
                sample_decimal, sample_down = math.modf(sample)
                if int(sample_down) <= (tscale - 1) and int(sample_down) >= 0:
                    bin_vector[int(sample_down)] += 1 - sample_decimal  # lower neighbour
                if int(sample_upper) <= (tscale - 1) and int(sample_upper) >= 0:
                    bin_vector[int(sample_upper)] += sample_decimal  # upper neighbour
            bin_vector = 1.0 / num_sample_perbin * bin_vector
            p_mask.append(bin_vector)
        p_mask = np.stack(p_mask, axis=1)  # [100, 32]
        return p_mask

    def _get_interp1d_mask(self):
        """Precompute the sampling mask for every (start, duration) BM-map cell."""
        # generate sample mask for each point in Boundary-Matching Map
        mask_mat = []
        for start_index in range(self.tscale):  # 100
            mask_mat_vector = []
            for duration_index in range(self.tscale):  # 100
                if start_index + duration_index < self.tscale:  # valid proposal
                    p_xmin = start_index  # start
                    p_xmax = start_index + duration_index  # end
                    center_len = float(p_xmax - p_xmin) + 1  # duration
                    # Extend the window by prop_boundary_ratio on each side.
                    sample_xmin = p_xmin - center_len * self.prop_boundary_ratio  # sample_start
                    sample_xmax = p_xmax + center_len * self.prop_boundary_ratio  # sample_end
                    p_mask = self._get_interp1d_bin_mask(
                        sample_xmin, sample_xmax, self.tscale, self.num_sample,  # 32
                        self.num_sample_perbin)
                else:
                    p_mask = np.zeros([self.tscale, self.num_sample])  # [100,32]
                mask_mat_vector.append(p_mask)
            mask_mat_vector = np.stack(mask_mat_vector, axis=2)  # [100,32,100]
            mask_mat.append(mask_mat_vector)
        mask_mat = np.stack(mask_mat, axis=3)  # [100,32,100,100]
        mask_mat = mask_mat.astype(np.float32)
        # Flattened so the BM layer is a single matmul; frozen (no gradient).
        self.sample_mask = nn.Parameter(torch.Tensor(mask_mat).view(self.tscale, -1), requires_grad=False)  # [100, 32*100*100]
# Smoke test: build the model from the project's option parser and run one
# random batch through it on the GPU, printing the three output shapes.
if __name__ == '__main__':
    import opts
    opt = opts.parse_opt()
    opt = vars(opt)  # argparse.Namespace -> plain dict, as BMN expects
    model=BMN(opt).cuda()
    input=torch.randn(2,400,100).cuda()  # [batch=2, feat=400, temporal=100]
    a,b,c=model(input)
    # confidence map [2,2,100,100], start [2,100], end [2,100]
    print(a.shape,b.shape,c.shape)
| 13,366 | 43.115512 | 138 | py |
SSTAP | SSTAP-main/data/activitynet_feature_cuhk/data_process.py | # -*- coding: utf-8 -*-
import random
import numpy as np
import scipy
import pandas as pd
import pandas
import numpy
import json
def resizeFeature(inputData, newSize):
    """Resize a (temporal_length, feature_dim) array to newSize time steps.

    Uses linear interpolation along axis 0; a single-row input cannot be
    interpolated and is tiled instead.

    :param inputData: array-like of shape (temporal_length, feature_dim)
    :param newSize: desired temporal length (int > 1)
    :return: np.ndarray of shape (newSize, feature_dim)
    """
    # BUG FIX: 'import scipy' alone does not load the interpolate subpackage,
    # so scipy.interpolate.interp1d raised AttributeError; import it explicitly.
    from scipy.interpolate import interp1d

    originalSize = len(inputData)
    if originalSize == 1:
        # degenerate case: repeat the single feature vector newSize times
        inputData = np.reshape(inputData, [-1])
        return np.stack([inputData] * newSize)
    x = numpy.array(range(originalSize))
    f = interp1d(x, inputData, axis=0)
    # evenly spaced query points spanning the full original range [0, n-1]
    x_new = [i * float(originalSize - 1) / (newSize - 1) for i in range(newSize)]
    y_new = f(x_new)
    return y_new
def readData(video_name, data_type=("spatial", "temporal")):
    """Load and fuse per-frame two-stream CNN features for one video.

    Reads ./spatial/csv_action/<video>.csv and/or ./temporal/csv_action/<video>.csv,
    truncates all streams to the shortest one, and concatenates them feature-wise.

    :param video_name: video identifier (CSV file stem)
    :param data_type: iterable of stream names, any of "spatial"/"temporal"
    :return: np.ndarray of shape (min_len, sum of stream feature dims)
    """
    # NOTE: default changed from a mutable list to an equivalent tuple to avoid
    # the shared-mutable-default pitfall; iteration behavior is identical.
    spatial_dir = "./spatial/csv_action/"
    temporal_dir = "./temporal/csv_action/"
    data = []
    for dtype in data_type:
        if dtype == "spatial":
            df = pandas.read_csv(spatial_dir + video_name + ".csv")
        elif dtype == "temporal":
            df = pandas.read_csv(temporal_dir + video_name + ".csv")
        data.append(df.values[:, :])
    # The two streams may differ in length by a frame or two; truncate both to
    # the shorter one so they can be concatenated along the feature axis.
    lens = [len(d) for d in data]
    min_len = min(lens)
    new_data = [d[:min_len] for d in data]
    return numpy.concatenate(new_data, axis=1)
def load_json(file):
    """Parse a JSON file and return the decoded Python object."""
    with open(file) as handle:
        parsed = json.load(handle)
    return parsed
def getDatasetDict():
    """Merge ./info/video_info.csv with the ActivityNet v1.3 annotation JSON.

    Returns {video_name: {'duration_frame', 'duration_second', 'annotations'}}.
    The CSV video names carry a 2-character prefix ('v_') that the JSON
    database keys do not, hence the video_name[2:] lookup.
    """
    df = pd.read_csv("./info/video_info.csv")
    json_data = load_json("./info/activity_net.v1-3.min.json")
    database = json_data['database']
    out_dict = {}
    for video_name, num_frame, seconds in zip(df.video.values,
                                              df.numFrame.values,
                                              df.seconds.values):
        video_info = database[video_name[2:]]
        out_dict[video_name] = {
            'duration_frame': num_frame,
            'duration_second': seconds,
            'annotations': video_info['annotations'],
        }
    return out_dict
def poolData(data, videoAnno, num_prop=100, num_bin=1, num_sample_bin=3, pool_type="mean"):
    """Temporally rescale snippet features to a fixed number of anchors.

    Each of the num_prop uniform anchors over the (feature-corrected) video
    duration is sampled with num_bin bins of num_sample_bin interpolated
    points, pooled per bin with mean or max.

    :param data: np.ndarray (n_snippets, 400); each snippet covers 16 frames
    :param videoAnno: dict with 'duration_frame' and 'duration_second'
    :param pool_type: "mean" or "max"
    :return: np.ndarray of shape (num_prop, num_bin * 400)
    """
    # BUG FIX (see inner loop): 'import scipy' alone does not expose
    # scipy.interpolate, so import the submodule function explicitly.
    from scipy.interpolate import interp1d

    feature_frame = len(data) * 16
    video_frame = videoAnno['duration_frame']
    video_second = videoAnno['duration_second']
    # duration actually covered by the extracted features (last partial
    # 16-frame chunk is dropped by the extractor)
    corrected_second = float(feature_frame) / video_frame * video_second
    fps = float(video_frame) / video_second
    st = 16 / fps  # seconds per snippet
    if len(data) == 1:
        # one snippet only: tile it (feature dim is fixed at 400 here)
        video_feature = np.stack([data] * num_prop)
        video_feature = np.reshape(video_feature, [num_prop, 400])
        return video_feature
    x = [st / 2 + ii * st for ii in range(len(data))]  # snippet centres (s)
    f = interp1d(x, data, axis=0)
    video_feature = []
    zero_sample = np.zeros(num_bin * 400)
    tmp_anchor_xmin = [1.0 / num_prop * i for i in range(num_prop)]
    tmp_anchor_xmax = [1.0 / num_prop * i for i in range(1, num_prop + 1)]
    num_sample = num_bin * num_sample_bin
    for idx in range(num_prop):
        xmin = max(x[0] + 0.0001, tmp_anchor_xmin[idx] * corrected_second)
        xmax = min(x[-1] - 0.0001, tmp_anchor_xmax[idx] * corrected_second)
        if xmax < x[0] or xmin > x[-1]:
            # anchor lies completely outside the observed snippet range
            video_feature.append(zero_sample)
            continue
        plen = (xmax - xmin) / (num_sample - 1)
        x_new = [xmin + plen * ii for ii in range(num_sample)]
        y_new = f(x_new)
        y_new_pool = []
        for b in range(num_bin):
            tmp_y_new = y_new[num_sample_bin * b:num_sample_bin * (b + 1)]
            # BUG FIX: pool over the bin slice (tmp_y_new), not over all of
            # y_new — the original ignored the slice, making every bin
            # identical whenever num_bin > 1.
            if pool_type == "mean":
                tmp_y_new = np.mean(tmp_y_new, axis=0)
            elif pool_type == "max":
                tmp_y_new = np.max(tmp_y_new, axis=0)
            y_new_pool.append(tmp_y_new)
        y_new_pool = np.stack(y_new_pool)
        y_new_pool = np.reshape(y_new_pool, [-1])
        video_feature.append(y_new_pool)
    return np.stack(video_feature)
# ---- driver: pool every video's two-stream features to 100 snippets ----
videoDict = getDatasetDict()
# BUG FIX: dict.keys() is a non-shuffleable view on Python 3; materialize it.
videoNameList = list(videoDict.keys())
random.shuffle(videoNameList)
col_names = []
for i in range(400):
    col_names.append("f" + str(i))
for videoName in videoNameList:
    videoAnno = videoDict[videoName]
    data = readData(videoName)
    numFrame = videoAnno['duration_frame']
    featureFrame = len(data) * 16  # frames actually covered by snippets
    videoAnno["feature_frame"] = featureFrame
    videoDict[videoName] = videoAnno
    # BUG FIX: 'print a, b' is Python-2-only syntax (SyntaxError on Py3).
    print(numFrame, featureFrame)
    videoFeature_mean = poolData(data, videoAnno, num_prop=100, num_bin=1,
                                 num_sample_bin=3, pool_type="mean")
    outDf = pd.DataFrame(videoFeature_mean, columns=col_names)
    outDf.to_csv("./csv_mean_100/" + videoName + ".csv", index=False)
# persist the augmented annotation dict (now including 'feature_frame')
with open("./anet_anno_anet.json", "w") as outfile:
    json.dump(videoDict, outfile)
| 4,484 | 31.737226 | 103 | py |
SSTAP | SSTAP-main/data/activitynet_feature_cuhk/ldb_process.py | # -*- coding: utf-8 -*-
"""
Created on Mon May 15 22:31:31 2017
@author: wzmsltw
"""
import caffe
import leveldb
import numpy as np
from caffe.proto import caffe_pb2
import pandas as pd
col_names=[]
for i in range(200):
col_names.append("f"+str(i))
df=pd.read_table("./input_spatial_list.txt",names=['image','frame','label'],sep=" ")
db = leveldb.LevelDB('./LDB')
datum = caffe_pb2.Datum()
i=0
video_name="init"
videoData=np.reshape([],[-1,200])
for key, value in db.RangeIter():
tmp_video_name=df.image.values[i].split('/')[-1]
if tmp_video_name !=video_name:
outDf=pd.DataFrame(videoData,columns=col_names)
outDf.to_csv("./csv_raw/"+video_name+".csv",index=False)
videoData=np.reshape([],[-1,200])
video_name=tmp_video_name
i+=1
datum.ParseFromString(value)
label = datum.label
data = caffe.io.datum_to_array(datum)
data=np.reshape(data,[1,200])
videoData=np.concatenate((videoData,data))
del db
| 983 | 21.883721 | 84 | py |
SSTAP | SSTAP-main/Evaluation/eval_proposal.py | import json
import numpy as np
import pandas as pd
def interpolated_prec_rec(prec, rec):
    """Interpolated AP - VOCdevkit from VOC 2011.

    Pads the precision/recall arrays, makes precision monotonically
    non-increasing (suffix maximum), and integrates over the recall steps.
    """
    mprec = np.hstack([[0], prec, [0]])
    mrec = np.hstack([[0], rec, [1]])
    # suffix maximum == the original backwards max(mprec[i], mprec[i+1]) sweep
    mprec = np.maximum.accumulate(mprec[::-1])[::-1]
    idx = np.where(mrec[1:] != mrec[:-1])[0] + 1
    return np.sum((mrec[idx] - mrec[idx - 1]) * mprec[idx])
def segment_iou(target_segment, candidate_segments):
    """Compute the temporal intersection over union between a
    target segment and all the test segments.

    Parameters
    ----------
    target_segment : 1d array
        Temporal target segment containing [starting, ending] times.
    candidate_segments : 2d array
        Temporal candidate segments containing N x [starting, ending] times.

    Outputs
    -------
    tiou : 1d array
        Temporal intersection over union score of the N's candidate segments.
    """
    lo = np.maximum(target_segment[0], candidate_segments[:, 0])
    hi = np.minimum(target_segment[1], candidate_segments[:, 1])
    # non-overlapping candidates get a zero-length intersection
    inter = (hi - lo).clip(0)
    union = ((candidate_segments[:, 1] - candidate_segments[:, 0])
             + (target_segment[1] - target_segment[0]) - inter)
    return inter.astype(float) / union
def wrapper_segment_iou(target_segments, candidate_segments):
    """Compute intersection over union btw segments

    Parameters
    ----------
    target_segments : ndarray
        2-dim array in format [m x 2:=[init, end]]
    candidate_segments : ndarray
        2-dim array in format [n x 2:=[init, end]]

    Outputs
    -------
    tiou : ndarray
        2-dim array [n x m] with IOU ratio.

    Note: It assumes that candidate-segments are more scarce that target-segments
    """
    if candidate_segments.ndim != 2 or target_segments.ndim != 2:
        raise ValueError('Dimension of arguments is incorrect')
    n = candidate_segments.shape[0]
    m = target_segments.shape[0]
    tiou = np.empty((n, m))
    # one column per target segment, scored against every candidate
    for col in range(m):
        tiou[:, col] = segment_iou(target_segments[col, :], candidate_segments)
    return tiou
class ANETproposal(object):
    """ActivityNet temporal-proposal evaluator.

    Loads ground-truth annotations and predicted proposals from JSON files
    and computes the area under the average-recall vs average-number-of-
    proposals-per-video curve (see evaluate()).
    """
    # top-level keys a valid ground-truth / proposal JSON must contain
    GROUND_TRUTH_FIELDS = ['database', 'taxonomy', 'version']
    PROPOSAL_FIELDS = ['results', 'version', 'external_data']

    def __init__(self, ground_truth_filename=None, proposal_filename=None,
                 ground_truth_fields=GROUND_TRUTH_FIELDS,
                 proposal_fields=PROPOSAL_FIELDS,
                 tiou_thresholds=np.linspace(0.5, 0.95, 10),
                 max_avg_nr_proposals=None,
                 subset='validation', verbose=False,
                 check_status=False):
        """Validate inputs and eagerly parse both JSON files into DataFrames.

        :param subset: which annotation subset to evaluate against
        :param tiou_thresholds: tIoU thresholds recall is averaged over
        :param check_status: if True, query the server for blocked videos
        """
        if not ground_truth_filename:
            raise IOError('Please input a valid ground truth file.')
        if not proposal_filename:
            raise IOError('Please input a valid proposal file.')
        self.subset = subset
        self.tiou_thresholds = tiou_thresholds
        self.max_avg_nr_proposals = max_avg_nr_proposals
        self.verbose = verbose
        self.gt_fields = ground_truth_fields
        self.pred_fields = proposal_fields
        # results populated by evaluate()
        self.recall = None
        self.avg_recall = None
        self.proposals_per_video = None
        self.check_status = check_status
        # Retrieve blocked videos from server.
        # NOTE(review): get_blocked_videos is neither defined nor imported in
        # this module, so check_status=True would raise NameError — presumably
        # it should be imported from utils.py; confirm the intended import.
        if self.check_status:
            self.blocked_videos = get_blocked_videos()
        else:
            self.blocked_videos = list()
        # Import ground truth and proposals.
        self.ground_truth, self.activity_index = self._import_ground_truth(
            ground_truth_filename)
        self.proposal = self._import_proposal(proposal_filename)

        if self.verbose:
            print ('[INIT] Loaded annotations from {} subset.'.format(subset))
            nr_gt = len(self.ground_truth)
            print ('\tNumber of ground truth instances: {}'.format(nr_gt))
            nr_pred = len(self.proposal)
            print ('\tNumber of proposals: {}'.format(nr_pred))
            print ('\tFixed threshold for tiou score: {}'.format(self.tiou_thresholds))

    def _import_ground_truth(self, ground_truth_filename):
        """Reads ground truth file, checks if it is well formatted, and returns
        the ground truth instances and the activity classes.

        Parameters
        ----------
        ground_truth_filename : str
            Full path to the ground truth json file.

        Outputs
        -------
        ground_truth : df
            Data frame containing the ground truth instances.
        activity_index : dict
            Dictionary containing class index.
        """
        with open(ground_truth_filename, 'r') as fobj:
            data = json.load(fobj)
        # Checking format
        if not all([field in data.keys() for field in self.gt_fields]):
            raise IOError('Please input a valid ground truth file.')

        # Read ground truth data; label ids are assigned in encounter order.
        activity_index, cidx = {}, 0
        video_lst, t_start_lst, t_end_lst, label_lst = [], [], [], []
        for videoid, v in data['database'].items():
            if self.subset != v['subset']:
                continue
            if videoid in self.blocked_videos:
                continue
            for ann in v['annotations']:
                if ann['label'] not in activity_index:
                    activity_index[ann['label']] = cidx
                    cidx += 1
                video_lst.append(videoid)
                t_start_lst.append(ann['segment'][0])
                t_end_lst.append(ann['segment'][1])
                label_lst.append(activity_index[ann['label']])

        ground_truth = pd.DataFrame({'video-id': video_lst,
                                     't-start': t_start_lst,
                                     't-end': t_end_lst,
                                     'label': label_lst})
        return ground_truth, activity_index

    def _import_proposal(self, proposal_filename):
        """Reads proposal file, checks if it is well formatted, and returns
        the proposal instances.

        Parameters
        ----------
        proposal_filename : str
            Full path to the proposal json file.

        Outputs
        -------
        proposal : df
            Data frame containing the proposal instances.
        """
        with open(proposal_filename, 'r') as fobj:
            data = json.load(fobj)
        # Checking format...
        if not all([field in data.keys() for field in self.pred_fields]):
            raise IOError('Please input a valid proposal file.')

        # Read predictions.
        video_lst, t_start_lst, t_end_lst = [], [], []
        score_lst = []
        for videoid, v in data['results'].items():
            if videoid in self.blocked_videos:
                continue
            for result in v:
                video_lst.append(videoid)
                t_start_lst.append(result['segment'][0])
                t_end_lst.append(result['segment'][1])
                score_lst.append(result['score'])
        proposal = pd.DataFrame({'video-id': video_lst,
                                 't-start': t_start_lst,
                                 't-end': t_end_lst,
                                 'score': score_lst})
        return proposal

    def evaluate(self):
        """Evaluates a proposal file. To measure the performance of a
        method for the proposal task, we computes the area under the
        average recall vs average number of proposals per video curve.

        Results are stored in self.recall, self.avg_recall and
        self.proposals_per_video.
        """
        recall, avg_recall, proposals_per_video = average_recall_vs_avg_nr_proposals(
            self.ground_truth, self.proposal,
            max_avg_nr_proposals=self.max_avg_nr_proposals,
            tiou_thresholds=self.tiou_thresholds)
        # AUC of the AR-AN curve, reported as a percentage of the max AN
        area_under_curve = np.trapz(avg_recall, proposals_per_video)
        if self.verbose:
            print('[RESULTS] Performance on ActivityNet proposal task.')
            print('\tArea Under the AR vs AN curve: {}%'.format(100.*float(area_under_curve)/proposals_per_video[-1]))
        self.recall = recall
        self.avg_recall = avg_recall
        self.proposals_per_video = proposals_per_video
def average_recall_vs_avg_nr_proposals(ground_truth, proposals,
                                       max_avg_nr_proposals=None,
                                       tiou_thresholds=np.linspace(0.5, 0.95, 10)):
    """ Computes the average recall given an average number
    of proposals per video.

    Parameters
    ----------
    ground_truth : df
        Data frame containing the ground truth instances.
        Required fields: ['video-id', 't-start', 't-end']
    proposal : df
        Data frame containing the proposal instances.
        Required fields: ['video-id, 't-start', 't-end', 'score']
    tiou_thresholds : 1darray, optional
        array with tiou thresholds.

    Outputs
    -------
    recall : 2darray
        recall[i,j] is recall at ith tiou threshold at the jth average number of average number of proposals per video.
    average_recall : 1darray
        recall averaged over a list of tiou threshold. This is equivalent to recall.mean(axis=0).
    proposals_per_video : 1darray
        average number of proposals per video.
    """
    # Get list of videos.
    video_lst = ground_truth['video-id'].unique()

    if not max_avg_nr_proposals:
        max_avg_nr_proposals = float(proposals.shape[0])/video_lst.shape[0]

    # fraction of each video's proposals retained to hit the requested average
    ratio = max_avg_nr_proposals*float(video_lst.shape[0])/proposals.shape[0]

    # Adaptation to query faster
    ground_truth_gbvn = ground_truth.groupby('video-id')
    proposals_gbvn = proposals.groupby('video-id')

    # For each video, computes tiou scores among the retrieved proposals.
    score_lst = []
    total_nr_proposals = 0
    for videoid in video_lst:
        # Get proposals for this video.
        proposals_videoid = proposals_gbvn.get_group(videoid)
        this_video_proposals = proposals_videoid.loc[:, ['t-start', 't-end']].values

        # Sort proposals by score.
        sort_idx = proposals_videoid['score'].argsort()[::-1]
        this_video_proposals = this_video_proposals[sort_idx, :]

        # Get ground-truth instances associated to this video.
        ground_truth_videoid = ground_truth_gbvn.get_group(videoid)
        this_video_ground_truth = ground_truth_videoid.loc[:, ['t-start', 't-end']].values

        if this_video_proposals.shape[0] == 0:
            n = this_video_ground_truth.shape[0]
            score_lst.append(np.zeros((n, 1)))
            continue

        if this_video_proposals.ndim != 2:
            this_video_proposals = np.expand_dims(this_video_proposals, axis=0)
        if this_video_ground_truth.ndim != 2:
            this_video_ground_truth = np.expand_dims(this_video_ground_truth, axis=0)

        nr_proposals = np.minimum(int(this_video_proposals.shape[0] * ratio), this_video_proposals.shape[0])
        total_nr_proposals += nr_proposals
        this_video_proposals = this_video_proposals[:nr_proposals, :]

        # Compute tiou scores.
        tiou = wrapper_segment_iou(this_video_proposals, this_video_ground_truth)
        score_lst.append(tiou)

    # Given that the length of the videos is really varied, we
    # compute the number of proposals in terms of a ratio of the total
    # proposals retrieved, i.e. average recall at a percentage of proposals
    # retrieved per video.

    # Computes average recall.
    pcn_lst = np.arange(1, 101) / 100.0 * (max_avg_nr_proposals*float(video_lst.shape[0])/total_nr_proposals)
    matches = np.empty((video_lst.shape[0], pcn_lst.shape[0]))
    positives = np.empty(video_lst.shape[0])
    recall = np.empty((tiou_thresholds.shape[0], pcn_lst.shape[0]))
    # Iterates over each tiou threshold.
    for ridx, tiou in enumerate(tiou_thresholds):

        # Inspect positives retrieved per video at different
        # number of proposals (percentage of the total retrieved).
        for i, score in enumerate(score_lst):
            # Total positives per video.
            positives[i] = score.shape[0]
            # Find proposals that satisfies minimum tiou threshold.
            true_positives_tiou = score >= tiou
            # Get number of proposals as a percentage of total retrieved.
            # BUG FIX: np.int was removed in NumPy >= 1.24; the builtin int is
            # the documented replacement and behaves identically here.
            pcn_proposals = np.minimum((score.shape[1] * pcn_lst).astype(int), score.shape[1])

            for j, nr_proposals in enumerate(pcn_proposals):
                # Compute the number of matches for each percentage of the proposals
                matches[i, j] = np.count_nonzero((true_positives_tiou[:, :nr_proposals]).sum(axis=1))

        # Computes recall given the set of matches per video.
        recall[ridx, :] = matches.sum(axis=0) / positives.sum()

    # Recall is averaged.
    avg_recall = recall.mean(axis=0)

    # Get the average number of proposals per video.
    proposals_per_video = pcn_lst * (float(total_nr_proposals) / video_lst.shape[0])

    return recall, avg_recall, proposals_per_video
| 13,318 | 38.877246 | 119 | py |
SSTAP | SSTAP-main/Evaluation/utils.py | import json
import urllib2
import numpy as np
API = 'http://ec2-52-11-11-89.us-west-2.compute.amazonaws.com/challenge16/api.py'
def get_blocked_videos(api=API):
    """Query the challenge server for the list of blocked video ids.

    NOTE: this module targets Python 2 (urllib2).
    """
    request = urllib2.Request('{}?action=get_blocked'.format(api))
    response = urllib2.urlopen(request)
    return json.loads(response.read())
def interpolated_prec_rec(prec, rec):
    """Interpolated AP - VOCdevkit from VOC 2011.

    Pads the precision/recall arrays, makes precision monotonically
    non-increasing (suffix maximum), and integrates over the recall steps.
    """
    mprec = np.hstack([[0], prec, [0]])
    mrec = np.hstack([[0], rec, [1]])
    # suffix maximum == the original backwards max(mprec[i], mprec[i+1]) sweep
    mprec = np.maximum.accumulate(mprec[::-1])[::-1]
    idx = np.where(mrec[1:] != mrec[:-1])[0] + 1
    return np.sum((mrec[idx] - mrec[idx - 1]) * mprec[idx])
def segment_iou(target_segment, candidate_segments):
    """Compute the temporal intersection over union between a
    target segment and all the test segments.

    Parameters
    ----------
    target_segment : 1d array
        Temporal target segment containing [starting, ending] times.
    candidate_segments : 2d array
        Temporal candidate segments containing N x [starting, ending] times.

    Outputs
    -------
    tiou : 1d array
        Temporal intersection over union score of the N's candidate segments.
    """
    lo = np.maximum(target_segment[0], candidate_segments[:, 0])
    hi = np.minimum(target_segment[1], candidate_segments[:, 1])
    # non-overlapping candidates get a zero-length intersection
    inter = (hi - lo).clip(0)
    union = ((candidate_segments[:, 1] - candidate_segments[:, 0])
             + (target_segment[1] - target_segment[0]) - inter)
    return inter.astype(float) / union
def wrapper_segment_iou(target_segments, candidate_segments):
    """Compute intersection over union btw segments

    Parameters
    ----------
    target_segments : ndarray
        2-dim array in format [m x 2:=[init, end]]
    candidate_segments : ndarray
        2-dim array in format [n x 2:=[init, end]]

    Outputs
    -------
    tiou : ndarray
        2-dim array [n x m] with IOU ratio.

    Note: It assumes that candidate-segments are more scarce that target-segments
    """
    if candidate_segments.ndim != 2 or target_segments.ndim != 2:
        raise ValueError('Dimension of arguments is incorrect')

    n, m = candidate_segments.shape[0], target_segments.shape[0]
    tiou = np.empty((n, m))
    # BUG FIX: 'xrange' only exists on Python 2 (NameError on Python 3);
    # 'range' behaves identically for this loop on both interpreters.
    for i in range(m):
        tiou[:, i] = segment_iou(target_segments[i, :], candidate_segments)
    return tiou
| 2,648 | 33.855263 | 81 | py |
xSLHA | xSLHA-master/setup.py | import setuptools
# Use the README as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Standard setuptools packaging metadata for the xslha package.
setuptools.setup(
    name="xslha",
    version="1.0.2",
    author="Florian Staub",
    author_email="[email protected]",
    description="A python package to read (big/many) SLHA files",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/fstaub/xSLHA",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 2",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX :: Linux",
        "Operating System :: MacOS"
    ],
)
| 710 | 28.625 | 65 | py |
xSLHA | xSLHA-master/xslha/main.py | import subprocess
import os
from six import string_types
# SLHA parser
# by Florian Staub ([email protected])
# ----------------------------------------------------------
# SLHA Class
# ----------------------------------------------------------
class SLHA():
    """Container and parse-state holder for one SLHA spectrum.

    The read() function drives this class line by line: start_block /
    start_decay / start_xsection switch the parser mode and flush() commits
    the accumulated self.entries into the matching result dict.
    """
    def __init__(self):
        self.blocks = {}            # {BLOCK_NAME: {"i,j,...": value}}
        self.br = {}                # {pdg: {(sorted final state): BR}}
        self.widths = {}            # {pdg: total width}
        self.br1L = {}              # one-loop BRs (DECAY1L sections)
        self.widths1L = {}          # one-loop widths
        self.xsections = {}         # {(sqrt_s, (in1,in2), (out1,out2)): {...}}
        self.block_name = None      # name of the block currently being read
        self.entries = {}           # scratch buffer, committed by flush()
        self.reading_block = False
        self.reading_decay = False
        self.reading_xsection = False
        self.reading_hb_fermion = False
        self.reading_hb_boson = False
        self.decay1L = False        # True while inside a DECAY1L section
        self.decay_part = 0         # pdg id of the decaying particle

    # return width and BR
    def BR(self, init, final):
        """Branching ratio of particle 'init' into the final state 'final'."""
        # sorted tuple: make sure that the final states are order-less
        return self.br[init][tuple(sorted(final))]

    def Width(self, pdg):
        """Total width of the particle with the given pdg id."""
        return self.widths[pdg]

    def Value(self, block, number):
        '''return value of a parameter defined by block and entry
        or the width or an BR'''
        if block == 'WIDTH':
            return self.widths[number]
        elif block == 'BR':
            # number = (pdg, [final state pdgs])
            return self.br[number[0]][tuple(sorted(number[1]))]
        elif block == 'WIDTH1L':
            return self.widths1L[number]
        elif block == 'BR1L':
            return self.br1L[number[0]][tuple(sorted(number[1]))]
        elif block == 'XSECTION':
            xs = self.xsections[tuple(number)]
            return [[x, xs[x]] for x in xs.keys()]
        else:
            # blocks are keyed by the comma-joined entry indices, e.g. "1,2";
            # str([1, 2])[1:-1] with spaces stripped produces exactly that
            return self.blocks[block.upper()][
                str(number)[1:-1].replace(" ", "")]

    def start_decay(self, li):
        """Enter decay-reading mode for the 'DECAY'/'DECAY1L' header line li."""
        parsed = list(filter(None, li.split(' ')))
        self.decay1L = li.upper().startswith("DECAY1L")
        self.decay_part = int(parsed[1])
        if self.decay1L:
            self.widths1L[self.decay_part] = float(parsed[2])
        else:
            self.widths[self.decay_part] = float(parsed[2])
        self.entries = {}
        self.reading_block, self.reading_decay, self.reading_xsection \
            = False, True, False

    def start_block(self, li):
        """Enter block-reading mode for the 'BLOCK <name>' header line li."""
        self.block_name = (list(filter(None, li.split(' ')))[1]).upper()
        self.entries = {}
        self.reading_block, self.reading_decay, self.reading_xsection \
            = True, False, False
        # HiggsBounds coupling blocks use a different column layout and are
        # parsed specially in read()
        self.reading_hb_boson = \
            self.block_name in ["HIGGSBOUNDSINPUTHIGGSCOUPLINGSBOSONS",
                                "HIGGSCOUPLINGSBOSONS"]
        self.reading_hb_fermion = \
            self.block_name in ["HIGGSBOUNDSINPUTHIGGSCOUPLINGSFERMIONS",
                                "HIGGSCOUPLINGSFERMIONS"]

    def start_xsection(self, li):
        """Enter cross-section mode for an 'XSECTION ...' header line li."""
        parsed = list(filter(None, li.split(' ')))
        if "#" in parsed:
            parsed = parsed[:parsed.index("#")]  # remove comments
        # key: (centre-of-mass energy, (initial pdgs), (final pdgs))
        self.xs_head = tuple(
            [float(parsed[1]),
             tuple([int(parsed[2]), int(parsed[3])]),
             tuple([int(parsed[-2]), int(parsed[-1])])
             ])
        self.entries = {}
        self.reading_block, self.reading_decay, self.reading_xsection \
            = False, False, True

    def flush(self):
        '''store the information once a block is completely parsed'''
        if len(self.entries) > 0:
            if self.reading_block:
                self.blocks[self.block_name] = self.entries
            if self.reading_decay:
                if self.decay1L:
                    self.br1L[self.decay_part] = self.entries
                else:
                    self.br[self.decay_part] = self.entries
            if self.reading_xsection:
                self.xsections[self.xs_head] = self.entries
# ----------------------------------------------------------
# Reading
# ----------------------------------------------------------
# now the main function to read the SLHA file
def read(file, separator=None, verbose=False):
spc = SLHA()
if separator is not None:
all_files = []
count = 1
with open(file) as infile:
for line in infile:
li = line.strip().upper()
if li.startswith("#") or len(li) < 1:
continue
if separator is not None:
if li.startswith(separator):
spc.flush()
if max(len(spc.blocks.keys()),len(spc.widths.keys())) > 0:
all_files.append(spc)
# start next point
spc = SLHA()
count = count + 1
if verbose:
print("Read spc file:", count)
continue
# New block started
if li.startswith("BLOCK"):
spc.flush() # store information which was read
spc.start_block(li)
elif li.startswith("DECAY"):
spc.flush() # store information which was read
spc.start_decay(li)
elif li.startswith("XSECTION"):
spc.flush() # store information which was read
spc.start_xsection(li)
# Reading and parsing values
else:
parsed = list(filter(None, li.split(' ')))
if "#" in parsed:
parsed = parsed[:parsed.index("#")] # remove comments
if spc.reading_block:
if spc.reading_hb_fermion:
spc.entries[",".join(parsed[3:])] = \
[float(parsed[0]), float(parsed[1])]
elif spc.reading_hb_boson:
spc.entries[",".join(parsed[2:])] = \
float(parsed[0])
else:
# Value might be a string like in SPINFO block
try:
value = float(parsed[-1])
except:
value = parsed[-2]
spc.entries[",".join(parsed[0:-1])] = value
if spc.reading_decay:
spc.entries[
tuple(sorted(eval("[" + ",".join(parsed[2:]) + "]")))
] = float(parsed[0])
if spc.reading_xsection:
spc.entries[
tuple(eval("[" + ",".join(parsed[0:-2]) + "]"))
] = float(parsed[-2])
spc.flush() # save the very last block in the file
if verbose:
print("Read %i blocks and %i decays" % (len(spc.blocks), len(spc.br)))
if separator is None:
return spc
else:
if len(spc.entries) > 0:
all_files.append(spc)
return all_files
def read_small(file, entries, sep):
    """Wrapper for faster read-in of multiple spectra.

    Squeezes the file with grep down to the separator lines, block headers and
    the requested entry patterns before parsing, which makes reading much more
    efficient. Example: read_small(filename, ["# m0", "# m12"], "ENDOF").

    :param entries: list of grep patterns to keep, or None to read everything
    :param sep: point-separator string passed on to read()
    :return: list of SLHA objects (see read() with separator)
    """
    if entries is None:
        return read(file, separator=sep)
    string = "--regexp=\"" + sep + "\" --regexp=\"Block\" "
    for i in entries:
        string = string + "--regexp=\"" + i + "\" "
    # BUG FIX: the stale-file guard used to check/remove "temp.spc" although
    # the scratch file written below is "temp_read_small.spc".
    if os.path.isfile("temp_read_small.spc"):
        subprocess.call("rm temp_read_small.spc", shell=True)
    subprocess.call("cat " + file + " | grep -i " + string
                    + " > temp_read_small.spc", shell=True)
    out = read("temp_read_small.spc", separator=sep)
    subprocess.call("rm temp_read_small.spc", shell=True)
    return out
def read_dir(dir, entries=None):
    """Read all SLHA files in a directory in one pass.

    :param entries: optional grep patterns forwarded to read_small()
    :return: list of SLHA objects, one per file
    """
    scratch = "temp_read_dir.spc"
    if os.path.isfile(scratch):
        subprocess.call("rm " + scratch, shell=True)
    # 'tail -n+1' prints a "==> name <==" header before each file, which
    # read_small() then uses as the point separator.
    subprocess.check_call("tail -n+1 " + dir + "/* > " + scratch, shell=True)
    out = read_small(scratch, entries, "==>")
    subprocess.call("rm " + scratch, shell=True)
    return out
#def read_dir(dir,entries=None):
#subprocess.call("rm temp_read_dir.spc",shell=True)
#subprocess.check_call("cat "+dir+"/* > temp_read_dir.spc",shell=True)
#with open("temp_read_dir.spc") as infile:
#for line in infile:
#li=line.strip().upper()
#if li.startswith("#") or len(li)<1:
#continue
#else:
#file_sep=li[:li.index("#")]
#break
#out=read_small("temp_read_dir.spc",entries,file_sep)
#subprocess.call("rm temp_read_dir.spc",shell=True)
#return out
# ----------------------------------------------------------
# Writing
# ----------------------------------------------------------
def write(blocks, file):
    """Write a dict of SLHA blocks ({name: {key: value}}) to 'file'."""
    with open(file, 'w+') as out:
        for name in blocks:
            write_block_head(name, out)
            write_block_entries(blocks[name], out)
def write_block_entries(values, file):
    """Write one ' key value # ' line per entry of 'values' to 'file'."""
    for key in values:
        file.write(' %s %10.4e # \n' % (key, float(values[key])))
def write_les_houches(block, values, point, file):
    """Write one complete Les-Houches input block: header plus numbered entries."""
    write_block_head(block, file)
    write_block_numbers(block, values, point, file)
def write_block_head(name, file):
    """Write the 'Block NAME # ' header line for an SLHA block."""
    file.write("Block %s # \n" % name.upper())
def write_block_numbers(name, values, Variable, file):
    """Write the numbered entries of block 'name' to 'file'.

    String values that eval() round-trips unchanged (e.g. expressions in terms
    of scan variables) are written verbatim; everything else is written as a
    float in %10.4e format, ints as integers.

    NOTE(review): the 'Variable' parameter is unused in this body — kept for
    interface compatibility with write_les_houches(); confirm before removing.
    NOTE(review): eval() is applied to the string values — only safe for
    trusted input.
    """
    for v in values.keys():
        # if type(values[v]) is string_types:
        if isinstance(values[v], string_types):  # to be 2 and 3 compatible
            if str(eval(values[v]))==values[v]:
                # literal that survives eval unchanged: write as-is
                file.write(' %s %s # %s \n'
                           % (v, values[v],
                              name.upper() + "[" + str(v) + "]"))
            else:
                # expression: evaluate it and write the numeric result
                file.write(' %s %10.4e # %s \n'
                           % (v, float(eval(values[v])),
                              name.upper() + "[" + str(v) + "]"))
        elif isinstance(values[v], int):
            file.write(' %s %i # %s \n'
                       % (v, (values[v]), name.upper() + "[" + str(v) + "]"))
        else:
            file.write(' %s %10.4e # %s \n'
                       % (v, float(values[v]),
                           name.upper() + "[" + str(v) + "]"))
| 10,207 | 34.817544 | 90 | py |
xSLHA | xSLHA-master/xslha/__init__.py | from .main import *
name = "xslha"
| 35 | 11 | 19 | py |
enterprise_extensions | enterprise_extensions-master/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup
# Long description for PyPI is assembled from README + changelog below.
with open("README.rst") as readme_file:
    readme = readme_file.read()
with open("HISTORY.rst") as history_file:
    history = history_file.read()
# Runtime dependencies with minimum tested versions.
requirements = [
    "numpy>=1.16.3",
    "scipy>=1.2.0",
    "ephem>=3.7.6.0",
    "healpy>=1.14.0",
    "scikit-sparse>=0.4.5",
    "pint-pulsar>=0.8.2",
    "libstempo>=2.4.0",
    "enterprise-pulsar>=3.3.0",
    "scikit-learn>=0.24",
    "emcee",
    "ptmcmcsampler",
]
# No extra test-only dependencies beyond the runtime ones.
test_requirements = []
# Extract version
# Extract version
def get_version():
    """Return __version__ from the package's __init__.py without importing it.

    Scans for the first line containing '__version__' and returns the text
    between the first pair of double quotes on that line.
    """
    with open("enterprise_extensions/__init__.py") as handle:
        for line in handle:
            if "__version__" in line:
                return line.split('"')[1]
# Packaging metadata; the version is read from the package source at build time.
setup(
    name="enterprise_extensions",
    version=get_version(),
    description="Extensions, model shortcuts, and utilities for the enterprise PTA analysis framework.",
    long_description=readme + "\n\n" + history,
    long_description_content_type='text/x-rst',
    classifiers=[
        "Topic :: Scientific/Engineering :: Astronomy",
        "Topic :: Scientific/Engineering :: Physics",
        "Topic :: Scientific/Engineering :: Mathematics",
        "Intended Audience :: Science/Research",
        "Programming Language :: Python :: 3",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    keywords="gravitational-wave, black-hole binary, pulsar-timing arrays",
    url="https://github.com/stevertaylor/enterprise_extensions",
    author="Stephen R. Taylor, Paul T. Baker, Jeffrey S. Hazboun, Sarah Vigeland",
    author_email="[email protected]",
    license="MIT",
    packages=[
        "enterprise_extensions",
        "enterprise_extensions.frequentist",
        "enterprise_extensions.chromatic",
    ],
    # ship the solar-wind proton-density data table with the chromatic subpackage
    package_data={
        "enterprise_extensions.chromatic": [
            "ACE_SWEPAM_daily_proton_density_1998_2018_MJD_cm-3.txt"
        ]
    },
    test_suite="tests",
    tests_require=test_requirements,
    install_requires=requirements,
    zip_safe=False,
)
| 2,094 | 27.310811 | 104 | py |
enterprise_extensions | enterprise_extensions-master/tests/test_hypermodel.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `enterprise_extensions` package."""
import json
import logging
import os
import pickle
import pytest
from enterprise_extensions import models, hypermodel
# Paths are resolved relative to this test file so tests run from any cwd.
testdir = os.path.dirname(os.path.abspath(__file__))
datadir = os.path.join(testdir, 'data')
outdir = os.path.join(testdir, 'test_out')
# Pulsars with pickled fixtures available under tests/data.
psr_names = ['J0613-0200', 'J1713+0747', 'J1909-3744']
# Shared NANOGrav 11yr noise dictionary used by the model constructors.
with open(datadir+'/ng11yr_noise.json', 'r') as fin:
    noise_dict = json.load(fin)
@pytest.fixture
def dmx_psrs(caplog):
    """Sample pytest fixture.

    See more at: http://doc.pytest.org/en/latest/fixture.html
    """
    caplog.set_level(logging.CRITICAL)
    loaded = []
    for name in psr_names:
        path = datadir + '/{0}_ng9yr_dmx_DE436_epsr.pkl'.format(name)
        with open(path, 'rb') as fin:
            loaded.append(pickle.load(fin))
    return loaded
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_hypermodel(dmx_psrs, caplog):
    """A HyperModel over models 2A and 3A exposes the expected API and params."""
    ptas = {0: models.model_2a(dmx_psrs, noisedict=noise_dict),
            1: models.model_3a(dmx_psrs, noisedict=noise_dict)}
    hm = hypermodel.HyperModel(ptas)
    assert hasattr(hm, 'get_lnlikelihood')
    assert 'gw_log10_A' in hm.param_names
    assert 'nmodel' in hm.param_names
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_hyper_sampler(dmx_psrs, caplog):
    """HyperModel.setup_sampler returns a sampler and writes ``pars.txt``
    listing the parameter names in the same order as ``hm.param_names``."""
    m2a = models.model_2a(dmx_psrs, noisedict=noise_dict)
    m3a = models.model_3a(dmx_psrs, noisedict=noise_dict)
    ptas = {0: m2a, 1: m3a}
    hm = hypermodel.HyperModel(ptas)
    samp = hm.setup_sampler(outdir=outdir, human='tester')
    assert hasattr(samp, "sample")
    paramfile = os.path.join(outdir, "pars.txt")
    assert os.path.isfile(paramfile)
    with open(paramfile, "r") as f:
        params = [line.rstrip('\n') for line in f]
    # zip() would silently truncate if the file were short or long;
    # compare lengths explicitly before the element-wise comparison
    assert len(params) == len(hm.param_names)
    for ptapar, filepar in zip(hm.param_names, params):
        assert ptapar == filepar
    x0 = hm.initial_sample()
    assert len(x0) == len(hm.param_names)
| 1,969 | 28.402985 | 82 | py |
enterprise_extensions | enterprise_extensions-master/tests/test_chromatic.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `enterprise_extensions.chromatic` submodule."""
import logging
import os
import pickle
import numpy as np
import pytest
from enterprise_extensions.chromatic import solar_wind as sw
testdir = os.path.dirname(os.path.abspath(__file__))
datadir = os.path.join(testdir, 'data')
psr_names = ['J0613-0200', 'J1944+0907']
@pytest.fixture
def nodmx_psrs(caplog):
    """Load the 11-yr no-DMX test pulsars from pickled files in ``datadir``."""
    caplog.set_level(logging.CRITICAL)
    loaded = []
    for name in psr_names:
        path = datadir + '/{0}_ng11yr_nodmx_DE436_epsr.pkl'.format(name)
        with open(path, 'rb') as fh:
            loaded.append(pickle.load(fh))
    return loaded
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_sw_r_to_p(nodmx_psrs):
    """With power=2 the generalized radial solar-wind delay must reduce to
    the standard 1/r^2 solar-wind delay for the same electron density."""
    p0 = nodmx_psrs[0]
    dt_sw1 = sw.solar_wind_r_to_p(p0.toas, p0.freqs, p0.planetssb,
                                  p0.sunssb, p0.pos_t,
                                  n_earth=5, power=2, log10_ne=False)
    dt_sw2 = sw.solar_wind(p0.toas, p0.freqs, p0.planetssb,
                           p0.sunssb, p0.pos_t, n_earth=5)
    # element-wise agreement to 10 ns-level tolerance
    assert all(np.isclose(dt_sw1, dt_sw2, atol=1e-8))
| 1,126 | 25.209302 | 85 | py |
enterprise_extensions | enterprise_extensions-master/tests/test_models.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `enterprise_extensions` package."""
import json
import logging
import os
import pickle
import pytest
from enterprise import constants as const
from enterprise_extensions import model_utils, models
testdir = os.path.dirname(os.path.abspath(__file__))
datadir = os.path.join(testdir, 'data')
psr_names = ['J0613-0200', 'J1713+0747', 'J1909-3744']
with open(datadir+'/ng11yr_noise.json', 'r') as fin:
noise_dict = json.load(fin)
@pytest.fixture
def dmx_psrs(caplog):
    """Load the three 9-yr DMX test pulsars from pickled files in ``datadir``."""
    caplog.set_level(logging.CRITICAL)
    loaded = []
    for name in psr_names:
        fname = datadir + '/{0}_ng9yr_dmx_DE436_epsr.pkl'.format(name)
        with open(fname, 'rb') as fh:
            loaded.append(pickle.load(fh))
    return loaded
@pytest.fixture
def nodmx_psrs(caplog):
    """Load the three 9-yr no-DMX test pulsars from pickled files in ``datadir``."""
    caplog.set_level(logging.CRITICAL)

    def _load(name):
        path = datadir + '/{0}_ng9yr_nodmx_DE436_epsr.pkl'.format(name)
        with open(path, 'rb') as fh:
            return pickle.load(fh)

    return [_load(name) for name in psr_names]
def test_model_singlepsr_noise(nodmx_psrs, caplog):
    """Default single-pulsar noise model builds and exposes a likelihood."""
    pta = models.model_singlepsr_noise(nodmx_psrs[1])
    assert hasattr(pta, 'get_lnlikelihood')
def test_model_singlepsr_noise_faclike(nodmx_psrs, caplog):
    """Factorized-likelihood single-pulsar model: a common 'gw' signal is
    added alongside (or instead of) the per-pulsar red noise."""
    # caplog.set_level(logging.CRITICAL)
    # default behaviour
    m=models.model_singlepsr_noise(nodmx_psrs[1],
                                   factorized_like=True, Tspan=10*const.yr)
    m.get_basis()
    assert 'gw_log10_A' in m.param_names
    assert 'J1713+0747_red_noise_log10_A' in m.param_names
    # highest GW basis frequency should land exactly at 1/yr
    assert m.signals["J1713+0747_gw"]._labels[''][-1] == const.fyr
    # gw but no RN
    m=models.model_singlepsr_noise(nodmx_psrs[1], red_var=False,
                                   factorized_like=True, Tspan=10*const.yr)
    assert hasattr(m, 'get_lnlikelihood')
    assert 'gw_log10_A' in m.param_names
    assert 'J1713+0747_red_noise_log10_A' not in m.param_names
def test_model_singlepsr_noise_sw(nodmx_psrs, caplog):
    """Single-pulsar noise model with a deterministic solar-wind term plus
    a solar-wind GP, exercised for every supported GP basis."""
    # caplog.set_level(logging.CRITICAL)
    for basis in ('powerlaw', 'periodic', 'sq_exp'):
        m = models.model_singlepsr_noise(nodmx_psrs[1], dm_sw_deter=True,
                                         dm_sw_gp=True, swgp_basis=basis)
        assert hasattr(m, 'get_lnlikelihood')
        # draw a random prior point and check the likelihood evaluates
        x0 = {pname: p.sample() for pname, p in zip(m.param_names, m.params)}
        m.get_lnlikelihood(x0)
def test_model_singlepsr_noise_dip_cusp(nodmx_psrs, caplog):
    """Single-pulsar model with two named exponential DM dips, dual cusps
    enabled, and single cusps disabled; the likelihood must evaluate."""
    # caplog.set_level(logging.CRITICAL)
    dip_kwargs = {'dm_expdip': True,
                  'dmexp_sign': 'negative',
                  'num_dmdips': 2,
                  # MJD windows for the two dip epochs
                  'dm_expdip_tmin': [54700, 57450],
                  'dm_expdip_tmax': [54850, 57560],
                  'dmdip_seqname': ['1st_ism', '2nd_ism'],
                  'dm_cusp': False,
                  'dm_cusp_sign': 'negative',
                  'dm_cusp_idx': [2, 4],
                  'dm_cusp_sym': False,
                  'dm_cusp_tmin': None,
                  'dm_cusp_tmax': None,
                  'num_dm_cusps': 2,
                  'dm_dual_cusp': True,
                  'dm_dual_cusp_tmin': [54700, 57450],
                  'dm_dual_cusp_tmax': [54850, 57560], }
    m=models.model_singlepsr_noise(nodmx_psrs[1], dm_sw_deter=True,
                                   dm_sw_gp=True, **dip_kwargs)
    assert hasattr(m, 'get_lnlikelihood')
    # random prior draw; likelihood must evaluate without error
    x0 = {pname: p.sample() for pname, p in zip(m.param_names, m.params)}
    m.get_lnlikelihood(x0)
def _assert_chrom_nondiag_params(pta, psr_name, expect_in, expect_out):
    """Check presence/absence of chromatic-GP hyperparameters for one
    pulsar and verify the likelihood evaluates at a random prior draw."""
    for suffix in expect_in:
        assert '{0}_chrom_gp_{1}'.format(psr_name, suffix) in pta.param_names
    for suffix in expect_out:
        assert '{0}_chrom_gp_{1}'.format(psr_name, suffix) not in pta.param_names
    assert hasattr(pta, 'get_lnlikelihood')
    x0 = {pname: p.sample() for pname, p in zip(pta.param_names, pta.params)}
    pta.get_lnlikelihood(x0)


def test_model_singlepsr_noise_chrom_nondiag(nodmx_psrs, caplog):
    """Non-diagonal chromatic-GP single-pulsar models: for every supported
    kernel and every test pulsar, the expected kernel hyperparameters are
    present (and only those) and the likelihood evaluates.

    A ``None`` kernel means "do not pass ``chrom_kernel``", i.e. exercise
    the model's default kernel.
    """
    # caplog.set_level(logging.CRITICAL)
    # kernel -> (hyperparameter suffixes expected present,
    #            hyperparameter suffixes expected absent)
    kernels = {
        None: (['log10_sigma', 'log10_ell', 'log10_p', 'log10_gam_p'],
               ['log10_ell2', 'log10_alpha_wgt']),
        'periodic_rfband': (['log10_sigma', 'log10_ell', 'log10_ell2',
                             'log10_alpha_wgt', 'log10_p', 'log10_gam_p'],
                            []),
        'sq_exp': (['log10_sigma', 'log10_ell'],
                   ['log10_p', 'log10_gam_p']),
        'sq_exp_rfband': (['log10_sigma', 'log10_ell', 'log10_ell2',
                           'log10_alpha_wgt'],
                          ['log10_p', 'log10_gam_p']),
    }
    for kernel, (expect_in, expect_out) in kernels.items():
        kwargs = {} if kernel is None else {'chrom_kernel': kernel}
        # nodmx_psrs is built in psr_names order (see the fixture)
        for psr, psr_name in zip(nodmx_psrs, psr_names):
            m = models.model_singlepsr_noise(psr, dm_var=True, dm_type=None,
                                             chrom_gp=True,
                                             chrom_gp_kernel='nondiag',
                                             **kwargs)
            _assert_chrom_nondiag_params(m, psr_name, expect_in, expect_out)
def test_model_singlepsr_noise_chrom_diag(nodmx_psrs, caplog):
    """Diagonal chromatic-GP single-pulsar model for every supported
    chromatic PSD; ``None`` exercises the model's default PSD."""
    # caplog.set_level(logging.CRITICAL)
    for psd in (None, 'turnover', 'spectrum'):
        kwargs = {} if psd is None else {'chrom_psd': psd}
        m = models.model_singlepsr_noise(nodmx_psrs[1], chrom_gp=True,
                                         chrom_gp_kernel='diag', **kwargs)
        assert hasattr(m, 'get_lnlikelihood')
        # random prior draw; likelihood must evaluate without error
        x0 = {pname: p.sample() for pname, p in zip(m.param_names, m.params)}
        m.get_lnlikelihood(x0)
def test_model_singlepsr_fact_like(nodmx_psrs, caplog):
    """Single-pulsar model with factorized-likelihood keyword arguments.

    NOTE(review): despite the test name, ``factorized_like=False`` is
    passed while ``fact_like_gamma``/``gw_components`` are supplied —
    possibly an oversight; confirm the intended setting.
    """
    # caplog.set_level(logging.CRITICAL)
    psr = nodmx_psrs[1]
    Tspan = model_utils.get_tspan([psr])
    m=models.model_singlepsr_noise(psr, chrom_gp=True,
                                   chrom_gp_kernel='diag',
                                   factorized_like=False,
                                   Tspan=Tspan, fact_like_gamma=13./3,
                                   gw_components=5)
    assert hasattr(m, 'get_lnlikelihood')
    # random prior draw; likelihood must evaluate without error
    x0 = {pname: p.sample() for pname, p in zip(m.param_names, m.params)}
    m.get_lnlikelihood(x0)
def test_modelbwmsglpsr(nodmx_psrs, caplog):
    """Single-pulsar burst-with-memory model exposes a likelihood and the
    expected ramp parameters."""
    pta = models.model_bwm_sglpsr(nodmx_psrs[0])
    assert hasattr(pta, 'get_lnlikelihood')
    for pname in ("ramp_log10_A", "ramp_t0"):
        assert pname in pta.param_names
def test_modelbwm(nodmx_psrs, caplog):
    """Full-array burst-with-memory model exposes a likelihood and the full
    set of burst parameters (amplitude, epoch, sky location, polarization)."""
    pta = models.model_bwm(nodmx_psrs)
    assert hasattr(pta, 'get_lnlikelihood')
    expected = ("bwm_log10_A", "bwm_t0", "bwm_phi", "bwm_pol", "bwm_costheta")
    for pname in expected:
        assert pname in pta.param_names
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_model1(dmx_psrs, caplog):
    """Model 1 builds a PTA exposing a likelihood."""
    pta = models.model_1(dmx_psrs, noisedict=noise_dict)
    assert hasattr(pta, 'get_lnlikelihood')
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_model2a(dmx_psrs, caplog):
    """Model 2a builds a PTA exposing a likelihood."""
    caplog.set_level(logging.CRITICAL)
    pta = models.model_2a(dmx_psrs, noisedict=noise_dict)
    assert hasattr(pta, 'get_lnlikelihood')
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_model2a_pshift(dmx_psrs, caplog):
    """Model 2a with seeded Fourier-basis phase shifts builds."""
    caplog.set_level(logging.CRITICAL)
    pta = models.model_2a(dmx_psrs, noisedict=noise_dict, pshift=True, pseed=42)
    assert hasattr(pta, 'get_lnlikelihood')
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_model2a_5gwb(dmx_psrs, caplog):
    """Model 2a with only five GWB frequencies builds."""
    caplog.set_level(logging.CRITICAL)
    pta = models.model_2a(dmx_psrs, n_gwbfreqs=5, noisedict=noise_dict)
    assert hasattr(pta, 'get_lnlikelihood')
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_model2a_broken_plaw(dmx_psrs, caplog):
    """Model 2a with a broken-power-law common spectrum builds."""
    caplog.set_level(logging.CRITICAL)
    pta = models.model_2a(dmx_psrs, psd='broken_powerlaw', delta_common=0,
                          noisedict=noise_dict)
    assert hasattr(pta, 'get_lnlikelihood')
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_model2b(dmx_psrs, caplog):
    """Model 2b builds a PTA exposing a likelihood."""
    caplog.set_level(logging.CRITICAL)
    pta = models.model_2b(dmx_psrs, noisedict=noise_dict)
    assert hasattr(pta, 'get_lnlikelihood')
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_model2c(dmx_psrs, caplog):
    """Model 2c builds a PTA exposing a likelihood."""
    caplog.set_level(logging.CRITICAL)
    pta = models.model_2c(dmx_psrs, noisedict=noise_dict)
    assert hasattr(pta, 'get_lnlikelihood')
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_model2d(dmx_psrs, caplog):
    """Model 2d builds a PTA exposing a likelihood."""
    caplog.set_level(logging.CRITICAL)
    pta = models.model_2d(dmx_psrs, noisedict=noise_dict)
    assert hasattr(pta, 'get_lnlikelihood')
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_model3a(dmx_psrs, caplog):
    """Model 3a builds a PTA exposing a likelihood."""
    caplog.set_level(logging.CRITICAL)
    pta = models.model_3a(dmx_psrs, noisedict=noise_dict)
    assert hasattr(pta, 'get_lnlikelihood')
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_model3a_pshift(dmx_psrs, caplog):
    """Model 3a with seeded Fourier-basis phase shifts builds."""
    caplog.set_level(logging.CRITICAL)
    pta = models.model_3a(dmx_psrs, noisedict=noise_dict, pshift=True, pseed=42)
    assert hasattr(pta, 'get_lnlikelihood')
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_model3a_5rnfreqs(dmx_psrs, caplog):
    """Model 3a with only five red-noise frequencies builds."""
    caplog.set_level(logging.CRITICAL)
    pta = models.model_3a(dmx_psrs, n_rnfreqs=5, noisedict=noise_dict)
    assert hasattr(pta, 'get_lnlikelihood')
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_model3a_broken_plaw(dmx_psrs, caplog):
    """Model 3a with a broken-power-law common spectrum builds."""
    caplog.set_level(logging.CRITICAL)
    pta = models.model_3a(dmx_psrs, psd='broken_powerlaw', delta_common=0,
                          noisedict=noise_dict)
    assert hasattr(pta, 'get_lnlikelihood')
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_model3b(dmx_psrs, caplog):
    """Model 3b builds a PTA exposing a likelihood."""
    caplog.set_level(logging.CRITICAL)
    # NOTE(review): unlike the sibling model tests, no noisedict is passed
    # here, so white-noise parameters stay free — confirm this is intended.
    pta = models.model_3b(dmx_psrs)
    assert hasattr(pta, 'get_lnlikelihood')
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_model3c(dmx_psrs, caplog):
    """Model 3c builds a PTA exposing a likelihood."""
    caplog.set_level(logging.CRITICAL)
    pta = models.model_3c(dmx_psrs, noisedict=noise_dict)
    assert hasattr(pta, 'get_lnlikelihood')
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_model3d(dmx_psrs, caplog):
    """Model 3d builds a PTA exposing a likelihood."""
    caplog.set_level(logging.CRITICAL)
    pta = models.model_3d(dmx_psrs, noisedict=noise_dict)
    assert hasattr(pta, 'get_lnlikelihood')
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_model_fdm(dmx_psrs, caplog):
    """Fuzzy-dark-matter model builds a PTA exposing a likelihood."""
    pta = models.model_fdm(dmx_psrs, noisedict=noise_dict)
    assert hasattr(pta, 'get_lnlikelihood')
| 19,443 | 42.792793 | 95 | py |
enterprise_extensions | enterprise_extensions-master/tests/test_sampler.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `enterprise_extensions` package."""
import json
import logging
import os
import pickle
import pytest
from enterprise_extensions import models, sampler
from enterprise_extensions.empirical_distr import (
make_empirical_distributions, make_empirical_distributions_KDE)
testdir = os.path.dirname(os.path.abspath(__file__))
datadir = os.path.join(testdir, 'data')
outdir = os.path.join(testdir, 'test_out')
psr_names = ['J0613-0200', 'J1713+0747', 'J1909-3744']
with open(datadir+'/ng11yr_noise.json', 'r') as fin:
noise_dict = json.load(fin)
@pytest.fixture
def dmx_psrs(caplog):
    """Load the three 9-yr DMX test pulsars from pickled files in ``datadir``."""
    caplog.set_level(logging.CRITICAL)
    loaded = []
    for name in psr_names:
        fname = datadir + '/{0}_ng9yr_dmx_DE436_epsr.pkl'.format(name)
        with open(fname, 'rb') as fh:
            loaded.append(pickle.load(fh))
    return loaded
@pytest.fixture
def empirical_distribution_1d(caplog):
    """Load the pickled 1-d empirical distributions from the test data dir."""
    caplog.set_level(logging.CRITICAL)
    with open(datadir + '/emp_dist_1d.pkl', 'rb') as fh:
        return pickle.load(fh)
@pytest.fixture
def empirical_distribution_1d_kde(caplog):
    """Load the pickled empirical-distribution samples from the test data dir."""
    caplog.set_level(logging.CRITICAL)
    with open(datadir + '/emp_dist_samples.pkl', 'rb') as fh:
        return pickle.load(fh)
@pytest.fixture
def empirical_distribution_2d(caplog):
    """Load the pickled 2-d empirical distributions from the test data dir."""
    caplog.set_level(logging.CRITICAL)
    with open(datadir + '/emp_dist_2d.pkl', 'rb') as fh:
        return pickle.load(fh)
@pytest.fixture
def empirical_distribution_2d_kde(caplog):
    """Load the pickled 2-d KDE empirical distributions from the test data dir."""
    caplog.set_level(logging.CRITICAL)
    with open(datadir + '/emp_dist_2d_kde.pkl', 'rb') as fh:
        return pickle.load(fh)
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_jumpproposal(dmx_psrs, caplog):
    """JumpProposal factories return callables whose __name__ encodes the
    parameter / signal / pulsar they draw from."""
    m2a = models.model_2a(dmx_psrs, noisedict=noise_dict)
    jp = sampler.JumpProposal(m2a)
    assert jp.draw_from_prior.__name__ == 'draw_from_prior'
    assert jp.draw_from_signal_prior.__name__ == 'draw_from_signal_prior'
    assert (jp.draw_from_par_prior('J1713+0747').__name__ ==
            'draw_from_J1713+0747_prior')
    assert (jp.draw_from_par_log_uniform({'gw': (-20, -10)}).__name__ ==
            'draw_from_gw_log_uniform')
    assert (jp.draw_from_signal('red noise').__name__ ==
            'draw_from_red noise_signal')
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_setup_sampler(dmx_psrs, caplog):
    """setup_sampler returns a sampler and writes ``pars.txt`` listing the
    parameter names in the same order as the PTA's param_names."""
    m2a = models.model_2a(dmx_psrs, noisedict=noise_dict)
    samp = sampler.setup_sampler(m2a, outdir=outdir, human='tester')
    assert hasattr(samp, "sample")
    paramfile = os.path.join(outdir, "pars.txt")
    assert os.path.isfile(paramfile)
    with open(paramfile, "r") as f:
        params = [line.rstrip('\n') for line in f]
    # zip() would silently truncate if the file were short or long;
    # compare lengths explicitly before the element-wise comparison
    assert len(params) == len(m2a.param_names)
    for ptapar, filepar in zip(m2a.param_names, params):
        assert ptapar == filepar
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_extend_emp_dists_1d(dmx_psrs, caplog):
    """extend_emp_dists must widen 1-d empirical-distribution edges so they
    cover the (possibly shifted) prior support of each parameter."""
    with open(datadir+'/emp_dist_samples.pkl', 'rb') as fin:
        tmp_data = pickle.load(fin)
    m2a = models.model_2a(dmx_psrs, noisedict=noise_dict)
    new_dist = make_empirical_distributions(m2a, tmp_data['names'], tmp_data['names'],
                                            tmp_data['samples'], save_dists=False)
    # run extend when edges match priors
    new_dist = sampler.extend_emp_dists(m2a, new_dist)
    # change priors so they don't match edges of
    # empirical distribution
    for ii in range(len(tmp_data['names'])):
        m2a.params[ii].prior._defaults['pmin'] -= 0.1
    new_dist = sampler.extend_emp_dists(m2a, new_dist)
    assert len(new_dist) == 6
    for i in range(6):
        # extended edges must bracket the prior support
        assert new_dist[i]._edges[0] <= m2a.params[i].prior._defaults['pmin']
        assert new_dist[i]._edges[-1] >= m2a.params[i].prior._defaults['pmax']
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_extend_emp_dists_2d(dmx_psrs, caplog):
    """extend_emp_dists must widen 2-d empirical-distribution edges so they
    cover the (possibly shifted) prior support of each parameter pair.

    NOTE(review): the upper-edge assertions below use ``<=`` against
    ``pmax`` while the 1-d test asserts ``_edges[-1] >= pmax`` — suspected
    copy-paste sign error; confirm against extend_emp_dists' contract.
    """
    with open(datadir+'/emp_dist_samples.pkl', 'rb') as fin:
        tmp_data = pickle.load(fin)
    m2a = models.model_2a(dmx_psrs, noisedict=noise_dict)
    # pair the six parameters into three 2-d distributions
    parnames = [[tmp_data['names'][0], tmp_data['names'][1]],
                [tmp_data['names'][2], tmp_data['names'][3]],
                [tmp_data['names'][4], tmp_data['names'][5]]]
    new_dist = make_empirical_distributions(m2a, parnames, tmp_data['names'],
                                            tmp_data['samples'], save_dists=False)
    # case 1, edges match priors
    new_dist = sampler.extend_emp_dists(m2a, new_dist)
    # case 2, edges don't match priors (set priors to be different)
    for ii in range(len(tmp_data['names'])):
        m2a.params[ii].prior._defaults['pmin'] -= 0.1
        m2a.params[ii].prior._defaults['pmax'] += 0.1
    new_dist = sampler.extend_emp_dists(m2a, new_dist)
    assert len(new_dist) == 3
    for i in range(3):
        k = 2 * i
        assert new_dist[i]._edges[0][0] <= m2a.params[k].prior._defaults['pmin']
        assert new_dist[i]._edges[0][-1] <= m2a.params[k].prior._defaults['pmax']
        assert new_dist[i]._edges[1][0] <= m2a.params[k + 1].prior._defaults['pmin']
        assert new_dist[i]._edges[1][-1] <= m2a.params[k + 1].prior._defaults['pmax']
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_extend_emp_dists_1d_kde(dmx_psrs, caplog):
    """extend_emp_dists must widen 1-d KDE distributions so [minval, maxval]
    covers the (possibly shifted) prior support of each parameter."""
    with open(datadir+'/emp_dist_samples.pkl', 'rb') as fin:
        tmp_data = pickle.load(fin)
    m2a = models.model_2a(dmx_psrs, noisedict=noise_dict)
    new_dist = make_empirical_distributions_KDE(m2a, tmp_data['names'], tmp_data['names'],
                                                tmp_data['samples'], save_dists=False)
    new_dist = sampler.extend_emp_dists(m2a, new_dist)
    # shift the lower prior bounds so edges no longer match
    for ii in range(len(tmp_data['names'])):
        m2a.params[ii].prior._defaults['pmin'] -= 0.1
    new_dist = sampler.extend_emp_dists(m2a, new_dist)
    assert len(new_dist) == 6
    for i in range(6):
        # extended support must bracket the prior support
        assert new_dist[i].minval <= m2a.params[i].prior._defaults['pmin']
        assert new_dist[i].maxval >= m2a.params[i].prior._defaults['pmax']
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_extend_emp_dists_2d_kde(dmx_psrs, caplog):
    """extend_emp_dists must widen 2-d KDE distributions so min/max values
    cover the (possibly shifted) prior support of each parameter pair.

    NOTE(review): the ``maxvals`` assertions below use ``<=`` against
    ``pmax`` while the 1-d KDE test asserts ``maxval >= pmax`` — suspected
    copy-paste sign error; confirm against extend_emp_dists' contract.
    """
    with open(datadir+'/emp_dist_samples.pkl', 'rb') as fin:
        tmp_data = pickle.load(fin)
    m2a = models.model_2a(dmx_psrs, noisedict=noise_dict)
    # pair the six parameters into three 2-d distributions
    parnames = [[tmp_data['names'][0], tmp_data['names'][1]],
                [tmp_data['names'][2], tmp_data['names'][3]],
                [tmp_data['names'][4], tmp_data['names'][5]]]
    new_dist = make_empirical_distributions_KDE(m2a, parnames, tmp_data['names'],
                                                tmp_data['samples'], save_dists=False)
    # case 1
    new_dist = sampler.extend_emp_dists(m2a, new_dist)
    # case 2
    for ii in range(len(tmp_data['names'])):
        m2a.params[ii].prior._defaults['pmin'] -= 0.1
        m2a.params[ii].prior._defaults['pmax'] += 0.1
    new_dist = sampler.extend_emp_dists(m2a, new_dist)
    assert len(new_dist) == 3
    for i in range(3):
        k = 2 * i
        assert new_dist[i].minvals[0] <= m2a.params[k].prior._defaults['pmin']
        assert new_dist[i].maxvals[0] <= m2a.params[k].prior._defaults['pmax']
        assert new_dist[i].minvals[1] <= m2a.params[k + 1].prior._defaults['pmin']
        assert new_dist[i].maxvals[1] <= m2a.params[k + 1].prior._defaults['pmax']
| 7,924 | 36.559242 | 90 | py |
enterprise_extensions | enterprise_extensions-master/tests/__init__.py | 0 | 0 | 0 | py |
|
enterprise_extensions | enterprise_extensions-master/tests/test_frequentist.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `enterprise_extensions` package."""
import json
import logging
import os
import pickle
import numpy as np
import pytest
from enterprise_extensions import models
from enterprise_extensions.frequentist import chi_squared as chisqr
testdir = os.path.dirname(os.path.abspath(__file__))
datadir = os.path.join(testdir, 'data')
psr_names = ['J0613-0200', 'J1944+0907']
with open(datadir+'/ng11yr_noise.json', 'r') as fin:
noise_dict = json.load(fin)
@pytest.fixture
def dmx_psrs(caplog):
    """Load the 11-yr DMX test pulsars from pickled files in ``datadir``."""
    caplog.set_level(logging.CRITICAL)
    loaded = []
    for name in psr_names:
        path = datadir + '/{0}_ng11yr_dmx_DE436_epsr.pkl'.format(name)
        with open(path, 'rb') as fh:
            loaded.append(pickle.load(fh))
    return loaded
# NOTE(review): pytest marks applied to fixtures are ignored (and deprecated
# in recent pytest) — the filterwarnings mark here likely has no effect.
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
@pytest.fixture
def pta_model1(dmx_psrs, caplog):
    """Model-1 PTA built from the DMX pulsars with fixed noise parameters."""
    m2a=models.model_1(dmx_psrs, noisedict=noise_dict, tnequad=True)
    return m2a
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_chisqr(dmx_psrs, pta_model1):
    """get_reduced_chi2 matches a direct chi2/dof computation and is close
    to unity for a well-fit model."""
    chi2 = chisqr.get_chi2(pta_model1, noise_dict)
    # degrees of freedom: TOAs minus timing-model fit parameters minus
    # PTA noise parameters
    dof = np.sum([p.toas.size for p in dmx_psrs])
    dof -= np.sum([len(p.fitpars) for p in dmx_psrs])
    dof -= len(pta_model1.param_names)
    red_chi2 = chi2 / dof
    rchi2 = chisqr.get_reduced_chi2(pta_model1, noise_dict)
    assert rchi2 == red_chi2
    assert np.isclose(1.0, rchi2, atol=0.01)
| 1,453 | 24.508772 | 83 | py |
enterprise_extensions | enterprise_extensions-master/tests/test_enterprise_extensions.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `enterprise_extensions` package."""
import pytest
@pytest.fixture
def response():
    """Placeholder fixture from the cookiecutter-pypackage template.

    The network call is commented out, so this fixture returns None.
    """
    # import requests
    # return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
    """Placeholder test from the cookiecutter-pypackage template.

    All assertions are commented out, so this test always passes.
    """
    # from bs4 import BeautifulSoup
    # assert 'GitHub' in BeautifulSoup(response.content).title.string
| 561 | 23.434783 | 78 | py |
enterprise_extensions | enterprise_extensions-master/tests/test_os.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `enterprise_extensions` package."""
import json
import logging
import os
import pickle
import numpy as np
import pytest
from enterprise.signals import signal_base, gp_signals, parameter, utils
from enterprise_extensions import models, blocks, model_utils
from enterprise_extensions.frequentist import optimal_statistic as optstat
testdir = os.path.dirname(os.path.abspath(__file__))
datadir = os.path.join(testdir, 'data')
psr_names = ['J0613-0200', 'J1713+0747', 'J1909-3744']
with open(datadir+'/ng11yr_noise.json', 'r') as fin:
noise_dict = json.load(fin)
@pytest.fixture
def dmx_psrs(caplog):
    """Load the three 9-yr DMX test pulsars from pickled files in ``datadir``."""
    caplog.set_level(logging.CRITICAL)

    def _load(name):
        path = datadir + '/{0}_ng9yr_dmx_DE436_epsr.pkl'.format(name)
        with open(path, 'rb') as fh:
            return pickle.load(fh)

    return [_load(name) for name in psr_names]
@pytest.fixture
def nodmx_psrs(caplog):
    """Load the three 9-yr no-DMX test pulsars from pickled files in ``datadir``."""
    caplog.set_level(logging.CRITICAL)
    loaded = []
    for name in psr_names:
        fname = datadir + '/{0}_ng9yr_nodmx_DE436_epsr.pkl'.format(name)
        with open(fname, 'rb') as fh:
            loaded.append(pickle.load(fh))
    return loaded
# NOTE(review): pytest marks applied to fixtures are ignored (and deprecated
# in recent pytest) — the filterwarnings mark here likely has no effect.
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
@pytest.fixture
def pta_model2a(dmx_psrs, caplog):
    """Model-2a PTA built from the DMX pulsars with fixed noise parameters."""
    m2a=models.model_2a(dmx_psrs, noisedict=noise_dict, tnequad=True)
    return m2a
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_os(nodmx_psrs, pta_model2a):
    """Optimal statistic computes, and its noise-marginalized/-maximized
    variants run on a small synthetic chain."""
    OS = optstat.OptimalStatistic(psrs=nodmx_psrs, pta=pta_model2a)
    OS.compute_os()
    # synthetic chain: one column per PTA parameter plus four trailing
    # columns mimicking a PTMCMCSampler chain — presumably
    # (lnpost, lnlike, acceptance, PT-swap); confirm against PTMCMCSampler
    chain = np.zeros((10, len(pta_model2a.params)+4))
    for ii in range(10):
        entry = [par.sample() for par in pta_model2a.params]
        entry.extend([OS.pta.get_lnlikelihood(entry)-OS.pta.get_lnprior(entry),
                      OS.pta.get_lnlikelihood(entry),
                      0.5, 1])
        chain[ii, :] = np.array(entry)
    OS.compute_noise_marginalized_os(chain, param_names=OS.pta.param_names, N=10)
    OS.compute_noise_maximized_os(chain, param_names=OS.pta.param_names)
# NOTE(review): pytest marks applied to fixtures are ignored (and deprecated
# in recent pytest) — the filterwarnings mark here likely has no effect.
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
@pytest.fixture
def pta_pshift(dmx_psrs, caplog):
    """PTA with a common GW signal whose Fourier basis is phase-shifted by a
    sampled seed parameter ``gw_pseed`` (fixed gamma = 13/3 power law)."""
    Tspan = model_utils.get_tspan(dmx_psrs)
    tm = gp_signals.TimingModel()
    wn = blocks.white_noise_block(inc_ecorr=True, tnequad=True)
    rn = blocks.red_noise_block(Tspan=Tspan)
    # the phase-shift seed is itself a free parameter of the model
    pseed = parameter.Uniform(0, 10000)('gw_pseed')
    gw_log10_A = parameter.Uniform(-18, -14)('gw_log10_A')
    gw_gamma = parameter.Constant(13./3)('gw_gamma')
    gw_pl = utils.powerlaw(log10_A=gw_log10_A, gamma=gw_gamma)
    gw_pshift = gp_signals.FourierBasisGP(spectrum=gw_pl,
                                          components=5,
                                          Tspan=Tspan,
                                          name='gw',
                                          pshift=True,
                                          pseed=pseed)
    model = tm + wn + rn + gw_pshift
    pta_pshift = signal_base.PTA([model(p) for p in dmx_psrs])
    pta_pshift.set_default_params(noise_dict)
    return pta_pshift
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_os_pseed(dmx_psrs, pta_pshift):
    """Changing the phase-shift seed must change the optimal-statistic
    amplitude and cross-correlations."""
    OS = optstat.OptimalStatistic(psrs=dmx_psrs, pta=pta_pshift)
    # random prior draw for all parameters, then override only the seed
    params = {pnm: p.sample() for pnm, p in zip(pta_pshift.param_names,
                                                pta_pshift.params)}
    params.update({'gw_pseed': 1})
    _, _, _, A1, rho1 = OS.compute_os(params=params)
    params.update({'gw_pseed': 2})
    _, _, _, A2, rho2 = OS.compute_os(params=params)
    print(A1, A2)
    print(rho1, rho2)
    assert A1!=A2
    assert rho1!=rho2
| 3,721 | 31.938053 | 84 | py |
enterprise_extensions | enterprise_extensions-master/tests/altpol_tests.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for altpol functions in e_e Code.
"""
import json
import logging
import os
import pickle
import enterprise.signals.parameter as parameter
import numpy as np
import pytest
from enterprise.signals import gp_signals, signal_base
from enterprise_extensions import model_orfs, models
from enterprise_extensions.frequentist import optimal_statistic as optstat
testdir = os.path.dirname(os.path.abspath(__file__))
datadir = os.path.join(testdir, 'data')
psr_names = ['J0613-0200', 'J1713+0747', 'J1909-3744']
with open(datadir+'/ng11yr_noise.json', 'r') as fin:
noise_dict = json.load(fin)
@pytest.fixture
def nodmx_psrs(caplog):
    """Load the three 9-yr no-DMX test pulsars from pickled files in ``datadir``."""
    caplog.set_level(logging.CRITICAL)

    def _load(name):
        path = datadir + '/{0}_ng9yr_nodmx_DE436_epsr.pkl'.format(name)
        with open(path, 'rb') as fh:
            return pickle.load(fh)

    return [_load(name) for name in psr_names]
def test_model_general_alt_correlations(nodmx_psrs, caplog):
    """model_general should accept a comma-separated list of alt-pol ORFs."""
    orf_list = 'hd,gw_monopole,gw_dipole,st,gt,dipole,monopole'
    pta = models.model_general(nodmx_psrs, noisedict=noise_dict, orf=orf_list)
    assert hasattr(pta, 'get_lnlikelihood')
def test_model_2a_altpol_spectrum(nodmx_psrs, caplog):
    """Build a PTA with the generalized alt-polarization GW spectrum."""
    # One amplitude per GW polarization mode: tensor-transverse (tt),
    # scalar-transverse (st), vector-longitudinal (vl), scalar-longitudinal (sl).
    log10_A_tt = parameter.LinearExp(-18, -12)('log10_A_tt')
    log10_A_st = parameter.LinearExp(-18, -12)('log10_A_st')
    log10_A_vl = parameter.LinearExp(-18, -15)('log10_A_vl')
    log10_A_sl = parameter.LinearExp(-18, -16)('log10_A_sl')
    kappa = parameter.Uniform(0, 15)('kappa')
    # Placeholder pulsar-distance prior; re-centered per pulsar below.
    p_dist = parameter.Normal(1.0, 0.2)
    pl = model_orfs.generalized_gwpol_psd(log10_A_tt=log10_A_tt, log10_A_st=log10_A_st,
                                          log10_A_vl=log10_A_vl, log10_A_sl=log10_A_sl,
                                          kappa=kappa, p_dist=p_dist, alpha_tt=-2/3, alpha_alt=-1)
    s = models.white_noise_block(vary=False, inc_ecorr=True)
    s += models.red_noise_block(prior='log-uniform')
    s += gp_signals.FourierBasisGP(spectrum=pl, name='gw')
    s += gp_signals.TimingModel()
    m = signal_base.PTA([s(psr) for psr in nodmx_psrs])
    m.set_default_params(noise_dict)
    for param in m.params:
        if 'gw_p_dist' in str(param):
            # get pulsar name and distance
            psr_name = str(param).split('_')[0].strip('"')
            psr_dist = [p._pdist for p in nodmx_psrs if psr_name in p.name][0]

            # edit prior settings in place so each pulsar-distance prior is
            # centered on that pulsar's measured distance
            param._prior = parameter.Normal(mu=psr_dist[0],
                                            sigma=psr_dist[1])
            param._mu = psr_dist[0]
            param._sigma = psr_dist[1]

    assert hasattr(m, 'get_lnlikelihood')
"""
Tests for altpol functions in OS Code.
"""
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
@pytest.fixture
def pta_model2a(nodmx_psrs, caplog):
    """Fixture: standard model 2A PTA with the fixed 11-yr noise values."""
    return models.model_2a(nodmx_psrs, noisedict=noise_dict)
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
def test_os(nodmx_psrs, pta_model2a):
    """Exercise the optimal statistic for every supported overlap-reduction function."""
    n_samples = 10
    for orf in ['hd', 'gw_monopole', 'gw_dipole', 'st', 'dipole', 'monopole']:
        OS = optstat.OptimalStatistic(psrs=nodmx_psrs, pta=pta_model2a, orf=orf)
        assert hasattr(OS, 'Fmats')
        OS.compute_os()
        # Fake a short PTMCMC-style chain: parameter columns followed by
        # four bookkeeping columns.
        chain = np.zeros((n_samples, len(pta_model2a.params) + 4))
        for row in range(n_samples):
            entry = [par.sample() for par in pta_model2a.params]
            lnlike = OS.pta.get_lnlikelihood(entry)
            lnprior = OS.pta.get_lnprior(entry)
            entry.extend([lnlike - lnprior, lnlike, 0.5, 1])
            chain[row, :] = np.array(entry)
        OS.compute_noise_marginalized_os(chain, param_names=OS.pta.param_names, N=10)
        OS.compute_noise_maximized_os(chain, param_names=OS.pta.param_names)
| 3,893 | 33.157895 | 98 | py |
enterprise_extensions | enterprise_extensions-master/docs/conf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# enterprise_extensions documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('..'))
# Get the project root dir, which is the parent dir of this
# working directory.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)

# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)

import enterprise_extensions

# -- General configuration ---------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# get docstrings for the __init__ method as well as the class itself
autoclass_content = "both"

# make the order of docs 'groupwise'
autodoc_member_order = "groupwise"

# Heavy/optional dependencies are mocked so autodoc can import the package
# on the docs build host without installing them.
autodoc_mock_imports = ["enterprise","libstempo", "PINT", "astropy", "healpy", "sksparse", "ephem"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'enterprise_extensions'
copyright = u"2019, Stephen R. Taylor, Jeffrey S. Hazboun, Paul T. Baker, Sarah J. Vigeland"
author = u"Stephen R. Taylor, Jeffrey S. Hazboun, Paul T. Baker, Sarah J. Vigeland"

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = enterprise_extensions.__version__
# The full version, including alpha/beta/rc tags.
release = enterprise_extensions.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output -------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'#'alabaster'

# Theme options are theme-specific and customize the look and feel of a
# theme further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']


# -- Options for HTMLHelp output ---------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'enterprise_extensionsdoc'


# -- Options for LaTeX output ------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'enterprise_extensions.tex',
     u'enterprise_extensions Documentation',
     u'Stephen R. Taylor', 'manual'),
]


# -- Options for manual page output ------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'enterprise_extensions',
     u'enterprise_extensions Documentation',
     [author], 1)
]


# -- Options for Texinfo output ----------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'enterprise_extensions',
     u'enterprise_extensions Documentation',
     author,
     'enterprise_extensions',
     'One line description of project.',
     'Miscellaneous'),
]
| 5,714 | 30.75 | 99 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/hypermodel.py | # -*- coding: utf-8 -*-
import os
import numpy as np
import scipy.linalg as sl
from enterprise import constants as const
from PTMCMCSampler.PTMCMCSampler import PTSampler as ptmcmc
from .sampler import JumpProposal, get_parameter_groups, save_runtime_info
class HyperModel(object):
    """
    Class to define hyper-model that is the concatenation of all models.

    Presents a single likelihood/prior over the union of all model
    parameters plus an extra 'nmodel' index parameter that selects which
    sub-model is active (product-space sampling).
    """

    def __init__(self, models, log_weights=None):
        """
        :param models:
            dict of enterprise PTA objects; keys are the integer model
            indices sampled via 'nmodel' (assumed to include 0 -- the code
            below indexes ``self.models[0]`` directly).
        :param log_weights:
            optional per-model log-weights added to the active model's
            log-likelihood in ``get_lnlikelihood``.
        """
        self.models = models
        self.num_models = len(self.models)
        self.log_weights = log_weights

        #########
        # Union of all parameter names, preserving first-seen order,
        # with the model-index parameter 'nmodel' appended last.
        self.param_names, ind = np.unique(np.concatenate([p.param_names
                                                          for p in self.models.values()]),
                                          return_index=True)
        self.param_names = self.param_names[np.argsort(ind)]
        self.param_names = np.append(self.param_names, 'nmodel').tolist()
        #########

        self.pulsars = np.unique(np.concatenate([p.pulsars
                                                 for p in self.models.values()]))
        self.pulsars = np.sort(self.pulsars)

        #########
        # Union of parameter objects (not just names) across all models.
        self.params = [p for p in self.models[0].params]  # start of param list
        uniq_params = [str(p) for p in self.models[0].params]  # which params are unique
        for model in self.models.values():
            # find differences between next model and concatenation of previous
            param_diffs = np.setdiff1d([str(p) for p in model.params], uniq_params)
            mask = np.array([str(p) in param_diffs for p in model.params])
            # concatenate for next loop iteration
            uniq_params = np.union1d([str(p) for p in model.params], uniq_params)
            # extend list of unique parameters
            self.params.extend([pp for pp in np.array(model.params)[mask]])
        #########

        #########
        # get signal collections: map each signal name to the de-duplicated
        # list of parameter objects it carries, across all models.
        self.snames = dict.fromkeys(np.unique(sum(sum([[[qq.signal_name for qq in pp._signals]
                                                        for pp in self.models[mm]._signalcollections]
                                                       for mm in self.models], []), [])))
        for key in self.snames:
            self.snames[key] = []

        for mm in self.models:
            for sc in self.models[mm]._signalcollections:
                for signal in sc._signals:
                    self.snames[signal.signal_name].extend(signal.params)
        for key in self.snames:
            self.snames[key] = list(set(self.snames[key]))

        for key in self.snames:
            # restore first-seen ordering after the set() de-duplication above
            uniq_params, ind = np.unique([p.name for p in self.snames[key]],
                                         return_index=True)
            uniq_params = uniq_params[np.argsort(ind)].tolist()
            all_params = [p.name for p in self.snames[key]]

            self.snames[key] = np.array(self.snames[key])[[all_params.index(q)
                                                           for q in uniq_params]].tolist()
        #########

    def get_lnlikelihood(self, x):
        """Log-likelihood of the sub-model selected by the 'nmodel' entry of x."""
        # find model index variable
        idx = list(self.param_names).index('nmodel')
        nmodel = int(np.rint(x[idx]))

        # find parameters of active model
        q = []
        for par in self.models[nmodel].param_names:
            idx = self.param_names.index(par)
            q.append(x[idx])

        # only active parameters enter likelihood
        active_lnlike = self.models[nmodel].get_lnlikelihood(q)

        if self.log_weights is not None:
            active_lnlike += self.log_weights[nmodel]

        return active_lnlike

    def get_lnprior(self, x):
        """Sum of log-priors over ALL sub-models; -inf for an invalid model index."""
        # find model index variable
        idx = list(self.param_names).index('nmodel')
        nmodel = int(np.rint(x[idx]))

        if nmodel not in self.models.keys():
            return -np.inf
        else:
            lnP = 0
            for p in self.models.values():
                q = []
                for par in p.param_names:
                    idx = self.param_names.index(par)
                    q.append(x[idx])
                lnP += p.get_lnprior(np.array(q))

            return lnP

    def get_parameter_groups(self):
        """Collect parameter-index groups from every sub-model, re-indexed
        into the hyper-model parameter list, plus a group for 'nmodel'."""
        unique_groups = []
        for p in self.models.values():
            groups = get_parameter_groups(p)
            # check for any duplicate groups
            # e.g. the GWB may have different indices in model 1 and model 2
            for group in groups:
                check_group = []
                for idx in group:
                    param_name = p.param_names[idx]
                    check_group.append(self.param_names.index(param_name))
                if check_group not in unique_groups:
                    unique_groups.append(check_group)
        unique_groups.extend([[len(self.param_names) - 1]])
        return unique_groups

    def initial_sample(self):
        """
        Draw an initial sample from within the hyper-model prior space.
        """
        x0 = [np.array(p.sample()).ravel().tolist() for p in self.models[0].params]
        uniq_params = [str(p) for p in self.models[0].params]

        # append samples for parameters not already covered by model 0
        for model in self.models.values():
            param_diffs = np.setdiff1d([str(p) for p in model.params], uniq_params)
            mask = np.array([str(p) in param_diffs for p in model.params])
            x0.extend([np.array(pp.sample()).ravel().tolist() for pp in np.array(model.params)[mask]])
            uniq_params = np.union1d([str(p) for p in model.params], uniq_params)

        # initial value for the model index parameter
        x0.extend([[0.1]])

        return np.array([p for sublist in x0 for p in sublist])

    def draw_from_nmodel_prior(self, x, iter, beta):
        """
        Model-index uniform distribution prior draw.
        """
        q = x.copy()

        idx = list(self.param_names).index('nmodel')
        # NOTE(review): assumes model keys are the consecutive integers
        # 0..num_models-1 -- confirm against callers.
        q[idx] = np.random.uniform(-0.5, self.num_models-0.5)

        lqxy = 0

        return q, float(lqxy)

    def setup_sampler(self, outdir='chains', resume=False, sample_nmodel=True,
                      empirical_distr=None, groups=None, human=None,
                      loglkwargs={}, logpkwargs={}):
        """
        Sets up an instance of PTMCMC sampler.

        We initialize the sampler the likelihood and prior function
        from the PTA object. We set up an initial jump covariance matrix
        with fairly small jumps as this will be adapted as the MCMC runs.

        We will setup an output directory in `outdir` that will contain
        the chain (first n columns are the samples for the n parameters
        and last 4 are log-posterior, log-likelihood, acceptance rate, and
        an indicator variable for parallel tempering but it doesn't matter
        because we aren't using parallel tempering).

        We then add several custom jump proposals to the mix based on
        whether or not certain parameters are in the model. These are
        all either draws from the prior distribution of parameters or
        draws from uniform distributions.
        """
        # dimension of parameter space
        ndim = len(self.param_names)

        # initial jump covariance matrix
        if os.path.exists(outdir+'/cov.npy') and resume:
            cov = np.load(outdir+'/cov.npy')

            # check that the one we load is the same shape as our data
            cov_new = np.diag(np.ones(ndim) * 1.0**2)
            if cov.shape != cov_new.shape:
                msg = 'The covariance matrix (cov.npy) in the output folder is '
                msg += 'the wrong shape for the parameters given. '
                msg += 'Start with a different output directory or '
                msg += 'change resume to False to overwrite the run that exists.'

                raise ValueError(msg)
        else:
            cov = np.diag(np.ones(ndim) * 1.0**2)  # used to be 0.1

        # parameter groupings
        if groups is None:
            groups = self.get_parameter_groups()

        sampler = ptmcmc(ndim, self.get_lnlikelihood, self.get_lnprior, cov,
                         groups=groups, outDir=outdir, resume=resume,
                         loglkwargs=loglkwargs, logpkwargs=logpkwargs)

        save_runtime_info(self, sampler.outDir, human)

        # additional jump proposals
        jp = JumpProposal(self, self.snames, empirical_distr=empirical_distr)
        sampler.jp = jp

        # always add draw from prior
        sampler.addProposalToCycle(jp.draw_from_prior, 5)

        # try adding empirical proposals
        if empirical_distr is not None:
            print('Adding empirical proposals...\n')
            sampler.addProposalToCycle(jp.draw_from_empirical_distr, 25)

        # Red noise prior draw
        if 'red noise' in self.snames:
            print('Adding red noise prior draws...\n')
            sampler.addProposalToCycle(jp.draw_from_red_prior, 10)

        # DM GP noise prior draw
        if 'dm_gp' in self.snames:
            print('Adding DM GP noise prior draws...\n')
            sampler.addProposalToCycle(jp.draw_from_dm_gp_prior, 10)

        # DM annual prior draw
        if 'dm_s1yr' in jp.snames:
            print('Adding DM annual prior draws...\n')
            sampler.addProposalToCycle(jp.draw_from_dm1yr_prior, 10)

        # DM dip prior draw
        if 'dmexp' in '\t'.join(jp.snames):
            print('Adding DM exponential dip prior draws...\n')
            sampler.addProposalToCycle(jp.draw_from_dmexpdip_prior, 10)

        # DM cusp prior draw
        if 'dm_cusp' in jp.snames:
            print('Adding DM exponential cusp prior draws...\n')
            sampler.addProposalToCycle(jp.draw_from_dmexpcusp_prior, 10)

        # DMX prior draw
        if 'dmx_signal' in jp.snames:
            print('Adding DMX prior draws...\n')
            sampler.addProposalToCycle(jp.draw_from_dmx_prior, 10)

        # Chromatic GP noise prior draw
        if 'chrom_gp' in self.snames:
            print('Adding Chromatic GP noise prior draws...\n')
            sampler.addProposalToCycle(jp.draw_from_chrom_gp_prior, 10)

        # SW prior draw
        if 'gp_sw' in jp.snames:
            print('Adding Solar Wind DM GP prior draws...\n')
            sampler.addProposalToCycle(jp.draw_from_dm_sw_prior, 10)

        # Chromatic GP noise prior draw
        # NOTE(review): this duplicates the chrom_gp draw added above and
        # doubles its weight in the proposal cycle -- confirm intended.
        if 'chrom_gp' in self.snames:
            print('Adding Chromatic GP noise prior draws...\n')
            sampler.addProposalToCycle(jp.draw_from_chrom_gp_prior, 10)

        # Ephemeris prior draw
        if 'd_jupiter_mass' in self.param_names:
            print('Adding ephemeris model prior draws...\n')
            sampler.addProposalToCycle(jp.draw_from_ephem_prior, 10)

        # GWB uniform distribution draw
        if np.any([('gw' in par and 'log10_A' in par) for par in self.param_names]):
            print('Adding GWB uniform distribution draws...\n')
            sampler.addProposalToCycle(jp.draw_from_gwb_log_uniform_distribution, 10)

        # Dipole uniform distribution draw
        if 'dipole_log10_A' in self.param_names:
            print('Adding dipole uniform distribution draws...\n')
            sampler.addProposalToCycle(jp.draw_from_dipole_log_uniform_distribution, 10)

        # Monopole uniform distribution draw
        if 'monopole_log10_A' in self.param_names:
            print('Adding monopole uniform distribution draws...\n')
            sampler.addProposalToCycle(jp.draw_from_monopole_log_uniform_distribution, 10)

        # BWM prior draw
        if 'bwm_log10_A' in self.param_names:
            print('Adding BWM prior draws...\n')
            sampler.addProposalToCycle(jp.draw_from_bwm_prior, 10)

        # FDM prior draw
        if 'fdm_log10_A' in self.param_names:
            print('Adding FDM prior draws...\n')
            sampler.addProposalToCycle(jp.draw_from_fdm_prior, 10)

        # CW prior draw
        if 'cw_log10_h' in self.param_names:
            print('Adding CW prior draws...\n')
            sampler.addProposalToCycle(jp.draw_from_cw_log_uniform_distribution, 10)

        # free spectrum prior draw
        if np.any(['log10_rho' in par for par in self.param_names]):
            print('Adding free spectrum prior draws...\n')
            sampler.addProposalToCycle(jp.draw_from_gw_rho_prior, 25)

        # Prior distribution draw for parameters named GW
        if any([str(p).split(':')[0] for p in list(self.params) if 'gw' in str(p)]):
            print('Adding gw param prior draws...\n')
            sampler.addProposalToCycle(jp.draw_from_par_prior(
                par_names=[str(p).split(':')[0] for
                           p in list(self.params)
                           if 'gw' in str(p)]), 10)

        # Model index distribution draw
        if sample_nmodel:
            if 'nmodel' in self.param_names:
                print('Adding nmodel uniform distribution draws...\n')
                sampler.addProposalToCycle(self.draw_from_nmodel_prior, 25)

        return sampler

    def get_process_timeseries(self, psr, chain, burn, comp='DM',
                               mle=False, model=0):
        """
        Construct a time series realization of various constrained processes.

        :param psr: enterprise pulsar object
        :param chain: MCMC chain from sampling all models
        :param burn: desired number of initial samples to discard
        :param comp: which process to reconstruct? (red noise or DM) [default=DM]
        :param mle: create time series from ML of GP hyper-parameters? [default=False]
        :param model: which sub-model within the super-model to reconstruct from? [default=0]

        :return ret: time-series of the reconstructed process
        """
        wave = 0

        pta = self.models[model]
        # column -5 is the last parameter column, i.e. 'nmodel'; keep only
        # samples where this sub-model was active
        model_chain = chain[np.rint(chain[:, -5])==model, :]

        # get parameter dictionary
        if mle:
            # column -4 is the log-posterior written by PTMCMC
            ind = np.argmax(model_chain[:, -4])
        else:
            ind = np.random.randint(burn, model_chain.shape[0])
        params = {par: model_chain[ind, ct]
                  for ct, par in enumerate(self.param_names)
                  if par in pta.param_names}

        # deterministic signal part
        wave += pta.get_delay(params=params)[0]

        # get linear parameters
        # Nvec = pta.get_ndiag(params)[0]  # Not currently used in code
        phiinv = pta.get_phiinv(params, logdet=False)[0]
        T = pta.get_basis(params)[0]

        d = pta.get_TNr(params)[0]
        TNT = pta.get_TNT(params)[0]

        # Red noise piece
        Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv)

        # Draw the GP coefficients b ~ N(mean, Sigma^-1) via SVD, falling
        # back to a QR-based inverse if the SVD fails to converge.
        try:
            u, s, _ = sl.svd(Sigma)
            mn = np.dot(u, np.dot(u.T, d)/s)
            Li = u * np.sqrt(1/s)
        except np.linalg.LinAlgError:
            Q, R = sl.qr(Sigma)
            Sigi = sl.solve(R, Q.T)
            mn = np.dot(Sigi, d)
            u, s, _ = sl.svd(Sigi)
            Li = u * np.sqrt(1/s)

        b = mn + np.dot(Li, np.random.randn(Li.shape[0]))

        # find basis indices
        pardict = {}
        for sc in pta._signalcollections:
            ntot = 0
            for sig in sc._signals:
                if sig.signal_type == 'basis':
                    basis = sig.get_basis(params=params)
                    nb = basis.shape[1]
                    pardict[sig.signal_name] = np.arange(ntot, nb+ntot)
                    ntot += nb

        # DM quadratic + GP
        if comp == 'DM':
            idx = pardict['dm_gp']
            wave += np.dot(T[:, idx], b[idx])
            ret = wave * (psr.freqs**2 * const.DM_K * 1e12)
        elif comp == 'scattering':
            idx = pardict['scattering_gp']
            wave += np.dot(T[:, idx], b[idx])
            ret = wave * (psr.freqs**4)  # * const.DM_K * 1e12)
        elif comp == 'red':
            idx = pardict['red noise']
            wave += np.dot(T[:, idx], b[idx])
            ret = wave
        elif comp == 'FD':
            idx = pardict['FD']
            wave += np.dot(T[:, idx], b[idx])
            ret = wave
        elif comp == 'all':
            wave += np.dot(T, b)
            ret = wave
        else:
            ret = wave

        return ret

    def summary(self, to_stdout=False):
        """generate summary string for HyperModel, including all PTAs

        :param to_stdout: [bool]
            print summary to `stdout` instead of returning it

        :return: [string]
        """
        summary = ""
        for ii, pta in self.models.items():
            summary += "model " + str(ii) + "\n"
            summary += "=" * 9 + "\n\n"
            summary += pta.summary()
            summary += "=" * 90 + "\n\n"

        if to_stdout:
            print(summary)
        else:
            return summary
| 16,731 | 37.200913 | 102 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/gp_kernels.py | # -*- coding: utf-8 -*-
import numpy as np
from enterprise.signals import signal_base, utils
# Public API of this module.
# NOTE(review): linear_interp_basis_chromatic is defined below but not
# exported here -- confirm whether that omission is intentional.
__all__ = ['linear_interp_basis_dm',
           'linear_interp_basis_freq',
           'dmx_ridge_prior',
           'periodic_kernel',
           'se_kernel',
           'se_dm_kernel',
           'get_tf_quantization_matrix',
           'tf_kernel',
           'sf_kernel',
           ]
# linear interpolation basis in time with nu^-2 scaling
@signal_base.function
def linear_interp_basis_dm(toas, freqs, dt=30*86400):
    """Linear-interpolation basis in time, weighted by the nu^-2 DM scaling."""
    basis, avetoas = utils.linear_interp_basis(toas, dt=dt)
    # DM delays scale as (1400 / freq)^2
    dm_weight = (1400 / freqs) ** 2
    return basis * dm_weight[:, None], avetoas
@signal_base.function
def linear_interp_basis_chromatic(toas, freqs, dt=30*86400, idx=4):
    """Linear interpolation basis in time with nu^-idx scaling (default nu^-4)."""
    basis, avetoas = utils.linear_interp_basis(toas, dt=dt)
    chrom_weight = (1400 / freqs) ** idx
    return basis * chrom_weight[:, None], avetoas
@signal_base.function
def linear_interp_basis_freq(freqs, df=64):
    """Linear interpolation basis across radio frequency with bin width ``df``."""
    # Reuse the time-domain helper; here the "times" are radio frequencies.
    return utils.linear_interp_basis(freqs, dt=df)
@signal_base.function
def dmx_ridge_prior(avetoas, log10_sigma=-7):
    """Constant Gaussian (ridge) prior variance for a DMX-like signal."""
    variance = (10 ** log10_sigma) ** 2
    return np.ones_like(avetoas) * variance
@signal_base.function
def periodic_kernel(avetoas, log10_sigma=-7, log10_ell=2,
                    log10_gam_p=0, log10_p=0):
    """Quasi-periodic covariance kernel for DM variations."""
    tau = np.abs(avetoas[None, :] - avetoas[:, None])

    # undo the log10 parameterization; length scale and period in seconds
    sigma = 10 ** log10_sigma
    ell = 10 ** log10_ell * 86400
    period = 10 ** log10_p * 3.16e7
    gam_p = 10 ** log10_gam_p

    # small diagonal jitter for numerical stability
    jitter = np.eye(tau.shape[0]) * (sigma / 500) ** 2
    exponent = -tau ** 2 / 2 / ell ** 2 - gam_p * np.sin(np.pi * tau / period) ** 2
    return sigma ** 2 * np.exp(exponent) + jitter
@signal_base.function
def se_kernel(avefreqs, log10_sigma=-7, log10_lam=3):
    """Squared-exponential covariance kernel across radio frequency (FD)."""
    dnu = np.abs(avefreqs[None, :] - avefreqs[:, None])

    lam = 10 ** log10_lam
    sigma = 10 ** log10_sigma
    # small diagonal jitter for numerical stability
    jitter = np.eye(dnu.shape[0]) * (sigma / 500) ** 2
    return sigma ** 2 * np.exp(-dnu ** 2 / 2 / lam) + jitter
@signal_base.function
def se_dm_kernel(avetoas, log10_sigma=-7, log10_ell=2):
    """Squared-exponential covariance kernel in time for DM variations."""
    tau = np.abs(avetoas[None, :] - avetoas[:, None])

    # length scale converted from days to seconds
    ell = 10 ** log10_ell * 86400
    sigma = 10 ** log10_sigma
    # small diagonal jitter for numerical stability
    jitter = np.eye(tau.shape[0]) * (sigma / 500) ** 2
    return sigma ** 2 * np.exp(-tau ** 2 / 2 / ell ** 2) + jitter
@signal_base.function
def get_tf_quantization_matrix(toas, freqs, dt=30*86400, df=None, dm=False, dm_idx=2):
    """
    Quantization matrix in time and radio frequency to cut down on the kernel
    size.

    :param toas: TOAs [s]
    :param freqs: radio frequencies of the TOAs
    :param dt: time coarseness of the quantization [s]
    :param df: width of the frequency sub-bands; if None, a fixed set of
        receiver-band edges is used
    :param dm: if True, weight the basis columns by (1400/freq)**dm_idx
    :param dm_idx: chromatic index used when ``dm`` is True
    :return: tuple of (weighted quantization matrix with columns sorted by
        average TOA, dict with the per-column 'avetoas' and 'avefreqs')
    """
    # Frequency sub-bands over which TOAs are grouped.
    if df is None:
        dfs = [(600, 1000), (1000, 1900), (1900, 3000), (3000, 5000)]
    else:
        fmin = freqs.min()
        fmax = freqs.max()
        fs = np.arange(fmin, fmax+df, df)
        dfs = [(fs[ii], fs[ii+1]) for ii in range(len(fs)-1)]

    # Build one time-quantization matrix per non-empty sub-band, recording
    # the average TOA and frequency of each quantization epoch.
    Us, avetoas, avefreqs, masks = [], [], [], []
    for rng in dfs:
        mask = np.logical_and(freqs >= rng[0], freqs < rng[1])
        if mask.any():
            masks.append(mask)
            U, _ = utils.create_quantization_matrix(toas[mask],
                                                    dt=dt, nmin=1)
            avetoa = np.array([toas[mask][idx.astype(bool)].mean()
                               for idx in U.T])
            avefreq = np.array([freqs[mask][idx.astype(bool)].mean()
                                for idx in U.T])
            Us.append(U)
            avetoas.append(avetoa)
            avefreqs.append(avefreq)

    # Assemble the block-diagonal quantization matrix.
    # Use the builtin sum: np.sum on a generator is deprecated in NumPy and
    # only works via a fallback to the builtin.
    nc = sum(U.shape[1] for U in Us)
    U = np.zeros((len(toas), nc))
    avetoas = np.concatenate(avetoas)
    # sort the columns by average TOA
    idx = np.argsort(avetoas)
    avefreqs = np.concatenate(avefreqs)
    nctot = 0
    for ct, mask in enumerate(masks):
        Umat = Us[ct]
        nn = Umat.shape[1]
        U[mask, nctot:nn+nctot] = Umat
        nctot += nn

    # Optional dispersion-measure weighting of each row.
    if dm:
        weights = (1400/freqs)**dm_idx
    else:
        weights = np.ones_like(freqs)

    return U[:, idx] * weights[:, None], {'avetoas': avetoas[idx],
                                          'avefreqs': avefreqs[idx]}
@signal_base.function
def tf_kernel(labels, log10_sigma=-7, log10_ell=2, log10_gam_p=0,
              log10_p=0, log10_ell2=4, log10_alpha_wgt=0):
    """
    Product of a quasi-periodic kernel in time and a rational-quadratic
    kernel in radio frequency.
    """
    avetoas = labels['avetoas']
    avefreqs = labels['avefreqs']

    tau = np.abs(avetoas[None, :] - avetoas[:, None])
    dnu = np.abs(avefreqs[None, :] - avefreqs[:, None])

    # undo log10 parameterization; time quantities converted to seconds
    sigma = 10 ** log10_sigma
    ell = 10 ** log10_ell * 86400
    ell2 = 10 ** log10_ell2
    period = 10 ** log10_p * 3.16e7
    gam_p = 10 ** log10_gam_p
    alpha_wgt = 10 ** log10_alpha_wgt

    # small diagonal jitter for numerical stability
    jitter = np.eye(tau.shape[0]) * (sigma / 500) ** 2
    K_time = sigma ** 2 * np.exp(-tau ** 2 / 2 / ell ** 2
                                 - gam_p * np.sin(np.pi * tau / period) ** 2)
    K_freq = (1 + dnu ** 2 / 2 / alpha_wgt / ell2 ** 2) ** (-alpha_wgt)
    return K_time * K_freq + jitter
@signal_base.function
def sf_kernel(labels, log10_sigma=-7, log10_ell=2,
              log10_ell2=4, log10_alpha_wgt=0):
    """
    Product of a squared-exponential kernel in time and a
    rational-quadratic kernel in radio frequency.
    """
    avetoas = labels['avetoas']
    avefreqs = labels['avefreqs']

    tau = np.abs(avetoas[None, :] - avetoas[:, None])
    dnu = np.abs(avefreqs[None, :] - avefreqs[:, None])

    # undo log10 parameterization; time length scale converted to seconds
    ell = 10 ** log10_ell * 86400
    sigma = 10 ** log10_sigma
    ell2 = 10 ** log10_ell2
    alpha_wgt = 10 ** log10_alpha_wgt

    # small diagonal jitter for numerical stability
    jitter = np.eye(tau.shape[0]) * (sigma / 500) ** 2
    K_time = sigma ** 2 * np.exp(-tau ** 2 / 2 / ell ** 2)
    K_freq = (1 + dnu ** 2 / 2 / alpha_wgt / ell2 ** 2) ** (-alpha_wgt)
    return K_time * K_freq + jitter
| 5,878 | 28.691919 | 86 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/deterministic.py | # -*- coding: utf-8 -*-
import numpy as np
from enterprise import constants as const
from enterprise.signals import (deterministic_signals, parameter, signal_base,
utils)
def fdm_block(Tmin, Tmax, amp_prior='log-uniform', name='fdm',
              amp_lower=-18, amp_upper=-11,
              freq_lower=-9, freq_upper=-7,
              use_fixed_freq=False, fixed_freq=-8):
    """
    Returns deterministic fuzzy dark matter model:

        1. FDM parameterized by frequency, phase,
           and amplitude (mass and DM energy density).

    :param Tmin:
        Min time to search, probably first TOA (MJD).
    :param Tmax:
        Max time to search, probably last TOA (MJD).
        (Tmin/Tmax are accepted for API symmetry but unused here.)
    :param amp_prior:
        Prior on log10_A.
    :param name:
        Name of FDM signal.
    :param amp_lower, amp_upper, freq_lower, freq_upper:
        The log-space bounds on the amplitude and frequency priors.
    :param use_fixed_freq:
        Whether to do a fixed-frequency run and not search over the frequency.
    :param fixed_freq:
        The log10 frequency value used when ``use_fixed_freq`` is True.
    """
    # FDM amplitude parameter
    amp_name = '{}_log10_A'.format(name)
    log10_A_fdm = parameter.Uniform(amp_lower, amp_upper)(amp_name)

    # Either fix the frequency or search over it.  (Replaces the fragile
    # pair of `is True` / `is False` checks, which left log10_f_fdm unbound
    # for any other truthy value.)
    if use_fixed_freq:
        log10_f_fdm = fixed_freq
    else:
        freq_name = '{}_log10_f'.format(name)
        log10_f_fdm = parameter.Uniform(freq_lower, freq_upper)(freq_name)

    # Earth-term phase
    phase_e_name = '{}_phase_e'.format(name)
    phase_e_fdm = parameter.Uniform(0, 2*np.pi)(phase_e_name)

    # Pulsar-term phase: unnamed, so one independent parameter per pulsar.
    phase_p = parameter.Uniform(0, 2*np.pi)

    fdm_wf = fdm_delay(log10_A=log10_A_fdm, log10_f=log10_f_fdm,
                       phase_e=phase_e_fdm, phase_p=phase_p)
    fdm = deterministic_signals.Deterministic(fdm_wf, name=name)

    return fdm
def cw_block_circ(amp_prior='log-uniform', dist_prior=None,
                  skyloc=None, log10_fgw=None,
                  psrTerm=False, tref=0, name='cw'):
    """
    Returns deterministic, circular orbit continuous GW model.

    :param amp_prior:
        Prior on log10_h. Default is "log-uniform."
        Use "uniform" for upper limits, or "None" to search over
        log10_dist instead.
    :param dist_prior:
        Prior on log10_dist. Default is "None," meaning that the
        search is over log10_h instead of log10_dist. Use "log-uniform"
        to search over log10_h with a log-uniform prior.
    :param skyloc:
        Fixed sky location of CW signal search as [cos(theta), phi].
        Search over sky location if ``None`` given.
    :param log10_fgw:
        Fixed log10 GW frequency of CW signal search.
        Search over GW frequency if ``None`` given.
    :param psrTerm:
        Boolean for whether to include the pulsar term. Default is False.
    :param tref:
        Reference time for phase and frequency [s].
    :param name:
        Name of CW signal.
    """
    # Either strain (log10_h) or luminosity distance (log10_dist) is
    # sampled, never both; the unused one is passed as None.
    if dist_prior is None:
        log10_dist = None

        if amp_prior == 'uniform':
            log10_h = parameter.LinearExp(-18.0, -11.0)('{}_log10_h'.format(name))
        elif amp_prior == 'log-uniform':
            log10_h = parameter.Uniform(-18.0, -11.0)('{}_log10_h'.format(name))

    elif dist_prior == 'log-uniform':
        log10_dist = parameter.Uniform(-2.0, 4.0)('{}_log10_dL'.format(name))
        log10_h = None

    # chirp mass [Msol]
    log10_Mc = parameter.Uniform(6.0, 10.0)('{}_log10_Mc'.format(name))
    # GW frequency [Hz]
    if log10_fgw is None:
        log10_fgw = parameter.Uniform(-9.0, -7.0)('{}_log10_fgw'.format(name))
    else:
        log10_fgw = parameter.Constant(log10_fgw)('{}_log10_fgw'.format(name))
    # orbital inclination angle [radians]
    cosinc = parameter.Uniform(-1.0, 1.0)('{}_cosinc'.format(name))
    # initial GW phase [radians]
    phase0 = parameter.Uniform(0.0, 2*np.pi)('{}_phase0'.format(name))

    # polarization
    psi_name = '{}_psi'.format(name)
    psi = parameter.Uniform(0, np.pi)(psi_name)

    # sky location
    costh_name = '{}_costheta'.format(name)
    phi_name = '{}_phi'.format(name)
    if skyloc is None:
        costh = parameter.Uniform(-1, 1)(costh_name)
        phi = parameter.Uniform(0, 2*np.pi)(phi_name)
    else:
        costh = parameter.Constant(skyloc[0])(costh_name)
        phi = parameter.Constant(skyloc[1])(phi_name)

    if psrTerm:
        # orbital phase (unnamed: one independent parameter per pulsar)
        p_phase = parameter.Uniform(0, np.pi)
        p_dist = parameter.Normal(0, 1)
    else:
        p_phase = None
        p_dist = 0

    # continuous wave signal
    # NOTE(review): the waveform is built with psrTerm=True unconditionally;
    # whether the pulsar term is actually applied appears to be controlled
    # by CWSignal(..., psrTerm=psrTerm) below -- confirm.
    wf = cw_delay(cos_gwtheta=costh, gwphi=phi, cos_inc=cosinc,
                  log10_mc=log10_Mc, log10_fgw=log10_fgw,
                  log10_h=log10_h, log10_dist=log10_dist,
                  phase0=phase0, psi=psi,
                  psrTerm=True, p_dist=p_dist, p_phase=p_phase,
                  phase_approx=True, check=False,
                  tref=tref)
    cw = CWSignal(wf, ecc=False, psrTerm=psrTerm)

    return cw
def cw_block_ecc(amp_prior='log-uniform', skyloc=None, log10_F=None,
                 ecc=None, psrTerm=False, tref=0, name='cw'):
    """
    Returns deterministic, eccentric orbit continuous GW model.

    :param amp_prior:
        Prior on log10_h and log10_Mc/log10_dL. Default is "log-uniform" with
        log10_Mc and log10_dL searched over. Use "uniform" for upper limits,
        log10_h searched over.
    :param skyloc:
        Fixed sky location of CW signal search as [cos(theta), phi].
        Search over sky location if ``None`` given.
    :param log10_F:
        Fixed log-10 orbital frequency of CW signal search.
        Search over orbital frequency if ``None`` given.
    :param ecc:
        Fixed eccentricity of SMBHB search.
        Search over eccentricity if ``None`` given.
    :param psrTerm:
        Boolean for whether to include the pulsar term. Default is False.
    :param tref:
        Reference time for phase and frequency [s].
    :param name:
        Name of CW signal.
    """
    # For "uniform" (upper-limit) runs the strain is sampled directly;
    # otherwise log10_h is None and Mc/dL determine the amplitude.
    if amp_prior == 'uniform':
        log10_h = parameter.LinearExp(-18.0, -11.0)('{}_log10_h'.format(name))
    elif amp_prior == 'log-uniform':
        log10_h = None
    # chirp mass [Msol]
    log10_Mc = parameter.Uniform(6.0, 10.0)('{}_log10_Mc'.format(name))
    # luminosity distance [Mpc]
    log10_dL = parameter.Uniform(-2.0, 4.0)('{}_log10_dL'.format(name))

    # orbital frequency [Hz]
    if log10_F is None:
        log10_Forb = parameter.Uniform(-9.0, -7.0)('{}_log10_Forb'.format(name))
    else:
        log10_Forb = parameter.Constant(log10_F)('{}_log10_Forb'.format(name))
    # orbital inclination angle [radians]
    cosinc = parameter.Uniform(-1.0, 1.0)('{}_cosinc'.format(name))
    # periapsis position angle [radians]
    gamma_0 = parameter.Uniform(0.0, np.pi)('{}_gamma0'.format(name))

    # Earth-term eccentricity
    if ecc is None:
        e_0 = parameter.Uniform(0.0, 0.99)('{}_e0'.format(name))
    else:
        e_0 = parameter.Constant(ecc)('{}_e0'.format(name))

    # initial mean anomaly [radians]
    l_0 = parameter.Uniform(0.0, 2.0*np.pi)('{}_l0'.format(name))
    # mass ratio = M_2/M_1 (held fixed at unity)
    q = parameter.Constant(1.0)('{}_q'.format(name))

    # polarization
    pol_name = '{}_pol'.format(name)
    pol = parameter.Uniform(0, np.pi)(pol_name)

    # sky location
    costh_name = '{}_costheta'.format(name)
    phi_name = '{}_phi'.format(name)
    if skyloc is None:
        costh = parameter.Uniform(-1, 1)(costh_name)
        phi = parameter.Uniform(0, 2*np.pi)(phi_name)
    else:
        costh = parameter.Constant(skyloc[0])(costh_name)
        phi = parameter.Constant(skyloc[1])(phi_name)

    # continuous wave signal
    wf = compute_eccentric_residuals(cos_gwtheta=costh, gwphi=phi,
                                     log10_mc=log10_Mc, log10_dist=log10_dL,
                                     log10_h=log10_h, log10_F=log10_Forb,
                                     cos_inc=cosinc, psi=pol, gamma0=gamma_0,
                                     e0=e_0, l0=l_0, q=q, nmax=400,
                                     pdist=None, pphase=None, pgam=None,
                                     tref=tref, check=False)
    cw = CWSignal(wf, ecc=True, psrTerm=psrTerm)

    return cw
@signal_base.function
def cw_delay(toas, pos, pdist,
             cos_gwtheta=0, gwphi=0, cos_inc=0,
             log10_mc=9, log10_fgw=-8, log10_dist=None, log10_h=None,
             phase0=0, psi=0,
             psrTerm=False, p_dist=1, p_phase=None,
             evolve=False, phase_approx=False, check=False,
             tref=0):
    """
    Function to create GW induced residuals from a SMBMB as
    defined in Ellis et. al 2012, 2013.

    :param toas:
        Pulsar toas in seconds
    :param pos:
        Unit vector from the Earth to the pulsar
    :param pdist:
        Pulsar distance (mean and uncertainty) [kpc]
    :param cos_gwtheta:
        Cosine of Polar angle of GW source in celestial coords [radians]
    :param gwphi:
        Azimuthal angle of GW source in celestial coords [radians]
    :param cos_inc:
        cosine of Inclination of GW source [radians]
    :param log10_mc:
        log10 of Chirp mass of SMBMB [solar masses]
    :param log10_fgw:
        log10 of Frequency of GW (twice the orbital frequency) [Hz]
    :param log10_dist:
        log10 of Luminosity distance to SMBMB [Mpc],
        used to compute strain, if not None
    :param log10_h:
        log10 of GW strain,
        used to compute distance, if not None
    :param phase0:
        Initial GW phase of source [radians]
    :param psi:
        Polarization angle of GW source [radians]
    :param psrTerm:
        Option to include pulsar term [boolean]
    :param p_dist:
        Pulsar distance parameter
    :param p_phase:
        Use pulsar phase to determine distance [radian]
    :param evolve:
        Option to include/exclude full evolution [boolean]
    :param phase_approx:
        Option to include/exclude phase evolution across observation time
        [boolean]
    :param check:
        Check if frequency evolves significantly over obs. time [boolean]
    :param tref:
        Reference time for phase and frequency [s]

    :return: Vector of induced residuals

    :raises ValueError: if neither or both of ``log10_dist`` / ``log10_h``
        are given -- exactly one must fix the strain-distance relation.
    """
    # convert units to time
    mc = 10**log10_mc * const.Tsun
    fgw = 10**log10_fgw
    gwtheta = np.arccos(cos_gwtheta)
    inc = np.arccos(cos_inc)
    p_dist = (pdist[0] + pdist[1]*p_dist)*const.kpc/const.c

    # exactly one of log10_h / log10_dist must be supplied
    if log10_h is None and log10_dist is None:
        raise ValueError("one of log10_dist or log10_h must be non-None")
    elif log10_h is not None and log10_dist is not None:
        raise ValueError("only one of log10_dist or log10_h can be non-None")
    elif log10_h is None:
        dist = 10**log10_dist * const.Mpc / const.c
    else:
        # infer distance from the strain instead
        dist = 2 * mc**(5/3) * (np.pi*fgw)**(2/3) / 10**log10_h

    if check:
        # check that frequency is not evolving significantly over obs. time
        fstart = fgw * (1 - 256/5 * mc**(5/3) * fgw**(8/3) * toas[0])**(-3/8)
        fend = fgw * (1 - 256/5 * mc**(5/3) * fgw**(8/3) * toas[-1])**(-3/8)
        df = fend - fstart

        # observation time
        Tobs = toas.max()-toas.min()
        fbin = 1/Tobs

        if np.abs(df) > fbin:
            print('WARNING: Frequency is evolving over more than one '
                  'frequency bin.')
            print('f0 = {0}, f1 = {1}, df = {2}, fbin = {3}'.format(fstart, fend, df, fbin))
            return np.ones(len(toas)) * np.nan

    # get antenna pattern funcs and cosMu
    # write function to get pos from theta,phi
    fplus, fcross, cosMu = utils.create_gw_antenna_pattern(pos, gwtheta, gwphi)

    # shift to the reference epoch WITHOUT mutating the caller's array.
    # The previous in-place `toas -= tref` modified the input toas on every
    # call; compute_eccentric_residuals copies for the same reason.
    toas = toas - tref
    if p_dist > 0:
        # retarded pulsar-term time
        tp = toas-p_dist*(1-cosMu)
    else:
        tp = toas

    # orbital frequency
    w0 = np.pi * fgw
    phase0 /= 2  # convert GW to orbital phase
    # omegadot = 96/5 * mc**(5/3) * w0**(11/3) # Not currently used in code

    # evolution
    if evolve:
        # calculate time dependent frequency at earth and pulsar
        omega = w0 * (1 - 256/5 * mc**(5/3) * w0**(8/3) * toas)**(-3/8)
        omega_p = w0 * (1 - 256/5 * mc**(5/3) * w0**(8/3) * tp)**(-3/8)

        if p_dist > 0:
            omega_p0 = w0 * (1 + 256/5
                             * mc**(5/3) * w0**(8/3) * p_dist*(1-cosMu))**(-3/8)
        else:
            omega_p0 = w0

        # calculate time dependent phase
        phase = phase0 + 1/32/mc**(5/3) * (w0**(-5/3) - omega**(-5/3))

        if p_phase is None:
            phase_p = phase0 + 1/32/mc**(5/3) * (w0**(-5/3) - omega_p**(-5/3))
        else:
            phase_p = (phase0 + p_phase
                       + 1/32*mc**(-5/3) * (omega_p0**(-5/3) - omega_p**(-5/3)))

    elif phase_approx:
        # monochromatic frequencies, but phase still evolves linearly
        omega = w0
        if p_dist > 0:
            omega_p = w0 * (1 + 256/5
                            * mc**(5/3) * w0**(8/3) * p_dist*(1-cosMu))**(-3/8)
        else:
            omega_p = w0

        # phases
        phase = phase0 + omega * toas
        if p_phase is not None:
            phase_p = phase0 + p_phase + omega_p*toas
        else:
            phase_p = (phase0 + omega_p*toas
                       + 1/32/mc**(5/3) * (w0**(-5/3) - omega_p**(-5/3)))

    # no evolution
    else:
        # monochromatic
        omega = np.pi*fgw
        omega_p = omega

        # phases
        phase = phase0 + omega * toas
        phase_p = phase0 + omega * tp

    # define time dependent coefficients
    At = -0.5*np.sin(2*phase)*(3+np.cos(2*inc))
    Bt = 2*np.cos(2*phase)*np.cos(inc)
    At_p = -0.5*np.sin(2*phase_p)*(3+np.cos(2*inc))
    Bt_p = 2*np.cos(2*phase_p)*np.cos(inc)

    # now define time dependent amplitudes
    alpha = mc**(5./3.)/(dist*omega**(1./3.))
    alpha_p = mc**(5./3.)/(dist*omega_p**(1./3.))

    # define rplus and rcross
    rplus = alpha*(-At*np.cos(2*psi)+Bt*np.sin(2*psi))
    rcross = alpha*(At*np.sin(2*psi)+Bt*np.cos(2*psi))
    rplus_p = alpha_p*(-At_p*np.cos(2*psi)+Bt_p*np.sin(2*psi))
    rcross_p = alpha_p*(At_p*np.sin(2*psi)+Bt_p*np.cos(2*psi))

    # residuals
    if psrTerm:
        res = fplus*(rplus_p-rplus)+fcross*(rcross_p-rcross)
    else:
        res = -fplus*rplus - fcross*rcross

    return res
@signal_base.function
def bwm_delay(toas, pos, log10_h=-14.0, cos_gwtheta=0.0, gwphi=0.0, gwpol=0.0, t0=55000,
              antenna_pattern_fn=None):
    """
    Earth-term gravitational-wave burst-with-memory signal, as described in:
    Seto et al, van Haasteren and Levin, Pshirkov et al, Cordes and Jenet.
    Uses the F+/Fx polarization modes, consistent with the Continuous Wave
    and Anisotropy papers.

    :param toas: Time-of-arrival measurements [s]
    :param pos: Unit vector from Earth to pulsar
    :param log10_h: log10 of GW strain
    :param cos_gwtheta: Cosine of GW polar angle
    :param gwphi: GW azimuthal polar angle [rad]
    :param gwpol: GW polarization angle
    :param t0: Burst central time [day]
    :param antenna_pattern_fn:
        User defined function that takes `pos`, `gwtheta`, `gwphi` as
        arguments and returns (fplus, fcross)

    :return: the waveform as induced timing residuals (seconds)
    """
    # convert sampled quantities
    strain = 10 ** log10_h
    polar_angle = np.arccos(cos_gwtheta)
    burst_epoch = t0 * const.day

    # pick the antenna-pattern implementation (default or user-supplied)
    if antenna_pattern_fn is None:
        pattern = utils.create_gw_antenna_pattern(pos, polar_angle, gwphi)
    else:
        pattern = antenna_pattern_fn(pos, polar_angle, gwphi)

    # project the two polarization modes with the polarization angle
    fplus, fcross = pattern[0], pattern[1]
    projection = np.cos(2 * gwpol) * fplus + np.sin(2 * gwpol) * fcross

    # memory signal: a ramp switched on at the burst epoch
    elapsed = toas - burst_epoch
    return projection * strain * np.heaviside(elapsed, 0.5) * elapsed
@signal_base.function
def bwm_sglpsr_delay(toas, sign, log10_A=-15, t0=55000):
    """
    Function that calculates the earth-term gravitational-wave
    burst-with-memory signal for an optimally oriented source in a single pulsar

    :param toas: Time-of-arrival measurements [s]
    :param sign: Sign of the ramp; only ``np.sign(sign)`` is used
    :param log10_A: log10 of the amplitude of the ramp (delta_f/f)
    :param t0: Burst central time [day]

    :return: the waveform as induced timing residuals (seconds)
    """
    A = 10 ** log10_A
    t0 *= const.day

    # Ramp switched on at the burst epoch. np.heaviside(x, 0.5) equals the
    # previously hand-rolled 0.5*(np.sign(x)+1) (0.5 exactly at the step),
    # matching the convention used by bwm_delay.
    return A * np.sign(sign) * np.heaviside(toas - t0, 0.5) * (toas - t0)
@signal_base.function
def compute_eccentric_residuals(toas, theta, phi, cos_gwtheta, gwphi,
                                log10_mc, log10_dist, log10_h, log10_F, cos_inc,
                                psi, gamma0, e0, l0, q, nmax=400, pdist=1.0,
                                pphase=None, pgam=None, psrTerm=False,
                                tref=0, check=False):
    """
    Simulate GW from eccentric SMBHB. Waveform models from
    Taylor et al. (2015) and Barack and Cutler (2004).
    WARNING: This residual waveform is only accurate if the
    GW frequency is not significantly evolving over the
    observation time of the pulsar.

    :param toas: pulsar observation times
    :param theta: polar coordinate of pulsar
    :param phi: azimuthal coordinate of pulsar
    :param cos_gwtheta: Cosine of polar angle of GW source in celestial coords
    :param gwphi: Azimuthal angle of GW source in celestial coords [radians]
    :param log10_mc: Base-10 log of chirp mass of SMBMB [solar masses]
    :param log10_dist: Base-10 log of luminosity distance to SMBMB [Mpc]
    :param log10_h: Base-10 log of GW strain (alternative to distance)
    :param log10_F: base-10 orbital frequency of SMBHB [Hz]
    :param cos_inc: Cosine of inclination of GW source
    :param psi: Polarization of GW source [radians]
    :param gamma0: Initial angle of periastron [radians]
    :param e0: Initial eccentricity of SMBHB
    :param l0: Initial mean anomaly [radians]
    :param q: Mass ratio of SMBHB
    :param nmax: Number of harmonics to use in waveform decomposition
    :param pdist: Pulsar distance [kpc]
    :param pphase: Pulsar phase [rad]
    :param pgam: Pulsar angle of periastron [rad]
    :param psrTerm: Option to include pulsar term [boolean]
    :param tref: Fiducial time at which initial parameters are referenced [s]
    :param check: Check if frequency evolves significantly over obs. time

    :returns: Vector of induced residuals
    """
    # convert from sampling
    F = 10.0**log10_F
    mc = 10.0**log10_mc
    dist = 10.0**log10_dist
    if log10_h is not None:
        h0 = 10.0**log10_h
    else:
        h0 = None
    inc = np.arccos(cos_inc)
    gwtheta = np.arccos(cos_gwtheta)

    # define variable for later use
    cosgwtheta, cosgwphi = np.cos(gwtheta), np.cos(gwphi)
    singwtheta, singwphi = np.sin(gwtheta), np.sin(gwphi)
    sin2psi, cos2psi = np.sin(2*psi), np.cos(2*psi)

    # unit vectors to GW source
    m = np.array([singwphi, -cosgwphi, 0.0])
    n = np.array([-cosgwtheta*cosgwphi, -cosgwtheta*singwphi, singwtheta])
    omhat = np.array([-singwtheta*cosgwphi, -singwtheta*singwphi, -cosgwtheta])

    # pulsar position vector
    phat = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi),
                     np.cos(theta)])

    # antenna pattern functions and cosine of the source-pulsar angle
    fplus = 0.5 * (np.dot(m, phat)**2 - np.dot(n, phat)**2) / (1+np.dot(omhat, phat))
    fcross = (np.dot(m, phat)*np.dot(n, phat)) / (1 + np.dot(omhat, phat))
    cosMu = -np.dot(omhat, phat)

    # get values from pulsar object; copy so the caller's toas are untouched
    toas = toas.copy() - tref

    if check:
        # check that frequency is not evolving significantly over obs. time
        y = utils.solve_coupled_ecc_solution(F, e0, gamma0, l0, mc, q,
                                             np.array([0.0, toas.max()]))

        # initial and final values over observation time
        Fc0, ec0, gc0, phic0 = y[0, :]
        Fc1, ec1, gc1, phic1 = y[-1, :]

        # observation span and the corresponding Fourier bin width.
        # (Previously `Tobs` was assigned 1/span and then re-inverted, so the
        # frequency change was compared against the span in seconds -- a
        # dimensionally wrong check; this now matches cw_delay's check.)
        Tobs = toas.max() - toas.min()
        fbin = 1/Tobs

        if np.abs(Fc0-Fc1) > fbin:
            print('WARNING: Frequency is evolving over more than one frequency bin.')
            print('F0 = {0}, F1 = {1}, delta f = {2}'.format(Fc0, Fc1, fbin))
            return np.ones(len(toas)) * np.nan

    # get gammadot for earth term
    gammadot = utils.get_gammadot(F, mc, q, e0)

    # get number of harmonics to use; nmax may be a callable of eccentricity
    if not isinstance(nmax, int):
        if e0 < 0.999 and e0 > 0.001:
            nharm = int(nmax(e0))
        elif e0 < 0.001:
            nharm = 2
        else:
            nharm = int(nmax(0.999))
    else:
        nharm = nmax

    # no more than 100 harmonics
    nharm = min(nharm, 100)

    # #### earth term ####
    splus, scross = utils.calculate_splus_scross(nmax=nharm, mc=mc, dl=dist,
                                                 h0=h0, F=F, e=e0, t=toas.copy(),
                                                 l0=l0, gamma=gamma0,
                                                 gammadot=gammadot, inc=inc)

    # #### pulsar term ####
    if psrTerm:
        # pulsar distance
        pd = pdist

        # convert units
        pd *= const.kpc / const.c

        # get pulsar time
        tp = toas.copy() - pd * (1-cosMu)

        # solve coupled system of equations to get pulsar term values
        y = utils.solve_coupled_ecc_solution(F, e0, gamma0, l0, mc,
                                             q, np.array([0.0, tp.min()]))

        # get pulsar term values
        if np.any(y):
            Fp, ep, gp, phip = y[-1, :]

            # get gammadot at pulsar term
            gammadotp = utils.get_gammadot(Fp, mc, q, ep)

            # get phase at pulsar
            if pphase is None:
                lp = phip
            else:
                lp = pphase

            # get angle of periastron at pulsar
            if pgam is None:
                gp = gp
            else:
                gp = pgam

            # get number of harmonics to use
            if not isinstance(nmax, int):
                if e0 < 0.999 and e0 > 0.001:
                    nharm = int(nmax(e0))
                elif e0 < 0.001:
                    nharm = 2
                else:
                    nharm = int(nmax(0.999))
            else:
                nharm = nmax

            # no more than 100 harmonics
            nharm = min(nharm, 100)

            splusp, scrossp = utils.calculate_splus_scross(nmax=nharm, mc=mc,
                                                           dl=dist, h0=h0,
                                                           F=Fp, e=ep,
                                                           t=toas.copy(),
                                                           l0=lp, gamma=gp,
                                                           gammadot=gammadotp,
                                                           inc=inc)

            rr = (fplus*cos2psi - fcross*sin2psi) * (splusp - splus) + \
                (fplus*sin2psi + fcross*cos2psi) * (scrossp - scross)
        else:
            rr = np.ones(len(toas)) * np.nan
    else:
        rr = - (fplus*cos2psi - fcross*sin2psi) * splus - \
            (fplus*sin2psi + fcross*cos2psi) * scross

    return rr
def CWSignal(cw_wf, ecc=False, psrTerm=False, name='cw'):
    """Class factory wrapping a CW waveform function in a Deterministic signal.

    :param cw_wf: waveform function (e.g. ``cw_delay`` or
        ``compute_eccentric_residuals``) already wrapped by
        ``@signal_base.function``.
    :param ecc: if True, add a per-pulsar angle-of-periastron parameter
        ``pgam`` (used for eccentric-binary searches).
    :param psrTerm: forwarded to the waveform call as a fixed keyword.
    :param name: signal name used in parameter naming.
    :return: a signal class usable in an enterprise model.
    """
    BaseClass = deterministic_signals.Deterministic(cw_wf, name=name)

    class CWSignal(BaseClass):

        def __init__(self, psr):
            super(CWSignal, self).__init__(psr)
            # freeze the pulsar-term switch into the wrapped waveform call
            self._wf[''].add_kwarg(psrTerm=psrTerm)
            if ecc:
                # per-pulsar periastron-angle parameter, named
                # '<psrname>_pgam_<name>', registered both on the signal and
                # on the underlying waveform so it is sampled and passed down
                pgam = parameter.Uniform(0, 2*np.pi)('_'.join([psr.name,
                                                               'pgam',
                                                               name]))
                self._params['pgam'] = pgam
                self._wf['']._params['pgam'] = pgam

    return CWSignal
@signal_base.function
def generalized_gwpol_psd(f, log10_A_tt=-15, log10_A_st=-15,
                          log10_A_vl=-15, log10_A_sl=-15,
                          kappa=10/3, p_dist=1.0):
    """
    PSD for a generalized mixture of scalar+vector dipole radiation
    and tensorial quadrupole radiation from SMBHBs.
    """
    # bin widths of the (frequency-doubled) array: f holds sine/cosine pairs
    df = np.diff(np.concatenate((np.array([0]), f[::2])))

    gamma_e = 0.5772156649  # Euler-Mascheroni constant
    pd_sec = p_dist * const.kpc / const.c  # pulsar distance in light-seconds

    # auto-correlation ORF terms for each polarization mode
    orf_tt = (2/3) * np.ones(len(f))
    orf_st = (2/3) * np.ones(len(f))
    orf_vl = 2*np.log(4*np.pi*f*pd_sec) - 14/3 + 2*gamma_e
    orf_sl = np.pi**2*f*pd_sec/4 - \
        np.log(4*np.pi*f*pd_sec) + 37/24 - gamma_e

    xf = f / const.fyr
    prefactor = (1 + kappa**2) / (1 + kappa**2 * xf**(-2/3))

    # squared amplitudes per mode
    amp_tt, amp_st, amp_vl, amp_sl = 10**(2*np.array([log10_A_tt, log10_A_st,
                                                      log10_A_vl, log10_A_sl]))

    # tensor quadrupole term and the scalar/vector dipole terms
    quad = orf_tt*amp_tt * xf**(-4/3)
    dip = (orf_st*amp_st + orf_vl*amp_vl + orf_sl*amp_sl) * xf**(-2)

    S_psd = prefactor * (quad + dip) / (8*np.pi**2*f**3)

    return S_psd * np.repeat(df, 2)
@signal_base.function
def fdm_delay(toas, log10_A, log10_f, phase_e, phase_p):
    """
    Earth-term fuzzy-dark-matter timing signal, as described in
    Kato et al. (2020).

    :param toas: Time-of-arrival measurements [s]
    :param log10_A: log10 of GW strain
    :param log10_f: log10 of GW frequency
    :param phase_e: The Earth-term phase of the GW
    :param phase_p: The Pulsar-term phase of the GW

    :return: the waveform as induced timing residuals (seconds)
    """
    # convert sampled log-quantities
    amp = 10 ** log10_A
    freq = 10 ** log10_f

    # difference of two sinusoids sharing the same frequency, offset by the
    # Earth- and pulsar-term phases
    omega_t = 2 * np.pi * freq * toas
    return -amp / (2 * np.pi * freq) * (np.sin(omega_t + phase_e) -
                                        np.sin(omega_t + phase_p))
| 26,111 | 34.334235 | 116 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/sampler.py | # -*- coding: utf-8 -*-
import glob
import os
import pickle
import platform
import healpy as hp
import numpy as np
from PTMCMCSampler import __version__ as __vPTMCMC__
from PTMCMCSampler.PTMCMCSampler import PTSampler as ptmcmc
from enterprise_extensions import __version__
from enterprise_extensions.empirical_distr import (EmpiricalDistribution1D,
EmpiricalDistribution1DKDE,
EmpiricalDistribution2D,
EmpiricalDistribution2DKDE)
def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outdir='./chains'):
    """Extend empirical proposal distributions to cover the full priors.

    For each 1D/2D empirical distribution (histogram- or KDE-based) whose
    support is narrower than the corresponding parameter prior in ``pta``,
    draw ``npoints`` samples from the old distribution, drop samples outside
    the prior, and rebuild the distribution on a wider grid reaching the
    prior edges. Normal priors are covered out to +/- 10 sigma.

    :param pta: enterprise PTA object whose priors define the target support
    :param emp_dists: list of EmpiricalDistribution1D/2D(KDE) objects
    :param npoints: number of samples used to rebuild each distribution
    :param save_ext_dists: if True, pickle the extended distributions
    :param outdir: directory for the pickled output
    :return: list of (possibly extended) empirical distributions
    """
    new_emp_dists = []
    modified = False  # check if anything was changed
    for emp_dist in emp_dists:

        if isinstance(emp_dist, EmpiricalDistribution2D) or isinstance(emp_dist, EmpiricalDistribution2DKDE):
            # check if we need to extend the distribution
            prior_ok = True
            for ii, (param, nbins) in enumerate(zip(emp_dist.param_names, emp_dist._Nbins)):
                param_names = [par.name for par in pta.params]
                if param not in param_names:  # skip if one of the parameters isn't in our PTA object
                    short_par = '_'.join(param.split('_')[:-1])  # make sure we aren't skipping priors with size!=None
                    if short_par in param_names:
                        param = short_par
                    else:
                        continue

                # check 2 conditions on both params to make sure that they cover their priors
                # skip if emp dist already covers the prior
                param_idx = param_names.index(param)
                if pta.params[param_idx].type not in ['uniform', 'normal']:
                    msg = '{} cannot be covered automatically by the empirical distribution\n'.format(pta.params[param_idx].prior)
                    msg += 'Please check that your prior is covered by the empirical distribution.\n'
                    print(msg)
                    continue
                elif pta.params[param_idx].type == 'uniform':
                    prior_min = pta.params[param_idx].prior._defaults['pmin']
                    prior_max = pta.params[param_idx].prior._defaults['pmax']
                elif pta.params[param_idx].type == 'normal':
                    # cover +/- 10 sigma of a normal prior
                    prior_min = pta.params[param_idx].prior._defaults['mu'] - 10 * pta.params[param_idx].prior._defaults['sigma']
                    prior_max = pta.params[param_idx].prior._defaults['mu'] + 10 * pta.params[param_idx].prior._defaults['sigma']

                # no need to extend if histogram edges are already prior min/max
                if isinstance(emp_dist, EmpiricalDistribution2D):
                    if not (emp_dist._edges[ii][0] == prior_min and emp_dist._edges[ii][-1] == prior_max):
                        prior_ok = False
                        continue
                elif isinstance(emp_dist, EmpiricalDistribution2DKDE):
                    if not (emp_dist.minvals[ii] == prior_min and emp_dist.maxvals[ii] == prior_max):
                        prior_ok = False
                        continue
            if prior_ok:
                new_emp_dists.append(emp_dist)
                continue

            modified = True
            samples = np.zeros((npoints, emp_dist.draw().shape[0]))
            for ii in range(npoints):  # generate samples from old emp dist
                samples[ii] = emp_dist.draw()
            new_bins = []
            minvals = []
            maxvals = []
            idxs_to_remove = []
            for ii, (param, nbins) in enumerate(zip(emp_dist.param_names, emp_dist._Nbins)):
                param_idx = param_names.index(param)
                if pta.params[param_idx].type == 'uniform':
                    prior_min = pta.params[param_idx].prior._defaults['pmin']
                    prior_max = pta.params[param_idx].prior._defaults['pmax']
                elif pta.params[param_idx].type == 'normal':
                    prior_min = pta.params[param_idx].prior._defaults['mu'] - 10 * pta.params[param_idx].prior._defaults['sigma']
                    prior_max = pta.params[param_idx].prior._defaults['mu'] + 10 * pta.params[param_idx].prior._defaults['sigma']
                # drop samples that are outside the prior range (in case prior is smaller than samples)
                if isinstance(emp_dist, EmpiricalDistribution2D):
                    samples[(samples[:, ii] < prior_min) | (samples[:, ii] > prior_max), ii] = -np.inf
                elif isinstance(emp_dist, EmpiricalDistribution2DKDE):
                    idxs_to_remove.extend(np.arange(npoints)[(samples[:, ii] < prior_min) | (samples[:, ii] > prior_max)])
                minvals.append(prior_min)
                maxvals.append(prior_max)
                # new distribution with more bins this time to extend it all the way out in same style as above.
                new_bins.append(np.linspace(prior_min, prior_max, nbins + 40))
            samples = np.delete(samples, idxs_to_remove, axis=0)
            if isinstance(emp_dist, EmpiricalDistribution2D):
                new_emp = EmpiricalDistribution2D(emp_dist.param_names, samples.T, new_bins)
            elif isinstance(emp_dist, EmpiricalDistribution2DKDE):
                # new distribution with more bins this time to extend it all the way out in same style as above.
                new_emp = EmpiricalDistribution2DKDE(emp_dist.param_names, samples.T, minvals=minvals, maxvals=maxvals, nbins=nbins+40, bandwidth=emp_dist.bandwidth)
            new_emp_dists.append(new_emp)

        elif isinstance(emp_dist, EmpiricalDistribution1D) or isinstance(emp_dist, EmpiricalDistribution1DKDE):
            param_names = [par.name for par in pta.params]
            if emp_dist.param_name not in param_names:  # skip if one of the parameters isn't in our PTA object
                short_par = '_'.join(emp_dist.param_name.split('_')[:-1])  # make sure we aren't skipping priors with size!=None
                if short_par in param_names:
                    param = short_par
                else:
                    continue
            else:
                param = emp_dist.param_name
            param_idx = param_names.index(param)
            if pta.params[param_idx].type not in ['uniform', 'normal']:
                msg = 'This prior cannot be covered automatically by the empirical distribution\n'
                msg += 'Please check that your prior is covered by the empirical distribution.\n'
                print(msg)
                continue
            if pta.params[param_idx].type == 'uniform':
                prior_min = pta.params[param_idx].prior._defaults['pmin']
                prior_max = pta.params[param_idx].prior._defaults['pmax']
            elif pta.params[param_idx].type == 'normal':
                # BUG FIX: this branch previously re-tested == 'uniform',
                # which duplicated the condition above and left
                # prior_min/prior_max unset for normal priors.
                prior_min = pta.params[param_idx].prior._defaults['mu'] - 10 * pta.params[param_idx].prior._defaults['sigma']
                prior_max = pta.params[param_idx].prior._defaults['mu'] + 10 * pta.params[param_idx].prior._defaults['sigma']

            # check 2 conditions on param to make sure that it covers the prior
            # skip if emp dist already covers the prior
            if isinstance(emp_dist, EmpiricalDistribution1D):
                if emp_dist._edges[0] == prior_min and emp_dist._edges[-1] == prior_max:
                    new_emp_dists.append(emp_dist)
                    continue
            elif isinstance(emp_dist, EmpiricalDistribution1DKDE):
                if emp_dist.minval == prior_min and emp_dist.maxval == prior_max:
                    new_emp_dists.append(emp_dist)
                    continue

            modified = True
            samples = np.zeros((npoints, 1))
            for ii in range(npoints):  # generate samples from old emp dist
                samples[ii] = emp_dist.draw()
            new_bins = []
            idxs_to_remove = []
            # drop samples that are outside the prior range (in case prior is smaller than samples)
            if isinstance(emp_dist, EmpiricalDistribution1D):
                samples[(samples < prior_min) | (samples > prior_max)] = -np.inf
            elif isinstance(emp_dist, EmpiricalDistribution1DKDE):
                idxs_to_remove.extend(np.arange(npoints)[(samples.squeeze() < prior_min) | (samples.squeeze() > prior_max)])

            samples = np.delete(samples, idxs_to_remove, axis=0)
            new_bins = np.linspace(prior_min, prior_max, emp_dist._Nbins + 40)
            if isinstance(emp_dist, EmpiricalDistribution1D):
                new_emp = EmpiricalDistribution1D(emp_dist.param_name, samples, new_bins)
            elif isinstance(emp_dist, EmpiricalDistribution1DKDE):
                new_emp = EmpiricalDistribution1DKDE(emp_dist.param_name, samples,
                                                     minval=prior_min, maxval=prior_max,
                                                     bandwidth=emp_dist.bandwidth)
            new_emp_dists.append(new_emp)

        else:
            print('Unable to extend class of unknown type to the edges of the priors.')
            new_emp_dists.append(emp_dist)
            continue

    if save_ext_dists and modified:  # if user wants to save them, and they have been modified...
        with open(outdir + '/new_emp_dists.pkl', 'wb') as f:
            pickle.dump(new_emp_dists, f)

    return new_emp_dists
class JumpProposal(object):
    def __init__(self, pta, snames=None, empirical_distr=None, f_stat_file=None, save_ext_dists=False, outdir='./chains'):
        """Set up some custom jump proposals.

        :param pta: enterprise PTA object the proposals operate on
        :param snames: optional precomputed mapping signal name -> parameter
            list; rebuilt from ``pta`` when ``None``
        :param empirical_distr: list of empirical distributions, a single
            pickle file path, or a directory of ``*.pkl`` files (``None``
            disables empirical-distribution proposals)
        :param f_stat_file: optional ``.npz`` file holding an F-statistic map
            with arrays ``freqs`` and ``fe``
        :param save_ext_dists: if True, save prior-extended distributions
        :param outdir: output directory for saved distributions
        """
        # cache parameter metadata from the PTA
        self.params = pta.params
        self.pnames = pta.param_names
        self.psrnames = pta.pulsars
        self.ndim = sum(p.size or 1 for p in pta.params)
        self.plist = [p.name for p in pta.params]

        # parameter map: str(param) -> slice into the full parameter vector
        # (vector parameters occupy `size` consecutive entries)
        self.pmap = {}
        ct = 0
        for p in pta.params:
            size = p.size or 1
            self.pmap[str(p)] = slice(ct, ct+size)
            ct += size

        # parameter indices map: parameter name -> flat index
        self.pimap = {}
        for ct, p in enumerate(pta.param_names):
            self.pimap[p] = ct

        # collecting signal parameters across pta
        if snames is None:
            allsigs = np.hstack([[qq.signal_name for qq in pp._signals]
                                 for pp in pta._signalcollections])
            self.snames = dict.fromkeys(np.unique(allsigs))
            for key in self.snames:
                self.snames[key] = []

            # group each signal's parameters under its signal name,
            # deduplicating parameters shared across pulsars
            for sc in pta._signalcollections:
                for signal in sc._signals:
                    self.snames[signal.signal_name].extend(signal.params)
            for key in self.snames:
                self.snames[key] = list(set(self.snames[key]))
        else:
            self.snames = snames

        # empirical distributions
        if isinstance(empirical_distr, list):
            # check if a list of emp dists is provided
            self.empirical_distr = empirical_distr

        # check if a directory of empirical dist pkl files are provided
        elif empirical_distr is not None and os.path.isdir(empirical_distr):
            dir_files = glob.glob(empirical_distr+'*.pkl')  # search for pkls

            pickled_distr = np.array([])
            for idx, emp_file in enumerate(dir_files):
                try:
                    with open(emp_file, 'rb') as f:
                        pickled_distr = np.append(pickled_distr, pickle.load(f))
                # NOTE(review): bare except with an identical retry body --
                # presumably a second attempt for transient read failures;
                # the retry is byte-identical to the first attempt.
                except:
                    try:
                        with open(emp_file, 'rb') as f:
                            pickled_distr = np.append(pickled_distr, pickle.load(f))
                    except:
                        print(f'\nI can\'t open the empirical distribution pickle file at location {idx} in list!')
                        print("Empirical distributions set to 'None'")
                        pickled_distr = None
                        break

            self.empirical_distr = pickled_distr

        # check if single pkl file provided
        elif empirical_distr is not None and os.path.isfile(empirical_distr):  # checking for single file
            try:
                # try opening the file
                with open(empirical_distr, 'rb') as f:
                    pickled_distr = pickle.load(f)
            except:
                # second attempt at opening the file
                try:
                    with open(empirical_distr, 'rb') as f:
                        pickled_distr = pickle.load(f)
                # if the second attempt fails...
                except:
                    print('\nI can\'t open the empirical distribution pickle file!')
                    pickled_distr = None

            self.empirical_distr = pickled_distr

        # all other cases - emp dists set to None
        else:
            self.empirical_distr = None

        if self.empirical_distr is not None:
            # only save the empirical distributions for parameters that are in the model
            mask = []
            for idx, d in enumerate(self.empirical_distr):
                if d.ndim == 1:
                    if d.param_name in pta.param_names:
                        mask.append(idx)
                else:
                    if d.param_names[0] in pta.param_names and d.param_names[1] in pta.param_names:
                        mask.append(idx)

            if len(mask) >= 1:
                self.empirical_distr = [self.empirical_distr[m] for m in mask]
                # extend empirical_distr here:
                print('Extending empirical distributions to priors...\n')
                self.empirical_distr = extend_emp_dists(pta, self.empirical_distr, npoints=100_000,
                                                        save_ext_dists=save_ext_dists, outdir=outdir)
            else:
                self.empirical_distr = None

        if empirical_distr is not None and self.empirical_distr is None:
            # if an emp dist path is provided, but fails the code, this helpful msg is provided
            print("Adding empirical distributions failed!! Empirical distributions set to 'None'\n")

        # F-statistic map (frequencies and F-statistic values from the npz)
        if f_stat_file is not None and os.path.isfile(f_stat_file):
            npzfile = np.load(f_stat_file)
            self.fe_freqs = npzfile['freqs']
            self.fe = npzfile['fe']
def draw_from_prior(self, x, iter, beta):
"""Prior draw.
The function signature is specific to PTMCMCSampler.
"""
q = x.copy()
lqxy = 0
# randomly choose parameter
param = np.random.choice(self.params)
# if vector parameter jump in random component
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_red_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'red noise'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
    def draw_from_empirical_distr(self, x, iter, beta):
        """Jump proposal drawing from one stored empirical distribution.

        Picks one of ``self.empirical_distr`` at random, redraws the
        corresponding parameter(s), and returns the new point plus the
        forward-backward log proposal ratio. If the current point lies
        outside the distribution's support, falls back to a prior draw.
        Signature follows the PTMCMCSampler convention.
        """
        q = x.copy()
        lqxy = 0

        if self.empirical_distr is not None:
            # randomly choose one of the empirical distributions
            distr_idx = np.random.randint(0, len(self.empirical_distr))

            if self.empirical_distr[distr_idx].ndim == 1:
                idx = self.pnames.index(self.empirical_distr[distr_idx].param_name)
                q[idx] = self.empirical_distr[distr_idx].draw()

                lqxy = (self.empirical_distr[distr_idx].logprob(x[idx]) -
                        self.empirical_distr[distr_idx].logprob(q[idx]))

                dist = self.empirical_distr[distr_idx]
                # if we fall outside the emp distr support, pull from prior instead
                # NOTE(review): `_edges` exists on histogram-based
                # distributions; a KDE-based distribution here would raise
                # AttributeError -- confirm KDE dists always cover the prior.
                if x[idx] < dist._edges[0] or x[idx] > dist._edges[-1]:
                    q, lqxy = self.draw_from_prior(x, iter, beta)
            else:
                # multi-dimensional distribution: redraw all its parameters
                dist = self.empirical_distr[distr_idx]
                oldsample = [x[self.pnames.index(p)] for p in dist.param_names]
                newsample = dist.draw()

                lqxy = (dist.logprob(oldsample) - dist.logprob(newsample))

                for p, n in zip(dist.param_names, newsample):
                    q[self.pnames.index(p)] = n

                # if we fall outside the emp distr support, pull from prior instead
                for ii in range(len(oldsample)):
                    if oldsample[ii] < dist._edges[ii][0] or oldsample[ii] > dist._edges[ii][-1]:
                        q, lqxy = self.draw_from_prior(x, iter, beta)

        return q, float(lqxy)
    def draw_from_psr_empirical_distr(self, x, iter, beta):
        """Redraw ALL empirical-distribution parameters of one random pulsar.

        Chooses a pulsar at random, finds every empirical distribution whose
        parameter name(s) contain that pulsar's name, and redraws each,
        accumulating the forward-backward log ratio over all of them.
        Signature follows the PTMCMCSampler convention.
        """
        q = x.copy()
        lqxy = 0

        if self.empirical_distr is not None:
            # make list of empirical distributions with psr name
            psr = np.random.choice(self.psrnames)
            pnames = [ed.param_name if ed.ndim==1 else ed.param_names
                      for ed in self.empirical_distr]

            # Retrieve indices of emp dists with pulsar pars.
            idxs = []
            for par in pnames:
                if isinstance(par, str):
                    # 1D distribution: single parameter name
                    if psr in par:
                        idxs.append(pnames.index(par))
                elif isinstance(par, list):
                    # 2D distribution: match if any of its parameters belong
                    # to the chosen pulsar
                    if any([psr in p for p in par]):
                        idxs.append(pnames.index(par))

            for idx in idxs:
                if self.empirical_distr[idx].ndim == 1:
                    pidx = self.pimap[self.empirical_distr[idx].param_name]
                    q[pidx] = self.empirical_distr[idx].draw()

                    lqxy += (self.empirical_distr[idx].logprob(x[pidx]) -
                             self.empirical_distr[idx].logprob(q[pidx]))

                else:
                    oldsample = [x[self.pnames.index(p)]
                                 for p in self.empirical_distr[idx].param_names]
                    newsample = self.empirical_distr[idx].draw()

                    for p, n in zip(self.empirical_distr[idx].param_names, newsample):
                        q[self.pnames.index(p)] = n

                    lqxy += (self.empirical_distr[idx].logprob(oldsample) -
                             self.empirical_distr[idx].logprob(newsample))

        return q, float(lqxy)
def draw_from_dm_gp_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'dm_gp'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_dm1yr_prior(self, x, iter, beta):
q = x.copy()
dm1yr_names = [dmname for dmname in self.pnames if 'dm_s1yr' in dmname]
dmname = np.random.choice(dm1yr_names)
idx = self.pnames.index(dmname)
if 'log10_Amp' in dmname:
q[idx] = np.random.uniform(-10, -2)
elif 'phase' in dmname:
q[idx] = np.random.uniform(0, 2*np.pi)
return q, 0
def draw_from_dmexpdip_prior(self, x, iter, beta):
q = x.copy()
dmexp_names = [dmname for dmname in self.pnames if 'dmexp' in dmname]
dmname = np.random.choice(dmexp_names)
idx = self.pnames.index(dmname)
if 'log10_Amp' in dmname:
q[idx] = np.random.uniform(-10, -2)
elif 'log10_tau' in dmname:
q[idx] = np.random.uniform(0, 2.5)
elif 'sign_param' in dmname:
q[idx] = np.random.uniform(-1.0, 1.0)
return q, 0
def draw_from_dmexpcusp_prior(self, x, iter, beta):
q = x.copy()
dmexp_names = [dmname for dmname in self.pnames if 'dm_cusp' in dmname]
dmname = np.random.choice(dmexp_names)
idx = self.pnames.index(dmname)
if 'log10_Amp' in dmname:
q[idx] = np.random.uniform(-10, -2)
elif 'log10_tau' in dmname:
q[idx] = np.random.uniform(0, 2.5)
# elif 't0' in dmname:
# q[idx] = np.random.uniform(53393.0, 57388.0)
elif 'sign_param' in dmname:
q[idx] = np.random.uniform(-1.0, 1.0)
return q, 0
def draw_from_dmx_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'dmx_signal'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_chrom_gp_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'chrom_gp'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[str(param)]] = param.sample()
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_gwb_log_uniform_distribution(self, x, iter, beta):
q = x.copy()
lqxy = 0
# draw parameter from signal model
signal_name = [par for par in self.pnames
if ('gw' in par and 'log10_A' in par)][0]
idx = list(self.pnames).index(signal_name)
param = self.params[idx]
q[self.pmap[str(param)]] = np.random.uniform(param.prior._defaults['pmin'], param.prior._defaults['pmax'])
# forward-backward jump probability
lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
param.get_logpdf(q[self.pmap[str(param)]]))
return q, float(lqxy)
def draw_from_dipole_log_uniform_distribution(self, x, iter, beta):
q = x.copy()
# draw parameter from signal model
idx = self.pnames.index('dipole_log10_A')
q[idx] = np.random.uniform(-18, -11)
return q, 0
def draw_from_monopole_log_uniform_distribution(self, x, iter, beta):
    """Propose a uniform draw of the monopole log-amplitude over [-18, -11]."""
    proposal = x.copy()
    slot = self.pnames.index('monopole_log10_A')
    proposal[slot] = np.random.uniform(-18, -11)
    # symmetric proposal: no Hastings correction
    return proposal, 0
def draw_from_altpol_log_uniform_distribution(self, x, iter, beta):
    """Propose a uniform draw for one alternative-polarization amplitude (or kappa)."""
    proposal = x.copy()
    # candidate parameters: every polarization amplitude, plus kappa if present
    choices = [pol for pol in self.pnames if 'log10Apol' in pol]
    if 'kappa' in self.pnames:
        choices.append('kappa')
    picked = np.random.choice(choices)
    slot = self.pnames.index(picked)
    # per-parameter uniform ranges
    ranges = {'log10Apol_tt': (-18, -12),
              'log10Apol_st': (-18, -12),
              'log10Apol_vl': (-18, -15),
              'log10Apol_sl': (-18, -16),
              'kappa': (0, 10)}
    bounds = ranges.get(picked)
    if bounds is not None:
        proposal[slot] = np.random.uniform(bounds[0], bounds[1])
    return proposal, 0
def draw_from_ephem_prior(self, x, iter, beta):
    """Propose a prior redraw of one 'phys_ephem' (ephemeris model) parameter.

    Returns the proposed parameter vector and the log forward-backward
    (Hastings) correction for this asymmetric proposal.
    """
    proposal = x.copy()
    # pick one ephemeris-model parameter at random
    param = np.random.choice(self.snames['phys_ephem'])
    slot = self.pmap[str(param)]
    if param.size:
        # vector parameter: redraw a single randomly chosen component
        component = np.random.randint(0, param.size)
        proposal[slot][component] = param.sample()[component]
    else:
        proposal[slot] = param.sample()
    # forward-backward jump probability
    hastings = param.get_logpdf(x[slot]) - param.get_logpdf(proposal[slot])
    return proposal, float(hastings)
def draw_from_bwm_prior(self, x, iter, beta):
    """Propose a prior redraw of one 'bwm' (burst-with-memory) parameter.

    Returns the proposed parameter vector and the log forward-backward
    (Hastings) correction for this asymmetric proposal.
    """
    proposal = x.copy()
    # pick one BWM parameter at random
    param = np.random.choice(self.snames['bwm'])
    slot = self.pmap[str(param)]
    if param.size:
        # vector parameter: redraw a single randomly chosen component
        component = np.random.randint(0, param.size)
        proposal[slot][component] = param.sample()[component]
    else:
        proposal[slot] = param.sample()
    # forward-backward jump probability
    hastings = param.get_logpdf(x[slot]) - param.get_logpdf(proposal[slot])
    return proposal, float(hastings)
def draw_from_fdm_prior(self, x, iter, beta):
    """Propose a prior redraw of one 'fdm' (fuzzy dark matter) parameter.

    Returns the proposed parameter vector and the log forward-backward
    (Hastings) correction for this asymmetric proposal.
    """
    proposal = x.copy()
    # pick one FDM parameter at random
    param = np.random.choice(self.snames['fdm'])
    slot = self.pmap[str(param)]
    if param.size:
        # vector parameter: redraw a single randomly chosen component
        component = np.random.randint(0, param.size)
        proposal[slot][component] = param.sample()[component]
    else:
        proposal[slot] = param.sample()
    # forward-backward jump probability
    hastings = param.get_logpdf(x[slot]) - param.get_logpdf(proposal[slot])
    return proposal, float(hastings)
def draw_from_cw_prior(self, x, iter, beta):
    """Propose a prior redraw of one 'cw' (continuous wave) parameter.

    Returns the proposed parameter vector and the log forward-backward
    (Hastings) correction for this asymmetric proposal.
    """
    proposal = x.copy()
    # pick one CW parameter at random
    param = np.random.choice(self.snames['cw'])
    slot = self.pmap[str(param)]
    if param.size:
        # vector parameter: redraw a single randomly chosen component
        component = np.random.randint(0, param.size)
        proposal[slot][component] = param.sample()[component]
    else:
        proposal[slot] = param.sample()
    # forward-backward jump probability
    hastings = param.get_logpdf(x[slot]) - param.get_logpdf(proposal[slot])
    return proposal, float(hastings)
def draw_from_cw_log_uniform_distribution(self, x, iter, beta):
    """Propose a uniform draw of the CW log-strain over [-18, -11]."""
    proposal = x.copy()
    slot = self.pnames.index('log10_h')
    proposal[slot] = np.random.uniform(-18, -11)
    # symmetric proposal: no Hastings correction
    return proposal, 0
def draw_from_dm_sw_prior(self, x, iter, beta):
    """Propose a prior redraw of one 'gp_sw' (solar wind GP) parameter.

    Returns the proposed parameter vector and the log forward-backward
    (Hastings) correction for this asymmetric proposal.
    """
    proposal = x.copy()
    # pick one solar-wind GP parameter at random
    param = np.random.choice(self.snames['gp_sw'])
    slot = self.pmap[str(param)]
    if param.size:
        # vector parameter: redraw a single randomly chosen component
        component = np.random.randint(0, param.size)
        proposal[slot][component] = param.sample()[component]
    else:
        proposal[slot] = param.sample()
    # forward-backward jump probability
    hastings = param.get_logpdf(x[slot]) - param.get_logpdf(proposal[slot])
    return proposal, float(hastings)
def draw_from_gw_rho_prior(self, x, iter, beta):
    """
    Jump proposals on free spec: prior redraw of one component of the
    GW free-spectrum (rho) parameter.
    """
    proposal = x.copy()
    # locate the (first) gw free-spectrum parameter by name
    parnames = [par.name for par in self.params]
    target = [pnm for pnm in parnames
              if ('gw' in pnm and 'rho' in pnm)][0]
    param = self.params[parnames.index(target)]
    slot = self.pmap[str(param)]
    if param.size:
        # vector parameter: redraw a single randomly chosen component
        component = np.random.randint(0, param.size)
        proposal[slot][component] = param.sample()[component]
    else:
        proposal[slot] = param.sample()
    # forward-backward jump probability
    hastings = param.get_logpdf(x[slot]) - param.get_logpdf(proposal[slot])
    return proposal, float(hastings)
def draw_from_signal_prior(self, x, iter, beta):
    """Propose a prior redraw of one parameter of a randomly chosen
    non-standard signal (one without a dedicated jump proposal)."""
    proposal = x.copy()
    # signals that already have dedicated jump proposals
    std = ['linear timing model',
           'red noise',
           'phys_ephem',
           'gw',
           'cw',
           'bwm',
           'fdm',
           'gp_sw',
           'ecorr_sherman-morrison',
           'ecorr',
           'efac',
           'equad',
           ]
    non_std = [nm for nm in self.snames.keys() if nm not in std]
    # pick a non-standard signal, then one of its parameters
    signal_name = np.random.choice(non_std)
    param = np.random.choice(self.snames[signal_name])
    slot = self.pmap[str(param)]
    if param.size:
        # vector parameter: redraw a single randomly chosen component
        component = np.random.randint(0, param.size)
        proposal[slot][component] = param.sample()[component]
    else:
        proposal[slot] = param.sample()
    # forward-backward jump probability
    hastings = param.get_logpdf(x[slot]) - param.get_logpdf(proposal[slot])
    return proposal, float(hastings)
def draw_from_par_prior(self, par_names):
    """Return a PTMCMC jump proposal that prior-draws parameters matching par_names.

    par_names: string or list of strings, matched as substrings against
    the PTA parameter list. Raises UserWarning if nothing matches.
    """
    # Preparing and comparing par_names with PTA parameters
    par_names = np.atleast_1d(par_names)
    matched, used_names = [], []
    for par_name in par_names:
        hits = [n for n in self.plist if par_name in n]
        if hits:
            matched.append(hits)
            used_names.append(par_name)
    if not matched:
        raise UserWarning("No parameter prior match found between {} and PTA.object."
                          .format(par_names))
    matched = np.concatenate(matched, axis=None)

    def draw(x, iter, beta):
        """Prior draw function generator for custom par_names.
        par_names: list of strings
        The function signature is specific to PTMCMCSampler.
        """
        proposal = x.copy()
        # randomly choose parameter
        chosen = np.random.choice(matched)
        param = self.params[self.plist.index(chosen)]
        slot = self.pmap[str(param)]
        if param.size:
            # vector parameter: jump in one random component
            component = np.random.randint(0, param.size)
            proposal[slot][component] = param.sample()[component]
        else:
            proposal[slot] = param.sample()
        # forward-backward jump probability
        hastings = param.get_logpdf(x[slot]) - param.get_logpdf(proposal[slot])
        return proposal, float(hastings)

    draw.__name__ = 'draw_from_{}_prior'.format('_'.join(used_names))
    return draw
def draw_from_par_log_uniform(self, par_dict):
    """Return a PTMCMC jump proposal drawing uniformly within given log-ranges.

    :param par_dict: dictionary with {"par_names": (lower bound, upper bound)},
        i.e. {string: (float, float)}; keys are matched as substrings against
        PTA parameter names containing 'log'.
    :raises UserWarning: if no PTA parameter matches any key.
    """
    # Preparing and comparing par_dict.keys() with PTA parameters
    par_list = []
    name_list = []
    for par_name in par_dict.keys():
        pn_list = [n for n in self.plist if par_name in n and 'log' in n]
        if pn_list:
            par_list.append(pn_list)
            name_list.append(par_name)
    if not par_list:
        raise UserWarning("No parameter dictionary match found between {} and PTA.object."
                          .format(par_dict.keys()))
    par_list = np.concatenate(par_list, axis=None)

    def draw(x, iter, beta):
        """log uniform prior draw function generator for custom par_names.
        par_dict: dictionary with {"par_names":(lower bound,upper bound)}
                   { "string":(float,float)}
        The function signature is specific to PTMCMCSampler.
        """
        q = x.copy()
        # draw parameter from signal model
        idx_name = np.random.choice(par_list)
        idx = self.plist.index(idx_name)
        # BUG FIX: the range must come from the par_dict key matching the
        # *chosen* parameter; previously the loop variable `par_name` leaked
        # in here, so every draw used the range of the last key in par_dict.
        key = [kk for kk in par_dict if kk in idx_name][0]
        q[idx] = np.random.uniform(par_dict[key][0], par_dict[key][1])
        return q, 0

    name_string = '_'.join(name_list)
    draw.__name__ = 'draw_from_{}_log_uniform'.format(name_string)
    return draw
def draw_from_psr_prior(self, x, iter, beta):
    """Redraw every parameter of one randomly chosen pulsar from its prior."""
    proposal = x.copy()
    # pick a pulsar and collect the indices of all of its parameters
    psr = np.random.choice(self.psrnames)
    slots = [self.pimap[par] for par in self.pnames if psr in par]
    for slot in slots:
        proposal[slot] = self.params[slot].sample()
    # forward-backward jump probability, summed over the redrawn parameters
    logp_old = np.sum([self.params[slot].get_logpdf(x[slot]) for slot in slots])
    logp_new = np.sum([self.params[slot].get_logpdf(proposal[slot]) for slot in slots])
    return proposal, float(logp_old - logp_new)
def draw_from_signal(self, signal_names):
    """Return a PTMCMC jump proposal that prior-draws parameters of given signals.

    :param signal_names: string or list of strings naming PTA signals;
        unknown names are skipped.
    :raises UserWarning: if none of the names matches a PTA signal.
    """
    # Preparing and comparing signal_names with PTA signals
    signal_names = np.atleast_1d(signal_names)
    signal_list = []
    name_list = []
    for signal_name in signal_names:
        try:
            param_list = self.snames[signal_name]
        except KeyError:
            # unknown signal name: skip it.  (This was previously a bare
            # `except:`, which also swallowed unrelated errors.)
            continue
        signal_list.append(param_list)
        name_list.append(signal_name)
    if not signal_list:
        raise UserWarning("No signal match found between {} and PTA.object!"
                          .format(signal_names))
    signal_list = np.concatenate(signal_list, axis=None)

    def draw(x, iter, beta):
        """Signal draw function generator for custom signal_names.
        signal_names: list of strings
        The function signature is specific to PTMCMCSampler.
        """
        q = x.copy()
        # draw parameter from signal model
        param = np.random.choice(signal_list)
        if param.size:
            # vector parameter: jump in one random component
            idx2 = np.random.randint(0, param.size)
            q[self.pmap[str(param)]][idx2] = param.sample()[idx2]
        # scalar parameter
        else:
            q[self.pmap[str(param)]] = param.sample()
        # forward-backward jump probability
        lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) -
                param.get_logpdf(q[self.pmap[str(param)]]))
        return q, float(lqxy)

    name_string = '_'.join(name_list)
    draw.__name__ = 'draw_from_{}_signal'.format(name_string)
    return draw
def fe_jump(self, x, iter, beta):
    """Propose CW sky location and frequency from the F-statistic (Fe) map.

    Rejection-samples (frequency, sky position) proportionally to the
    pre-computed Fe map, redraws the remaining CW parameters from their
    priors, and returns the proposal with its log Hastings correction.
    """
    q = x.copy()
    fe_limit = np.max(self.fe)
    # rejection-sample sky location and frequency from the Fe map
    accepted = False
    while accepted is False:
        log_f_new = self.params[self.pimap['log10_fgw']].sample()
        f_idx = (np.abs(np.log10(self.fe_freqs) - log_f_new)).argmin()
        gw_theta = np.arccos(self.params[self.pimap['cos_gwtheta']].sample())
        gw_phi = self.params[self.pimap['gwphi']].sample()
        hp_idx = hp.ang2pix(hp.get_nside(self.fe), gw_theta, gw_phi)
        fe_new_point = self.fe[f_idx, hp_idx]
        if np.random.uniform() < (fe_new_point / fe_limit):
            accepted = True
    # draw other parameters from prior
    cos_inc = self.params[self.pimap['cos_inc']].sample()
    psi = self.params[self.pimap['psi']].sample()
    phase0 = self.params[self.pimap['phase0']].sample()
    log10_h = self.params[self.pimap['log10_h']].sample()
    # put new parameters into q
    for param_name, new_param in zip(['log10_fgw', 'gwphi', 'cos_gwtheta', 'cos_inc', 'psi', 'phase0', 'log10_h'],
                                     [log_f_new, gw_phi, np.cos(gw_theta), cos_inc, psi, phase0, log10_h]):
        q[self.pimap[param_name]] = new_param
    # calculate Hastings ratio: Fe density at the old point...
    log_f_old = x[self.pimap['log10_fgw']]
    f_idx_old = (np.abs(np.log10(self.fe_freqs) - log_f_old)).argmin()
    gw_theta_old = np.arccos(x[self.pimap['cos_gwtheta']])
    gw_phi_old = x[self.pimap['gwphi']]
    hp_idx_old = hp.ang2pix(hp.get_nside(self.fe), gw_theta_old, gw_phi_old)
    fe_old_point = self.fe[f_idx_old, hp_idx_old]
    if fe_old_point > fe_limit:
        fe_old_point = fe_limit
    log10_h_old = x[self.pimap['log10_h']]
    phase0_old = x[self.pimap['phase0']]
    psi_old = x[self.pimap['psi']]
    cos_inc_old = x[self.pimap['cos_inc']]
    # ...times the prior-density ratio of the parameters drawn from their
    # priors.  BUG FIX: the phase0/psi/cos_inc lines below used `=` instead
    # of `*=`, discarding the factors accumulated up to that point.
    hastings_extra_factor = self.params[self.pimap['log10_h']].get_pdf(log10_h_old)
    hastings_extra_factor *= 1/self.params[self.pimap['log10_h']].get_pdf(log10_h)
    hastings_extra_factor *= self.params[self.pimap['phase0']].get_pdf(phase0_old)
    hastings_extra_factor *= 1/self.params[self.pimap['phase0']].get_pdf(phase0)
    hastings_extra_factor *= self.params[self.pimap['psi']].get_pdf(psi_old)
    hastings_extra_factor *= 1/self.params[self.pimap['psi']].get_pdf(psi)
    hastings_extra_factor *= self.params[self.pimap['cos_inc']].get_pdf(cos_inc_old)
    hastings_extra_factor *= 1/self.params[self.pimap['cos_inc']].get_pdf(cos_inc)
    lqxy = np.log(fe_old_point/fe_new_point * hastings_extra_factor)
    return q, float(lqxy)
def get_global_parameters(pta):
    """Utility function for finding global parameters.

    Returns (gpars, ipars): parameters shared by more than one signal
    collection ("global"), and those unique to a single one.
    """
    all_pars = []
    for sc in pta._signalcollections:
        all_pars.extend(sc.param_names)
    # a parameter appearing more than once is shared across collections
    gpars = list(set(par for par in all_pars if all_pars.count(par) > 1))
    ipars = [par for par in all_pars if par not in gpars]
    return np.array(gpars), np.array(ipars)
def get_parameter_groups(pta):
    """Utility function to get parameter groupings for sampling.

    Returns a list of index groups: one with all parameters, one with all
    global parameters (if any), and one per signal with its non-global
    parameters.
    """
    params = pta.param_names
    # start with a single group containing every parameter
    groups = [list(np.arange(0, len(params)))]
    # get global and individual parameters
    gpars, _ = get_global_parameters(pta)
    if gpars.size:
        # one group holding all globally shared parameters
        groups.append([params.index(gp) for gp in gpars])
    # one group per signal, restricted to its non-global parameters
    for sc in pta._signalcollections:
        for signal in sc._signals:
            members = [params.index(p) for p in signal.param_names
                       if not gpars.size or p not in gpars]
            if members:
                groups.append(members)
    return groups
def get_psr_groups(pta):
    """Return one index group per pulsar, containing all of its parameters."""
    return [[pta.param_names.index(par)
             for par in pta.param_names if psr in par]
            for psr in pta.pulsars]
def get_cw_groups(pta):
    """Utility function to get parameter groups for CW sampling.
    These groups should be appended to the usual get_parameter_groups()
    output.
    """
    # natural CW parameter blocks: angles; mass/frequency/distance/strain;
    # and the frequency-evolution set
    cw_blocks = (['costheta', 'phi', 'cosinc', 'phase0', 'psi'],
                 ['log10_Mc', 'log10_fgw', 'log10_dL', 'log10_h'],
                 ['log10_Mc', 'log10_fgw', 'pdist', 'pphase'])
    return [group_from_params(pta, block) for block in cw_blocks]
def group_from_params(pta, params):
    """Return indices of all PTA parameters whose name contains any entry of params."""
    indices = []
    for fragment in params:
        indices.extend(pta.param_names.index(name)
                       for name in pta.param_names if fragment in name)
    return indices
def save_runtime_info(pta, outdir='chains', human=None):
    """save system info, enterprise PTA.summary, and other metadata to file
    """
    # save system info and enterprise PTA.summary to single file
    sysinfo = {}
    if human is not None:
        sysinfo["human"] = human
    sysinfo.update(platform.uname()._asdict())
    with open(os.path.join(outdir, "runtime_info.txt"), "w") as fout:
        for field, data in sysinfo.items():
            fout.write(field + " : " + data + "\n")
        fout.write("\n")
        fout.write("enterprise_extensions v" + __version__ + "\n")
        fout.write("PTMCMCSampler v" + __vPTMCMC__ + "\n")
        fout.write(pta.summary())
    # save parameter list, one name per line
    with open(os.path.join(outdir, "pars.txt"), "w") as fout:
        fout.writelines(pname + "\n" for pname in pta.param_names)
    # save list of priors, one repr per line
    with open(os.path.join(outdir, "priors.txt"), "w") as fout:
        fout.writelines(repr(pp) + "\n" for pp in pta.params)
def setup_sampler(pta, outdir='chains', resume=False,
                  empirical_distr=None, groups=None, human=None,
                  save_ext_dists=False, loglkwargs={}, logpkwargs={}):
    """
    Sets up an instance of PTMCMC sampler.
    We initialize the sampler the likelihood and prior function
    from the PTA object. We set up an initial jump covariance matrix
    with fairly small jumps as this will be adapted as the MCMC runs.
    We will setup an output directory in `outdir` that will contain
    the chain (first n columns are the samples for the n parameters
    and last 4 are log-posterior, log-likelihood, acceptance rate, and
    an indicator variable for parallel tempering but it doesn't matter
    because we aren't using parallel tempering).
    We then add several custom jump proposals to the mix based on
    whether or not certain parameters are in the model. These are
    all either draws from the prior distribution of parameters or
    draws from uniform distributions.
    save_ext_dists: saves distributions that have been extended to
    cover priors as a pickle to the outdir folder. These can then
    be loaded later as distributions to save a minute at the start
    of the run.

    :param pta: enterprise PTA object supplying likelihood, prior and params
    :param outdir: output directory for the chain and metadata
    :param resume: resume from an existing chain (reuses cov.npy in outdir)
    :param empirical_distr: pickle file / list of empirical proposal distributions
    :param groups: optional parameter-index groups; computed from pta if None
    :param human: name of the person running the analysis (recorded in metadata)
    :param loglkwargs: extra kwargs forwarded to the likelihood
    :param logpkwargs: extra kwargs forwarded to the prior
    :returns: a configured PTMCMCSampler instance (with `.jp` set to the
        JumpProposal used)
    """
    # dimension of parameter space
    params = pta.param_names
    ndim = len(params)
    # initial jump covariance matrix
    if os.path.exists(outdir+'/cov.npy') and resume:
        cov = np.load(outdir+'/cov.npy')
        # check that the one we load is the same shape as our data
        cov_new = np.diag(np.ones(ndim) * 0.1**2)
        if cov.shape != cov_new.shape:
            msg = 'The covariance matrix (cov.npy) in the output folder is '
            msg += 'the wrong shape for the parameters given. '
            msg += 'Start with a different output directory or '
            msg += 'change resume to False to overwrite the run that exists.'
            raise ValueError(msg)
    else:
        # small initial jumps; adapted as the MCMC runs
        cov = np.diag(np.ones(ndim) * 0.1**2)
    # parameter groupings
    if groups is None:
        groups = get_parameter_groups(pta)
    sampler = ptmcmc(ndim, pta.get_lnlikelihood, pta.get_lnprior, cov, groups=groups,
                     outDir=outdir, resume=resume, loglkwargs=loglkwargs,
                     logpkwargs=logpkwargs)
    save_runtime_info(pta, sampler.outDir, human)
    # additional jump proposals
    jp = JumpProposal(pta, empirical_distr=empirical_distr, save_ext_dists=save_ext_dists, outdir=outdir)
    sampler.jp = jp
    # always add draw from prior
    sampler.addProposalToCycle(jp.draw_from_prior, 5)
    # try adding empirical proposals
    if empirical_distr is not None:
        print('Attempting to add empirical proposals...\n')
        sampler.addProposalToCycle(jp.draw_from_empirical_distr, 10)
    # Red noise prior draw
    if 'red noise' in jp.snames:
        print('Adding red noise prior draws...\n')
        sampler.addProposalToCycle(jp.draw_from_red_prior, 10)
    # DM GP noise prior draw
    if 'dm_gp' in jp.snames:
        print('Adding DM GP noise prior draws...\n')
        sampler.addProposalToCycle(jp.draw_from_dm_gp_prior, 10)
    # DM annual prior draw
    if 'dm_s1yr' in jp.snames:
        print('Adding DM annual prior draws...\n')
        sampler.addProposalToCycle(jp.draw_from_dm1yr_prior, 10)
    # DM dip prior draw
    if 'dmexp' in jp.snames:
        print('Adding DM exponential dip prior draws...\n')
        sampler.addProposalToCycle(jp.draw_from_dmexpdip_prior, 10)
    # DM cusp prior draw
    if 'dm_cusp' in jp.snames:
        print('Adding DM exponential cusp prior draws...\n')
        sampler.addProposalToCycle(jp.draw_from_dmexpcusp_prior, 10)
    # DMX prior draw
    if 'dmx_signal' in jp.snames:
        print('Adding DMX prior draws...\n')
        sampler.addProposalToCycle(jp.draw_from_dmx_prior, 10)
    # Ephemeris prior draw
    if 'd_jupiter_mass' in pta.param_names:
        print('Adding ephemeris model prior draws...\n')
        sampler.addProposalToCycle(jp.draw_from_ephem_prior, 10)
    # GWB uniform distribution draw
    if np.any([('gw' in par and 'log10_A' in par) for par in pta.param_names]):
        print('Adding GWB uniform distribution draws...\n')
        sampler.addProposalToCycle(jp.draw_from_gwb_log_uniform_distribution, 10)
    # Dipole uniform distribution draw
    if 'dipole_log10_A' in pta.param_names:
        print('Adding dipole uniform distribution draws...\n')
        sampler.addProposalToCycle(jp.draw_from_dipole_log_uniform_distribution, 10)
    # Monopole uniform distribution draw
    if 'monopole_log10_A' in pta.param_names:
        print('Adding monopole uniform distribution draws...\n')
        sampler.addProposalToCycle(jp.draw_from_monopole_log_uniform_distribution, 10)
    # Altpol uniform distribution draw
    if 'log10Apol_tt' in pta.param_names:
        print('Adding alternative GW-polarization uniform distribution draws...\n')
        sampler.addProposalToCycle(jp.draw_from_altpol_log_uniform_distribution, 10)
    # BWM prior draw
    if 'bwm_log10_A' in pta.param_names:
        print('Adding BWM prior draws...\n')
        sampler.addProposalToCycle(jp.draw_from_bwm_prior, 10)
    # FDM prior draw
    if 'fdm_log10_A' in pta.param_names:
        print('Adding FDM prior draws...\n')
        sampler.addProposalToCycle(jp.draw_from_fdm_prior, 10)
    # CW prior draw
    if 'cw_log10_h' in pta.param_names:
        print('Adding CW strain prior draws...\n')
        sampler.addProposalToCycle(jp.draw_from_cw_log_uniform_distribution, 10)
    if 'cw_log10_Mc' in pta.param_names:
        print('Adding CW prior draws...\n')
        # NOTE(review): JumpProposal defines draw_from_cw_prior above;
        # confirm a draw_from_cw_distribution attribute actually exists.
        sampler.addProposalToCycle(jp.draw_from_cw_distribution, 10)
    # free spectrum prior draw
    if np.any(['log10_rho' in par for par in pta.param_names]):
        print('Adding free spectrum prior draws...\n')
        sampler.addProposalToCycle(jp.draw_from_gw_rho_prior, 25)
    return sampler
| 47,414 | 37.330639 | 165 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/model_utils.py | # -*- coding: utf-8 -*-
import time
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as scistats
try:
import acor
except ImportError:
from emcee.autocorr import integrated_time as acor
from enterprise_extensions import models
# Log-spaced frequencies
def linBinning(T, logmode, f_min, nlin, nlog):
    """
    Get the frequency binning for the low-rank approximations, including
    log-spaced low-frequency coverage.
    Credit: van Haasteren & Vallisneri, MNRAS, Vol. 446, Iss. 2 (2015)
    :param T: Duration experiment
    :param logmode: From which linear mode to switch to log
    :param f_min: Down to which frequency we'll sample
    :param nlin: How many linear frequencies we'll use
    :param nlog: How many log frequencies we'll use
    :return: (frequencies, weights), both 1-D arrays of length nlog+nlin
    """
    if logmode < 0:
        raise ValueError("Cannot do log-spacing when all frequencies are"
                         "linearly sampled")
    # linearly spaced part: modes logmode+1 ... logmode+nlin, spacing 1/T
    df_lin = 1.0 / T
    f_lo = (1.0 + logmode) / T
    freqs_lin = np.linspace(f_lo, f_lo + (nlin - 1) * df_lin, nlin)
    weights_lin = np.sqrt(df_lin * np.ones(nlin))
    if nlog <= 0:
        return freqs_lin, weights_lin
    # log-spaced part covering [f_min, (logmode+0.5)/T], bin centers in log space
    log_lo = np.log(f_min)
    log_hi = np.log((logmode + 0.5) / T)
    dlog = (log_hi - log_lo) / (nlog)
    freqs_log = np.exp(np.linspace(log_lo + 0.5 * dlog,
                                   log_hi - 0.5 * dlog, nlog))
    weights_log = np.sqrt(dlog * freqs_log)
    return np.append(freqs_log, freqs_lin), np.append(weights_log, weights_lin)
# New filter for different cadences
def cadence_filter(psr, start_time=None, end_time=None, cadence=None):
    """ Filter data for coarser cadences.

    Thins the TOAs of ``psr`` *in place* inside the [start_time, end_time]
    window so kept TOAs are roughly ``cadence`` days apart; data outside
    the window is kept unchanged.

    :param psr: enterprise Pulsar object (mutated in place)
    :param start_time: window start (days; compared to toas/86400 — TOAs
        appear to be stored in seconds, confirm against Pulsar class)
    :param end_time: window end (days)
    :param cadence: desired spacing of kept TOAs (days)
    """
    if start_time is None and end_time is None and cadence is None:
        # nothing requested: keep every TOA
        mask = np.ones(psr._toas.shape, dtype=bool)
    else:
        # find start and end indices of cadence filtering
        start_idx = (np.abs((psr._toas / 86400) - start_time)).argmin()
        end_idx = (np.abs((psr._toas / 86400) - end_time)).argmin()
        # make a safe copy of sliced toas
        tmp_toas = psr._toas[start_idx:end_idx+1].copy()
        # cumulative sum of time differences
        cumsum = np.cumsum(np.diff(tmp_toas / 86400))
        tspan = (tmp_toas.max() - tmp_toas.min()) / 86400
        # find closest indices of sliced toas to desired cadence
        mask = []
        for ii in np.arange(1.0, tspan, cadence):
            idx = (np.abs(cumsum - ii)).argmin()
            mask.append(idx)
        # append start and end segements with cadence-sliced toas
        mask = np.append(np.arange(start_idx),
                         np.array(mask) + start_idx)
        mask = np.append(mask, np.arange(end_idx, len(psr._toas)))
    # apply the mask to every per-TOA array the Pulsar object carries
    psr._toas = psr._toas[mask]
    psr._toaerrs = psr._toaerrs[mask]
    psr._residuals = psr._residuals[mask]
    psr._ssbfreqs = psr._ssbfreqs[mask]
    psr._designmatrix = psr._designmatrix[mask, :]
    # drop design-matrix columns that became all-zero after masking
    dmx_mask = np.sum(psr._designmatrix, axis=0) != 0.0
    psr._designmatrix = psr._designmatrix[:, dmx_mask]
    for key in psr._flags:
        psr._flags[key] = psr._flags[key][mask]
    if psr._planetssb is not None:
        # NOTE(review): reads the public `planetssb` attribute but writes the
        # private one — confirm this is intentional
        psr._planetssb = psr.planetssb[mask, :, :]
    psr.sort_data()
def get_tspan(psrs):
    """ Returns maximum time span for all pulsars.
    :param psrs: List of pulsar objects
    """
    earliest = min(p.toas.min() for p in psrs)
    latest = max(p.toas.max() for p in psrs)
    return latest - earliest
class PostProcessing(object):
    """Quick-look chain diagnostics: trace and histogram plots after burn-in."""

    def __init__(self, chain, pars, burn_percentage=0.25):
        # discard the first burn_percentage of samples as burn-in
        burn = int(burn_percentage * chain.shape[0])
        self.chain = chain[burn:]
        self.pars = pars

    def _grid(self):
        # subplot grid: 4 columns unless there is a single parameter
        ndim = len(self.pars)
        if ndim > 1:
            ncols = 4
            nrows = int(np.ceil(ndim / ncols))
        else:
            ncols, nrows = 1, 1
        return ndim, ncols, nrows

    def plot_trace(self, plot_kwargs={}):
        """Plot the post-burn-in trace of every parameter."""
        ndim, ncols, nrows = self._grid()
        plt.figure(figsize=(15, 2 * nrows))
        for ii in range(ndim):
            plt.subplot(nrows, ncols, ii + 1)
            plt.plot(self.chain[:, ii], **plot_kwargs)
            plt.title(self.pars[ii], fontsize=8)
        plt.tight_layout()

    def plot_hist(self, hist_kwargs={'bins': 50, 'normed': True}):
        """Plot a histogram of every parameter's post-burn-in samples."""
        ndim, ncols, nrows = self._grid()
        plt.figure(figsize=(15, 2 * nrows))
        for ii in range(ndim):
            plt.subplot(nrows, ncols, ii + 1)
            plt.hist(self.chain[:, ii], **hist_kwargs)
            plt.title(self.pars[ii], fontsize=8)
        plt.tight_layout()
def ul(chain, q=95.0):
    """
    Computes upper limit and associated uncertainty.
    :param chain: MCMC samples of GWB (or common red noise) amplitude
    :param q: desired percentile of upper-limit value [out of 100, default=95]
    :returns: (upper limit, uncertainty on upper limit)
    """
    # empirical pdf of the (linear) amplitude, used to convert the counting
    # error on the quantile into an error on the amplitude itself
    hist_dist = scistats.rv_histogram(np.histogram(10.0**chain, bins=100))
    A_ul = 10**np.percentile(chain, q=q)
    p_ul = hist_dist.pdf(A_ul)
    # binomial error on the quantile, scaled by the effective number of
    # independent samples (chain length / autocorrelation time)
    frac = q / 100.
    n_eff = chain.shape[0] / acor.acor(chain)[0]
    Aul_error = np.sqrt(frac * (1.0 - frac) / n_eff) / p_ul
    return A_ul, Aul_error
def bayes_fac(samples, ntol=200, logAmin=-18, logAmax=-14):
    """
    Computes the Savage Dickey Bayes Factor and uncertainty.
    :param samples: MCMC samples of GWB (or common red noise) amplitude
    :param ntol: Tolerance on number of samples in bin
    :returns: (bayes factor, 1-sigma bayes factor uncertainty)
    """
    prior = 1 / (logAmax - logAmin)
    N = len(samples)
    bf, bf_err, mask = [], [], []
    # scan a range of bin widths at the lower prior edge; keep only the
    # widths whose bin holds more than ntol samples
    for ii, delta in enumerate(np.linspace(0.01, 0.1, 100)):
        n = np.sum(samples <= (logAmin + delta))
        post = n / N / delta
        bf.append(prior / post)
        bf_err.append(bf[ii] / np.sqrt(n))
        if n > ntol:
            mask.append(ii)
    return np.mean(np.array(bf)[mask]), np.std(np.array(bf)[mask])
def odds_ratio(chain, models=[0, 1], uncertainty=True, thin=False):
    """Posterior odds between the two models of a product-space model-index chain.

    :param chain: samples of the (continuous) model indicator
    :param models: the two model labels to compare
    :param uncertainty: also return the MC uncertainty on the odds ratio
    :param thin: thin the chain by its autocorrelation length first
    :returns: bf, or (bf, sigma) when uncertainty is requested
    """
    if thin:
        n_indep = np.rint(chain.shape[0] / acor.acor(chain)[0])
        samples = np.random.choice(chain.copy(), int(n_indep))
    else:
        samples = chain.copy()
    in_top = np.rint(samples) == max(models)
    in_bot = np.rint(samples) == min(models)
    top = float(np.sum(in_top))
    bot = float(np.sum(in_bot))
    # guard against one model never being visited
    if top == 0.0 and bot != 0.0:
        bf = 1.0 / bot
    elif bot == 0.0 and top != 0.0:
        bf = top
    else:
        bf = top / bot
    if not uncertainty:
        return bf
    if bot == 0. or top == 0.:
        sigma = 0.0
    else:
        # count transitions out of each model; the effective number of
        # independent model visits sets the uncertainty
        ct_tb = sum(1 for ii in range(len(in_top) - 1)
                    if in_top[ii] and not in_top[ii + 1])
        ct_bt = sum(1 for ii in range(len(in_bot) - 1)
                    if in_bot[ii] and not in_bot[ii + 1])
        try:
            sigma = bf * np.sqrt((top - ct_tb) / (top * ct_tb) +
                                 (bot - ct_bt) / (bot * ct_bt))
        except ZeroDivisionError:
            sigma = 0.0
    return bf, sigma
def bic(chain, nobs, log_evidence=False):
    """
    Computes the Bayesian Information Criterion.
    :param chain: MCMC samples of all parameters, plus meta-data
    :param nobs: Number of observations in data
    :param log_evidence: return evidence estimate too?
    :returns: bic, or (bic, log evidence) when log_evidence is True
    """
    n_params = chain.shape[1] - 4  # the last 4 columns are sampler meta-data
    max_lnlike = chain[:, -4].max()
    value = np.log(nobs) * n_params - 2.0 * max_lnlike
    # the BIC approximation to the log evidence is -bic/2
    return (value, -0.5 * value) if log_evidence else value
def mask_filter(psr, mask):
    """filter given pulsar data by user defined mask

    Applies ``mask`` *in place* to every per-TOA array the Pulsar object
    carries (TOAs, errors, residuals, frequencies, design matrix, flags,
    planet SSB positions), then re-sorts the data.

    :param psr: enterprise Pulsar object (mutated in place)
    :param mask: boolean or index array selecting the TOAs to keep
    """
    psr._toas = psr._toas[mask]
    psr._toaerrs = psr._toaerrs[mask]
    psr._residuals = psr._residuals[mask]
    psr._ssbfreqs = psr._ssbfreqs[mask]
    psr._designmatrix = psr._designmatrix[mask, :]
    # drop design-matrix columns that became all-zero after masking
    dmx_mask = np.sum(psr._designmatrix, axis=0) != 0.0
    psr._designmatrix = psr._designmatrix[:, dmx_mask]
    for key in psr._flags:
        psr._flags[key] = psr._flags[key][mask]
    if psr._planetssb is not None:
        # NOTE(review): reads the public `planetssb` attribute but writes the
        # private one — confirm this is intentional
        psr._planetssb = psr.planetssb[mask, :, :]
    psr.sort_data()
class CompareTimingModels():
    """
    Compare difference between the usual and marginalized timing models.
    After instantiating, the __call__() method can be used for sampling for any number of points.
    To see the results, use the results() method.
    :param psrs: Pulsar object containing pulsars from model
    :param model_name: String name of model to test. Model must be defined in enterprise_extensions.models.
    :param abs_tol: absolute tolerance for error between timing models (default 1e-3), set to None to bypass errors
    :param rel_tol: relative tolerance for error between timing models (default 1e-6), set to None to bypass errors
    :param dense: use the dense cholesky algorithm over sparse
    """
    def __init__(self, psrs, model_name='model_1', abs_tol=1e-3, rel_tol=1e-6, dense=True, **kwargs):
        # look up the model factory by name in enterprise_extensions.models
        model = getattr(models, model_name)
        self.abs_tol = abs_tol
        self.rel_tol = rel_tol
        if dense:
            self.pta_marg = model(psrs, tm_marg=True, dense_like=True, **kwargs)  # marginalized model
        else:
            self.pta_marg = model(psrs, tm_marg=True, **kwargs)  # marginalized model
        self.pta_norm = model(psrs, **kwargs)  # normal model
        # constant offset from the marginalization normalization
        # (one factor of 1e40 per timing-model design-matrix column)
        self.tm_correction = 0
        for psr in psrs:
            self.tm_correction -= 0.5 * np.log(1e40) * psr.Mmat.shape[1]
        # per-call error records, filled by __call__()
        self.abs_err = []
        self.rel_err = []
        self.count = 0
    def check_timing(self, number=10_000):
        """Time `number` likelihood evaluations of both models and print
        the speedup of the marginalized model; returns the speedup ratio."""
        # first measure the cost of drawing samples alone so it can be
        # subtracted from the likelihood timings below
        print('Timing sample creation...')
        start = time.time()
        for __ in range(number):
            x0 = np.hstack([p.sample() for p in self.pta_marg.params])
        end = time.time()
        sample_time = end - start
        print('Sampling {0} points took {1} seconds.'.format(number, sample_time))
        print('Timing MarginalizedTimingModel...')
        start = time.time()
        for __ in range(number):
            x0 = np.hstack([p.sample() for p in self.pta_marg.params])
            self.pta_marg.get_lnlikelihood(x0)
        end = time.time()
        time_marg = end - start - sample_time  # remove sampling time from total time taken
        print('Sampling {0} points took {1} seconds.'.format(number, time_marg))
        print('Timing TimingModel...')
        start = time.time()
        for __ in range(number):
            x0 = np.hstack([p.sample() for p in self.pta_marg.params])
            self.pta_norm.get_lnlikelihood(x0)
        end = time.time()
        time_norm = end - start - sample_time  # remove sampling time from total time taken
        print('Sampling {0} points took {1} seconds.'.format(number, time_norm))
        res = time_norm / time_marg
        print('MarginalizedTimingModel is {0} times faster than TimingModel after {1} points.'.format(res, number))
        return res
    def get_sample_point(self):
        """Draw one random parameter vector from the (marginalized) model priors."""
        x0 = np.hstack([p.sample() for p in self.pta_marg.params])
        return x0
    def __call__(self, x0):
        """Evaluate both likelihoods at x0, record the absolute/relative
        discrepancy, and raise ValueError if a tolerance is exceeded.
        Returns the normal (non-marginalized) log-likelihood."""
        res_norm = self.pta_norm.get_lnlikelihood(x0)
        res_marg = self.pta_marg.get_lnlikelihood(x0)
        abs_err = np.abs(res_marg - res_norm)
        rel_err = abs_err / res_norm
        self.abs_err.append(abs_err)
        self.rel_err.append(rel_err)
        self.count += 1
        if self.abs_tol is not None and abs_err > self.abs_tol:
            abs_raise = 'Absolute error is {0} at {1} which is larger than abs_tol of {2}.'.format(
                abs_err, x0, self.abs_tol)
            raise ValueError(abs_raise)
        elif self.rel_tol is not None and rel_err > self.rel_tol:
            rel_raise = 'Relative error is {0} at {1} which is larger than rel_tol of {2}.'.format(
                rel_err, x0, self.rel_tol)
            raise ValueError(rel_raise)
        return res_norm
    def results(self):
        """Print a summary and return the recorded (abs_err, rel_err) lists."""
        print('Number of points evaluated:', self.count)
        print('Maximum absolute error:', np.max(self.abs_err))
        print('Maximum relative error:', np.max(self.rel_err))
        return self.abs_err, self.rel_err
| 12,737 | 31.914729 | 115 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/empirical_distr.py | # -*- coding: utf-8 -*-
import logging
import pickle
import numpy as np
try:
from sklearn.neighbors import KernelDensity
sklearn_available=True
except ModuleNotFoundError:
sklearn_available=False
from scipy.interpolate import interp1d, interp2d
logger = logging.getLogger(__name__)
class EmpiricalDistribution1D(object):
    """
    Class used to define a 1D empirical distribution
    based on posterior from another MCMC.
    :param samples: samples for hist
    :param bins: edges to use for hist (left and right) make sure bins
        cover whole prior!
    """

    def __init__(self, param_name, samples, bins):
        self.ndim = 1
        self.param_name = param_name
        self._Nbins = len(bins) - 1
        counts, edges = np.histogram(samples, bins=bins)
        self._edges = edges
        self._wids = np.diff(edges)
        # add one pseudo-count per bin so the pdf has support everywhere
        counts = counts + 1
        total = np.sum(counts)
        self._pdf = counts / float(total) / self._wids
        self._cdf = np.cumsum((self._pdf * self._wids).ravel())
        self._logpdf = np.log(self._pdf)

    def draw(self):
        """Draw one sample: pick a bin via the cdf, then uniform within it."""
        u = np.random.rand()
        which = np.unravel_index(np.searchsorted(self._cdf, u, side='right'),
                                 self._Nbins)[0]
        return np.array(self._edges[which] + self._wids[which] * np.random.rand())

    def prob(self, params):
        """Probability density at params."""
        return self._pdf[np.searchsorted(self._edges, params) - 1]

    def logprob(self, params):
        """Log probability density at params."""
        return self._logpdf[np.searchsorted(self._edges, params) - 1]
class EmpiricalDistribution1DKDE(object):
    """
    1D empirical distribution described by a Gaussian KDE fit to posterior
    samples from another MCMC, with a fast interpolated log-pdf.

    :param param_name: parameter name
    :param samples: 1-D array of posterior samples
    :param minval: lower edge of the support (defaults to min(samples))
    :param maxval: upper edge of the support (defaults to max(samples))
    :param bandwidth: Gaussian KDE bandwidth
    :param nbins: number of interpolation nodes for the log-pdf
    """

    def __init__(self, param_name, samples, minval=None, maxval=None, bandwidth=0.1, nbins=40):
        self.ndim = 1
        self.param_name = param_name
        self.bandwidth = bandwidth
        # fit a 1D Gaussian KDE to the samples (sklearn expects shape (n, 1))
        self.kde = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(
            samples.reshape((samples.size, 1)))
        if minval is None:
            # without explicit bounds the distribution may not have support
            # over the full prior range
            minval = min(samples)
            maxval = max(samples)
        self.minval = minval
        self.maxval = maxval
        # evaluating the KDE is slow, so pre-compute the log-pdf on a grid
        # and interpolate within the support.
        xvals = np.linspace(minval, maxval, num=nbins)
        self._Nbins = nbins
        scores = np.array([self.kde.score(np.atleast_2d(xval)) for xval in xvals])
        # BUG FIX: bounds_error must be disabled for fill_value to take
        # effect; previously any evaluation outside [minval, maxval] raised
        # a ValueError instead of returning the intended -1000 floor.
        self._logpdf = interp1d(xvals, scores, kind='linear',
                                bounds_error=False, fill_value=-1000)

    def draw(self):
        """Draw one sample from the KDE."""
        params = self.kde.sample(1).T
        return params.squeeze()
# class used to define a 2D empirical distribution
# based on posteriors from another MCMC
class EmpiricalDistribution2D(object):
    """
    Class used to define a 2D empirical distribution
    based on posteriors from another MCMC.

    :param param_names: pair of parameter names
    :param samples: samples for the 2D histogram, shape (2, Nsamples)
    :param bins: bin edges for each dimension;
        make sure the bins cover the whole prior!
    """
    def __init__(self, param_names, samples, bins):
        self.ndim = 2
        self.param_names = param_names
        self._Nbins = [len(edges) - 1 for edges in bins]
        counts2d, xedges, yedges = np.histogram2d(*samples, bins=bins)
        self._edges = np.array([xedges, yedges])
        self._wids = np.diff([xedges, yedges])

        cell_area = np.outer(*self._wids)
        # one pseudo-count per bin keeps log(pdf) finite everywhere
        counts2d += 1
        total = np.sum(counts2d)
        self._pdf = counts2d / total / cell_area
        self._cdf = np.cumsum((self._pdf * cell_area).ravel())

        self._logpdf = np.log(self._pdf)

    def draw(self):
        """Draw one random sample via inverse-CDF sampling over the 2D bins."""
        u = np.random.rand()
        flat_bin = np.searchsorted(self._cdf, u)
        bin_idx = np.unravel_index(flat_bin, self._Nbins)
        samp = [self._edges[dim, bin_idx[dim]] + self._wids[dim, bin_idx[dim]] * np.random.rand()
                for dim in range(2)]
        return np.array(samp)

    def prob(self, params):
        """Return the binned joint PDF value at ``params`` (2-element sequence)."""
        ix, iy = (np.searchsorted(self._edges[dim], params[dim]) - 1 for dim in range(2))
        return self._pdf[ix, iy]

    def logprob(self, params):
        """Return the binned joint log-PDF value at ``params``."""
        ix, iy = (np.searchsorted(self._edges[dim], params[dim]) - 1 for dim in range(2))
        return self._logpdf[ix, iy]
class EmpiricalDistribution2DKDE(object):
    """2D empirical distribution from MCMC posteriors, via a Gaussian KDE."""
    def __init__(self, param_names, samples, minvals=None, maxvals=None, bandwidth=0.1, nbins=40):
        """
        Minvals and maxvals should specify priors for these. Should make these required.

        :param param_names: 2-element list of parameter names
        :param samples: samples, with dimension (2 x Nsamples)
        :param minvals: 2-element lower prior bounds (defaults to sample minima)
        :param maxvals: 2-element upper prior bounds (defaults to sample maxima)
        :param bandwidth: Gaussian KDE bandwidth
        :param nbins: number of interpolation nodes per dimension

        :return distr: list of empirical distributions
        """
        self.ndim = 2
        self.param_names = param_names
        self.bandwidth = bandwidth
        # code below relies on samples axes being swapped. but we
        # want to keep inputs the same
        # create a 2D KDE from which to evaluate
        self.kde = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(samples.T)
        if minvals is None:
            msg = "minvals for KDE empirical distribution were not supplied. Resulting distribution may not have support over full prior"
            logger.warning(msg)
            # widen these to add support
            minvals = (min(samples[0, :]), min(samples[1, :]))
            maxvals = (max(samples[0, :]), max(samples[1, :]))
        # significantly faster probability estimation using interpolation
        # instead of evaluating KDE every time
        self.minvals = minvals
        self.maxvals = maxvals
        xvals = np.linspace(minvals[0], maxvals[0], num=nbins)
        yvals = np.linspace(minvals[1], maxvals[1], num=nbins)
        self._Nbins = [yvals.size for ii in range(xvals.size)]
        # KDE log-density scored on the full nbins x nbins grid
        scores = np.array([self.kde.score(np.array([xvals[ii], yvals[jj]]).reshape((1, 2))) for ii in range(xvals.size) for jj in range(yvals.size)])
        # interpolate within prior
        # NOTE(review): scipy.interpolate.interp2d is deprecated and removed
        # in recent SciPy releases -- confirm the pinned SciPy version
        self._logpdf = interp2d(xvals, yvals, scores, kind='linear', fill_value=-1000)

    def draw(self):
        """Draw one random (2-element) sample from the KDE."""
        params = self.kde.sample(1).T
        return params.squeeze()

    def prob(self, params):
        """Return the interpolated PDF value at ``params``."""
        # just in case...make sure to make this zero outside of our prior ranges
        param1_out = params[0] < self.minvals[0] or params[0] > self.maxvals[0]
        param2_out = params[1] < self.minvals[1] or params[1] > self.maxvals[1]
        if param1_out or param2_out:
            # essentially zero
            # NOTE(review): this returns -1000 (a log-scale sentinel), not a
            # probability ~0 like the in-range branch -- confirm callers
            # handle this negative value as intended
            return -1000
        else:
            return np.exp(self._logpdf(*params))[0]

    def logprob(self, params):
        """Return the interpolated log-PDF value at ``params``."""
        return self._logpdf(*params)[0]
def make_empirical_distributions(pta, paramlist, params, chain,
                                 burn=0, nbins=81, filename='distr.pkl',
                                 return_distribution=True,
                                 save_dists=True):
    """
    Utility function to construct empirical distributions.

    :param pta: the pta object used to generate the posteriors
    :param paramlist: a list of parameter names,
                      either single parameters or pairs of parameters
    :param params: list of all parameter names for the MCMC chain
        (NOTE(review): currently unused -- indices are taken from
        ``pta.param_names`` instead; confirm intent)
    :param chain: MCMC chain from a previous run
    :param burn: desired number of initial samples to discard
    :param nbins: number of bins to use for the empirical distributions
    :param filename: name of the pickle file the distributions are saved to
    :param return_distribution: whether to return the list of distributions
    :param save_dists: whether to pickle the distributions to ``filename``

    :return distr: list of empirical distributions
    """
    distr = []
    if not save_dists and not return_distribution:
        msg = "no distribution returned or saved, are you sure??"
        logger.info(msg)
    for pl in paramlist:
        if type(pl) is not list:
            pl = [pl]
        if len(pl) == 1:
            # 1D case: histogram over the parameter's full prior range
            idx = pta.param_names.index(pl[0])
            prior_min = pta.params[idx].prior._defaults['pmin']
            prior_max = pta.params[idx].prior._defaults['pmax']
            # get the bins for the histogram
            bins = np.linspace(prior_min, prior_max, nbins)
            new_distr = EmpiricalDistribution1D(pl[0], chain[burn:, idx], bins)
            distr.append(new_distr)
        elif len(pl) == 2:
            # get the parameter indices
            idx = [pta.param_names.index(pl1) for pl1 in pl]
            # get the bins for the histogram
            bins = [np.linspace(pta.params[i].prior._defaults['pmin'],
                                pta.params[i].prior._defaults['pmax'], nbins) for i in idx]
            new_distr = EmpiricalDistribution2D(pl, chain[burn:, idx].T, bins)
            distr.append(new_distr)
        else:
            msg = 'WARNING: only 1D and 2D empirical distributions are currently allowed.'
            logger.warning(msg)
    # save the list of empirical distributions as a pickle file
    if save_dists:
        if len(distr) > 0:
            with open(filename, 'wb') as f:
                pickle.dump(distr, f)
            msg = 'The empirical distributions have been pickled to {0}.'.format(filename)
            logger.info(msg)
        else:
            msg = 'WARNING: No empirical distributions were made!'
            logger.warning(msg)
    if return_distribution:
        return distr
def make_empirical_distributions_KDE(pta, paramlist, params, chain,
                                     burn=0, nbins=41, filename='distr.pkl',
                                     bandwidth=0.1,
                                     return_distribution=True,
                                     save_dists=True):
    """
    Utility function to construct KDE-based empirical distributions.

    :param pta: the pta object used to generate the posteriors
    :param paramlist: a list of parameter names,
                      either single parameters or pairs of parameters
    :param params: list of all parameter names for the MCMC chain
        (NOTE(review): currently unused -- indices are taken from
        ``pta.param_names`` instead; confirm intent)
    :param chain: MCMC chain from a previous run, has dimensions Nsamples x Nparams
    :param burn: desired number of initial samples to discard
    :param nbins: number of bins for the fallback histogram distributions
    :param bandwidth: Gaussian KDE bandwidth
    :param filename: name of the pickle file the distributions are saved to
    :param return_distribution: whether to return the list of distributions
    :param save_dists: whether to pickle the distributions to ``filename``

    :return distr: list of empirical distributions
    """
    distr = []
    if not save_dists and not return_distribution:
        msg = "no distribution returned or saved, are you sure??"
        logger.info(msg)
    for pl in paramlist:
        if type(pl) is not list:
            pl = [pl]
        if len(pl) == 1:
            # get the parameter index
            idx = pta.param_names.index(pl[0])
            prior_min = pta.params[idx].prior._defaults['pmin']
            prior_max = pta.params[idx].prior._defaults['pmax']
            # build a 1D KDE distribution over the full prior support
            new_distr = EmpiricalDistribution1DKDE(pl[0], chain[burn:, idx], bandwidth=bandwidth, minval=prior_min, maxval=prior_max)
            distr.append(new_distr)
        elif len(pl) == 2:
            # get the parameter indices
            idx = [pta.param_names.index(pl1) for pl1 in pl]
            # histogram bins are only needed by the non-sklearn fallback below
            bins = [np.linspace(pta.params[i].prior._defaults['pmin'],
                                pta.params[i].prior._defaults['pmax'], nbins) for i in idx]
            # BUGFIX: take the prior bounds from the *selected* parameters
            # (idx[0], idx[1]), not from the first two parameters of the PTA,
            # consistent with the ``bins`` computation above
            minvals = [pta.params[idx[0]].prior._defaults['pmin'], pta.params[idx[1]].prior._defaults['pmin']]
            maxvals = [pta.params[idx[0]].prior._defaults['pmax'], pta.params[idx[1]].prior._defaults['pmax']]
            if sklearn_available:
                new_distr = EmpiricalDistribution2DKDE(pl, chain[burn:, idx].T, bandwidth=bandwidth, minvals=minvals, maxvals=maxvals)
            else:
                logger.warn('`sklearn` package not available. Fall back to using histgrams for empirical distribution')
                new_distr = EmpiricalDistribution2D(pl, chain[burn:, idx].T, bins)
            distr.append(new_distr)
        else:
            msg = 'WARNING: only 1D and 2D empirical distributions are currently allowed.'
            logger.warning(msg)
    # save the list of empirical distributions as a pickle file
    if save_dists:
        if len(distr) > 0:
            with open(filename, 'wb') as f:
                pickle.dump(distr, f)
            msg = 'The empirical distributions have been pickled to {0}.'.format(filename)
            logger.info(msg)
        else:
            msg = 'WARNING: No empirical distributions were made!'
            logger.warning(msg)
    if return_distribution:
        return distr
| 12,755 | 35.033898 | 149 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/sky_scrambles.py | # -*- coding: utf-8 -*-
import pickle
import sys
import time
import numpy as np
from enterprise.signals import utils
def compute_match(orf1, orf1_mag, orf2, orf2_mag):
    """Computes the match between two different ORFs."""
    overlap = np.dot(orf1, orf2)
    return np.abs(overlap) / (orf1_mag * orf2_mag)
def make_true_orf(psrs):
    """Computes the ORF by looping over pulsar pairs"""
    npsr = len(psrs)
    orf = np.zeros(int(npsr*(npsr-1)/2))
    pair = 0
    for ii in range(npsr):
        for jj in range(ii+1, npsr):
            orf[pair] = utils.hd_orf(psrs[ii].pos, psrs[jj].pos)
            pair += 1
    return orf
def compute_orf(ptheta, pphi):
    """
    Computes the ORF coefficient. Takes different input than utils.hd_orf().

    :param ptheta: Array of values of pulsar positions theta
    :param pphi: Array of values of pulsar positions phi

    :returns:
        orf: ORF for the given positions
        orf_mag: Magnitude of the ORF
    """
    npsr = len(ptheta)
    # unit position vectors on the sphere for each pulsar
    pos = [np.array([np.cos(phi)*np.sin(theta),
                     np.sin(phi)*np.sin(theta),
                     np.cos(theta)]) for phi, theta in zip(pphi, ptheta)]

    # cosine of the angle between every (i, j>i) pulsar pair
    x = np.array([np.dot(pos[ii], pos[jj])
                  for ii in range(npsr) for jj in range(ii+1, npsr)])

    # Hellings & Downs curve evaluated at each pair
    orf = 1.5*(1./3. + (1.-x)/2.*(np.log((1.-x)/2.)-1./6.))
    return orf, np.sqrt(np.dot(orf, orf))
def get_scrambles(psrs, N=500, Nmax=10000, thresh=0.1,
                  filename='sky_scrambles.npz', resume=False):
    """
    Get sky scramble ORFs and matches.

    :param psrs: List of pulsar objects
    :param N: Number of desired sky scrambles
    :param Nmax: Maximum number of tries to get independent scrambles
    :param thresh: Threshold value for match statistic.
    :param filename: Name of the file where the sky scrambles should be saved.
        Sky scrambles should be saved in `npz` file.
    :param resume: Whether to resume from an earlier run.
    :return: tuple ``(matches, orfs, thetas, phis)``
    """
    # compute the unscrambled ORF
    orf_true = make_true_orf(psrs)
    orf_true_mag = np.sqrt(np.dot(orf_true, orf_true))

    npsr = len(psrs)

    print('Generating {0} sky scrambles from {1} attempts with threshold {2}...'.format(N, Nmax, thresh))

    orf_mags = []

    if resume:
        print('Resuming from earlier run... loading sky scrambles from file {0}'.format(filename))
        npzfile = np.load(filename)
        matches, orfs = npzfile['matches'], npzfile['orfs']
        thetas, phis = npzfile['thetas'], npzfile['phis']
        print('{0} sky scrambles have already been generated.'.format(len(matches)))
        for o in orfs:
            orf_mags.append(np.sqrt(np.dot(o, o)))
    else:
        matches, orfs, thetas, phis = [], [], [], []

    ct = 0
    tstart = time.time()
    while ct <= Nmax and len(matches) <= N:
        # draw isotropic random sky positions for all pulsars
        ptheta = np.arccos(np.random.uniform(-1, 1, npsr))
        pphi = np.random.uniform(0, 2*np.pi, npsr)
        orf_s, orf_s_mag = compute_orf(ptheta, pphi)
        match = compute_match(orf_true, orf_true_mag, orf_s, orf_s_mag)
        if thresh == 1.0:
            if ct == 0:
                print('There is no threshold! Keep all the sky scrambles')
            if len(orfs) == 0:
                # first accepted scramble: switch from lists to arrays
                orfs.append(orf_s)
                matches.append(match)
                orfs = np.array(orfs)
                matches = np.array(matches)
                thetas = ptheta[np.newaxis, ...]
                phis = pphi[np.newaxis, ...]
                orf_mags.append(np.sqrt(np.dot(orf_s, orf_s)))
            else:
                matches = np.append(matches, match)
                orf_reshape = np.vstack(orf_s).T
                orfs = np.append(orfs, orf_reshape, axis=0)
                orf_mags.append(orf_s_mag)
                thetas = np.concatenate((thetas, [ptheta]))
                phis = np.concatenate((phis, [pphi]))
        elif thresh < 1.0 and match <= thresh:
            if len(orfs) == 0:
                # first accepted scramble: switch from lists to arrays
                orfs.append(orf_s)
                matches.append(match)
                orfs = np.array(orfs)
                matches = np.array(matches)
                thetas = ptheta[np.newaxis, ...]
                phis = pphi[np.newaxis, ...]
                orf_mags.append(np.sqrt(np.dot(orf_s, orf_s)))
            else:
                # BUGFIX: the old ``np.all(map(...))`` wrapped the Python-3
                # map iterator in a 0-d object array, which is always truthy,
                # so the mutual-independence check never rejected a scramble.
                # Evaluate the pairwise matches explicitly instead.
                check = all(compute_match(orf_s, orf_s_mag, o, o_mag) <= thresh
                            for o, o_mag in zip(orfs, orf_mags))
                if check:
                    matches = np.append(matches, match)
                    orf_reshape = np.vstack(orf_s).T
                    orfs = np.append(orfs, orf_reshape, axis=0)
                    orf_mags.append(orf_s_mag)
                    thetas = np.concatenate((thetas, [ptheta]))
                    phis = np.concatenate((phis, [pphi]))
        ct += 1
        if ct % 1000 == 0:
            sys.stdout.write('\r')
            sys.stdout.write('Finished %2.1f percent in %f min'
                             % (float(ct)/N*100, (time.time() - tstart)/60.))
            sys.stdout.flush()

    if len(matches) < N:
        print('\nGenerated {0} matches rather than the desired {1} matches'.format(len(matches), N))
    else:
        print('\nGenerated the desired {0} matches in {1} attempts'.format(len(matches), ct))
    print('Total runtime: {0:.1f} min'.format((time.time()-tstart)/60.))

    np.savez(filename, matches=matches, orfs=orfs, thetas=thetas, phis=phis)
    return (matches, orfs, thetas, phis)
if __name__ == '__main__':
    # Command-line driver: load a pickled list of pulsar objects and
    # generate sky-scramble ORFs, saving the results to an npz file.
    import argparse
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--picklefile',
                        help='pickle file for the pulsars')
    parser.add_argument('--threshold', default=0.1,
                        help='threshold for sky scrambles (DEFAULT 0.1)')
    parser.add_argument('--nscrambles', default=1000,
                        help='number of sky scrambles to generate (DEFAULT 1000)')
    parser.add_argument('--nmax', default=1000,
                        help='maximum number of attempts (DEFAULT 1000)')
    parser.add_argument('--savefile', default='../data/scrambles_nano.npz',
                        help='outputfile name')
    parser.add_argument('--resume', action='store_true',
                        help='resume from existing savefile?')
    args = parser.parse_args()

    with open(args.picklefile, 'rb') as f:
        psrs = pickle.load(f)

    # argparse values arrive as strings; cast to the numeric types expected
    get_scrambles(psrs, N=int(args.nscrambles), Nmax=int(args.nmax), thresh=float(args.threshold),
                  filename=args.savefile, resume=args.resume)
| 6,532 | 33.75 | 111 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/timing.py | # -*- coding: utf-8 -*-
from collections import OrderedDict
import numpy as np
from enterprise.signals import deterministic_signals, parameter, signal_base
# timing model delay
@signal_base.function
def tm_delay(residuals, t2pulsar, tmparams_orig, tmparams, which='all'):
    """
    Compute difference in residuals due to perturbed timing model.

    :param residuals: original pulsar residuals from Pulsar object
    :param t2pulsar: libstempo pulsar object
    :param tmparams_orig: dictionary of TM parameter tuples, (val, err)
    :param tmparams: new timing model parameters, rescaled to be in sigmas
    :param which: option to have all or only named TM parameters varied

    :return: difference between new and old residuals in seconds
    """
    if which == 'all':
        keys = tmparams_orig.keys()
    else:
        keys = which

    # grab original timing model parameters and errors in dictionary
    orig_params = np.array([tmparams_orig[key] for key in keys])

    # put varying parameters into dictionary
    # (convert from units of sigma back to physical parameter values)
    tmparams_rescaled = np.atleast_1d(np.double(orig_params[:, 0] +
                                                tmparams * orig_params[:, 1]))
    tmparams_vary = OrderedDict(zip(keys, tmparams_rescaled))

    # set to new values
    # NOTE: temporarily mutates the shared libstempo object; the original
    # values are restored below, so the restore must not be skipped
    t2pulsar.vals(tmparams_vary)
    new_res = np.double(t2pulsar.residuals().copy())

    # remember to set values back to originals
    t2pulsar.vals(OrderedDict(zip(keys,
                                  np.atleast_1d(np.double(orig_params[:, 0])))))

    # Sort the residuals (stable sort keeps equal-TOA ordering deterministic)
    isort = np.argsort(t2pulsar.toas(), kind='mergesort')

    return residuals[isort] - new_res[isort]
# Model component building blocks #
def timing_block(tmparam_list=None):
    """
    Returns the timing model block of the model

    :param tmparam_list: a list of parameters to vary in the model;
        defaults to ['RAJ', 'DECJ', 'F0', 'F1', 'PMRA', 'PMDEC', 'PX']
    """
    # avoid a shared mutable default argument; build the default per call
    if tmparam_list is None:
        tmparam_list = ['RAJ', 'DECJ', 'F0', 'F1',
                        'PMRA', 'PMDEC', 'PX']

    # default 5-sigma prior above and below the parfile mean
    tm_params = parameter.Uniform(-5.0, 5.0, size=len(tmparam_list))

    # timing model
    tm_func = tm_delay(tmparams=tm_params, which=tmparam_list)
    tm = deterministic_signals.Deterministic(tm_func, name='timing model')

    return tm
| 2,236 | 31.897059 | 80 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/dropout.py | # -*- coding: utf-8 -*-
import enterprise
import numpy as np
from enterprise import constants as const
from enterprise.signals import (deterministic_signals,
parameter,
signal_base,
utils)
@signal_base.function
def dropout_powerlaw(f, name, log10_A=-16, gamma=5,
                     dropout_psr='B1855+09', k_drop=0.5, k_threshold=0.5):
    """
    Dropout powerlaw for a stochastic process. Switches a stochastic
    process on or off in a single pulsar depending on whether k_drop exceeds
    k_threshold.

    :param f: array of frequencies
    :param name: name of the pulsar this signal instance belongs to
    :param log10_A: log10 of the powerlaw amplitude
    :param gamma: powerlaw spectral index
    :param dropout_psr: Which pulsar to use a dropout switch on. The value 'all'
        will use the method on all pulsars.
    :param k_drop: dropout parameter; process is on when k_drop >= k_threshold
    :param k_threshold: dropout threshold
    """
    df = np.diff(np.concatenate((np.array([0]), f[::2])))

    # standard powerlaw PSD weighted by the per-frequency bin width
    psd = ((10**log10_A)**2 / 12.0 / np.pi**2 *
           const.fyr**(gamma-3) * f**(-gamma) * np.repeat(df, 2))

    # BUGFIX: the 'all' option must be checked on dropout_psr (as documented),
    # not on this pulsar's name; the old ``name == 'all'`` test could never
    # fire, so the 'all' feature was dead code
    if dropout_psr == 'all' or name == dropout_psr:
        k_switch = 1.0 if k_drop >= k_threshold else 0.0
        return k_switch * psd
    else:
        return psd
@signal_base.function
def dropout_physical_ephem_delay(toas, planetssb, pos_t, frame_drift_rate=0,
                                 d_jupiter_mass=0, d_saturn_mass=0, d_uranus_mass=0,
                                 d_neptune_mass=0, jup_orb_elements=np.zeros(6),
                                 sat_orb_elements=np.zeros(6), inc_jupiter_orb=False,
                                 jup_orbelxyz=None, jup_mjd=None, inc_saturn_orb=False,
                                 sat_orbelxyz=None, sat_mjd=None, equatorial=True,
                                 k_drop=0.5, k_threshold=0.5):
    """
    Dropout BayesEphem model. Switches BayesEphem on or off depending on
    whether k_drop exceeds k_threshold.

    :param toas: TOAs in seconds
    :param planetssb: planet-to-SSB position vectors, indexed per TOA
    :param pos_t: pulsar position unit vectors at each TOA
    :param frame_drift_rate: frame-rotation drift-rate parameter
    :param d_*_mass: perturbations to the outer-planet masses
    :param jup_orb_elements: Jupiter orbital-element perturbation amplitudes
    :param sat_orb_elements: Saturn orbital-element perturbation amplitudes
    :param inc_jupiter_orb, inc_saturn_orb: include the orbital-element terms
    :param equatorial: frame used for the rotation (passed to ss_framerotate)
    :param k_drop: dropout parameter; BayesEphem active when k_drop >= k_threshold
    :param k_threshold: dropout threshold
    :return: delay correction (zeroed when the model is dropped out)

    NOTE(review): the np.zeros(6) default arrays are shared across calls;
    harmless while read-only, but confirm nothing mutates them.
    """
    # get dropout switch
    if k_drop >= k_threshold:
        k_switch = 1.0
    elif k_drop < k_threshold:
        k_switch = 0.0

    # convert toas to MJD
    mjd = toas / 86400

    # grab planet-to-SSB vectors
    earth = planetssb[:, 2, :3]
    jupiter = planetssb[:, 4, :3]
    saturn = planetssb[:, 5, :3]
    uranus = planetssb[:, 6, :3]
    neptune = planetssb[:, 7, :3]

    # do frame rotation
    earth = utils.ss_framerotate(mjd, earth, 0.0, 0.0, 0.0, frame_drift_rate,
                                 offset=None, equatorial=equatorial)

    # mass perturbations
    mpert = [(jupiter, d_jupiter_mass), (saturn, d_saturn_mass),
             (uranus, d_uranus_mass), (neptune, d_neptune_mass)]
    for planet, dm in mpert:
        earth += utils.dmass(planet, dm)

    # jupter orbital element perturbations
    if inc_jupiter_orb:
        # prefactor is Jupiter's mass in solar masses
        jup_perturb_tmp = 0.0009547918983127075 * np.einsum(
            'i,ijk->jk', jup_orb_elements, jup_orbelxyz)
        earth += np.array([np.interp(mjd, jup_mjd, jup_perturb_tmp[:, aa])
                           for aa in range(3)]).T

    # saturn orbital element perturbations
    if inc_saturn_orb:
        # prefactor is Saturn's mass in solar masses
        sat_perturb_tmp = 0.00028588567008942334 * np.einsum(
            'i,ijk->jk', sat_orb_elements, sat_orbelxyz)
        earth += np.array([np.interp(mjd, sat_mjd, sat_perturb_tmp[:, aa])
                           for aa in range(3)]).T

    # construct the true geocenter to barycenter roemer
    tmp_roemer = np.einsum('ij,ij->i', planetssb[:, 2, :3], pos_t)

    # create the delay
    delay = tmp_roemer - np.einsum('ij,ij->i', earth, pos_t)

    return k_switch * delay
def Dropout_PhysicalEphemerisSignal(
        frame_drift_rate=parameter.Uniform(-1e-9, 1e-9)('frame_drift_rate'),
        d_jupiter_mass=parameter.Normal(0, 1.54976690e-11)('d_jupiter_mass'),
        d_saturn_mass=parameter.Normal(0, 8.17306184e-12)('d_saturn_mass'),
        d_uranus_mass=parameter.Normal(0, 5.71923361e-11)('d_uranus_mass'),
        d_neptune_mass=parameter.Normal(0, 7.96103855e-11)('d_neptune_mass'),
        jup_orb_elements=parameter.Uniform(-0.05, 0.05, size=6)('jup_orb_elements'),
        sat_orb_elements=parameter.Uniform(-0.5, 0.5, size=6)('sat_orb_elements'),
        inc_jupiter_orb=True, inc_saturn_orb=False, use_epoch_toas=True,
        k_drop=parameter.Uniform(0.0, 1.0), k_threshold=0.5, name=''):
    """Class factory for dropout physical ephemeris model signal.

    :param use_epoch_toas: if True, evaluate the delay on daily-averaged
        (epoch) TOAs and expand back to the full TOA set, which is cheaper
    :param k_drop: dropout parameter; BayesEphem active when k_drop >= k_threshold
    :param k_threshold: dropout threshold
    :return: a Deterministic signal class implementing dropout BayesEphem
    """
    # turn off saturn orbital element parameters if not including in signal
    if not inc_saturn_orb:
        sat_orb_elements = np.zeros(6)

    # define waveform
    jup_mjd, jup_orbelxyz, sat_mjd, sat_orbelxyz = (
        utils.get_planet_orbital_elements())
    wf = dropout_physical_ephem_delay(frame_drift_rate=frame_drift_rate,
                                      d_jupiter_mass=d_jupiter_mass,
                                      d_saturn_mass=d_saturn_mass,
                                      d_uranus_mass=d_uranus_mass,
                                      d_neptune_mass=d_neptune_mass,
                                      jup_orb_elements=jup_orb_elements,
                                      sat_orb_elements=sat_orb_elements,
                                      inc_jupiter_orb=inc_jupiter_orb,
                                      jup_orbelxyz=jup_orbelxyz,
                                      jup_mjd=jup_mjd,
                                      inc_saturn_orb=inc_saturn_orb,
                                      sat_orbelxyz=sat_orbelxyz,
                                      sat_mjd=sat_mjd,
                                      k_drop=k_drop, k_threshold=k_threshold)

    BaseClass = deterministic_signals.Deterministic(wf, name=name)

    class Dropout_PhysicalEphemerisSignal(BaseClass):
        signal_name = 'phys_ephem'
        signal_id = 'phys_ephem_' + name if name else 'phys_ephem'

        def __init__(self, psr):
            # not available for PINT yet
            if isinstance(psr, enterprise.pulsar.PintPulsar):
                msg = 'Physical Ephemeris model is not compatible with PINT '
                msg += 'at this time.'
                raise NotImplementedError(msg)

            super(Dropout_PhysicalEphemerisSignal, self).__init__(psr)

            if use_epoch_toas:
                # get quantization matrix and calculate daily average TOAs
                U, _ = utils.create_quantization_matrix(psr.toas, nmin=1)
                self.uinds = utils.quant2ind(U)
                avetoas = np.array([psr.toas[sc].mean() for sc in self.uinds])
                self._wf[''].add_kwarg(toas=avetoas)

                # interpolate ssb planet position vectors to avetoas
                planetssb = np.zeros((len(avetoas), 9, 3))
                for jj in range(9):
                    planetssb[:, jj, :] = np.array([
                        np.interp(avetoas, psr.toas, psr.planetssb[:, jj, aa])
                        for aa in range(3)]).T
                self._wf[''].add_kwarg(planetssb=planetssb)

                # Inteprolating the pulsar position vectors onto epoch TOAs
                pos_t = np.array([np.interp(avetoas, psr.toas, psr.pos_t[:, aa])
                                  for aa in range(3)]).T
                self._wf[''].add_kwarg(pos_t=pos_t)

            # initialize delay
            self._delay = np.zeros(len(psr.toas))

        @signal_base.cache_call('delay_params')
        def get_delay(self, params):
            delay = self._wf[''](params=params)
            if use_epoch_toas:
                # expand the epoch-averaged delay back onto the full TOA set
                for slc, val in zip(self.uinds, delay):
                    self._delay[slc] = val
                return self._delay
            else:
                return delay

    return Dropout_PhysicalEphemerisSignal
| 8,010 | 39.872449 | 87 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/model_orfs.py | # -*- coding: utf-8 -*-
import numpy as np
import scipy.interpolate as interp
from enterprise import constants as const
from enterprise.signals import signal_base
@signal_base.function
def param_hd_orf(pos1, pos2, a=1.5, b=-0.25, c=0.5):
    '''
    Pre-factor parametrized Hellings & Downs spatial correlation function.

    :param: a, b, c:
        coefficients of H&D-like curve [default=1.5,-0.25,0.5].

    Reference: Taylor, Gair, Lentati (2013), https://arxiv.org/abs/1210.6014
    Author: S. R. Taylor (2020)
    '''
    # auto-correlation term is unity
    if np.all(pos1 == pos2):
        return 1
    omc2 = (1 - np.dot(pos1, pos2)) / 2
    return a * omc2 * np.log(omc2) + b * omc2 + c
@signal_base.function
def spline_orf(pos1, pos2, params):
    '''
    Agnostic spline-interpolated spatial correlation function. Spline knots
    are placed at edges, zeros, and minimum of H&D curve. Changing locations
    will require manual intervention to create new function.

    :param: params
        spline knot amplitudes.

    Reference: Taylor, Gair, Lentati (2013), https://arxiv.org/abs/1210.6014
    Author: S. R. Taylor (2020)
    '''
    if np.all(pos1 == pos2):
        return 1
    # spline knots placed at edges, zeros, and minimum of H&D
    knot_angles = np.array([1e-3, 25.0, 49.3, 82.5,
                            121.8, 150.0, 180.0]) * np.pi/180.0
    knot_omc2 = (1 - np.cos(knot_angles)) / 2
    spline = interp.interp1d(knot_omc2, params, kind='cubic')
    omc2 = (1 - np.dot(pos1, pos2)) / 2
    return spline(omc2)
@signal_base.function
def bin_orf(pos1, pos2, params):
    '''
    Agnostic binned spatial correlation function. Bin edges are
    placed at edges and across angular separation space. Changing bin
    edges will require manual intervention to create new function.

    :param: params
        inter-pulsar correlation bin amplitudes.

    Author: S. R. Taylor (2020)
    '''
    if np.all(pos1 == pos2):
        return 1
    # bin edges in angular-separation space (radians)
    edges = np.array([1e-3, 30.0, 50.0, 80.0, 100.0,
                      120.0, 150.0, 180.0]) * np.pi/180.0
    separation = np.arccos(np.dot(pos1, pos2))
    which_bin = np.digitize(separation, edges)
    return params[which_bin - 1]
@signal_base.function
def zero_diag_bin_orf(pos1, pos2, params):
    '''
    Agnostic binned spatial correlation function. To be
    used in a "split likelihood" model with an additional common
    uncorrelated red process. The latter is necessary to regularize
    the overall Phi covariance matrix.

    :param: params
        inter-pulsar correlation bin amplitudes.

    Author: S. R. Taylor (2020)
    '''
    # (near-)zero diagonal: the auto-correlation lives in a separate process
    if np.all(pos1 == pos2):
        return 1e-20
    # bin edges in angular-separation space (radians)
    edges = np.array([1e-3, 30.0, 50.0, 80.0, 100.0,
                      120.0, 150.0, 180.0]) * np.pi/180.0
    separation = np.arccos(np.dot(pos1, pos2))
    which_bin = np.digitize(separation, edges)
    return params[which_bin - 1]
@signal_base.function
def zero_diag_hd(pos1, pos2):
    '''
    Off-diagonal Hellings & Downs spatial correlation function. To be
    used in a "split likelihood" model with an additional common uncorrelated
    red process. The latter is necessary to regularize the overall Phi
    covariance matrix.

    Author: S. R. Taylor (2020)
    '''
    # (near-)zero diagonal: the auto-correlation lives in a separate process
    if np.all(pos1 == pos2):
        return 1e-20
    omc2 = (1 - np.dot(pos1, pos2)) / 2
    return 1.5 * omc2 * np.log(omc2) - 0.25 * omc2 + 0.5
@signal_base.function
def freq_hd(pos1, pos2, params):
    '''
    Frequency-dependent Hellings & Downs spatial correlation function.
    Implemented as a model that only enforces H&D inter-pulsar correlations
    after a certain number of frequencies in the spectrum. The first set of
    frequencies are uncorrelated.

    :param: params
        params[0] is the number of components in the stochastic process.
        params[1] is the frequency at which to start the H&D inter-pulsar
        correlations (indexing from 0).

    Reference: Taylor et al. (2017), https://arxiv.org/abs/1606.09180
    Author: S. R. Taylor (2020)
    '''
    nfreq = params[0]
    orf_ifreq = params[1]
    if np.all(pos1 == pos2):
        return np.ones(2*nfreq)
    omc2 = (1 - np.dot(pos1, pos2)) / 2
    hd = 1.5 * omc2 * np.log(omc2) - 0.25 * omc2 + 0.5
    # H&D coefficient on every frequency, then zero out the first orf_ifreq
    # frequencies (two Fourier components each)
    coeff = hd * np.ones(2*nfreq)
    coeff[:2*orf_ifreq] = 0.0
    return coeff
@signal_base.function
def legendre_orf(pos1, pos2, params):
    '''
    Legendre polynomial spatial correlation function. Assumes process
    normalization such that autocorrelation signature is 1. A separate function
    is needed to use a "split likelihood" model with this Legendre process
    decoupled from the autocorrelation signature ("zero_diag_legendre_orf").

    :param: params
        Legendre polynomial amplitudes describing the Legendre series approximation
        to the inter-pulsar correlation signature.
        H&D coefficients are a_0=0, a_1=0, a_2=0.3125, a_3=0.0875, ...

    Reference: Gair et al. (2014), https://arxiv.org/abs/1406.4664
    Author: S. R. Taylor (2020)
    '''
    if np.all(pos1 == pos2):
        return 1
    cosang = np.dot(pos1, pos2)
    return np.polynomial.legendre.legval(cosang, params)
@signal_base.function
def zero_diag_legendre_orf(pos1, pos2, params):
    '''
    Legendre polynomial spatial correlation function. To be
    used in a "split likelihood" model with an additional common uncorrelated
    red process. The latter is necessary to regularize the overall Phi
    covariance matrix.

    :param: params
        Legendre polynomial amplitudes describing the Legendre series approximation
        to the inter-pulsar correlation signature.
        H&D coefficients are a_0=0, a_1=0, a_2=0.3125, a_3=0.0875, ...

    Reference: Gair et al. (2014), https://arxiv.org/abs/1406.4664
    Author: S. R. Taylor (2020)
    '''
    # (near-)zero diagonal: the auto-correlation lives in a separate process
    if np.all(pos1 == pos2):
        return 1e-20
    cosang = np.dot(pos1, pos2)
    return np.polynomial.legendre.legval(cosang, params)
@signal_base.function
def hd_orf(pos1, pos2):
    """Hellings & Downs spatial correlation function."""
    # auto-correlation term is unity
    if np.all(pos1 == pos2):
        return 1
    omc2 = (1 - np.dot(pos1, pos2)) / 2
    return 1.5 * omc2 * np.log(omc2) - 0.25 * omc2 + 0.5
@signal_base.function
def dipole_orf(pos1, pos2):
    """Dipole spatial correlation function."""
    # small offset on the diagonal regularizes the correlation matrix
    if np.all(pos1 == pos2):
        return 1 + 1e-5
    return np.dot(pos1, pos2)
@signal_base.function
def monopole_orf(pos1, pos2):
    """Monopole spatial correlation function."""
    # small offset on the diagonal regularizes the correlation matrix
    if np.all(pos1 == pos2):
        return 1.0 + 1e-5
    return 1.0
@signal_base.function
def anis_orf(pos1, pos2, params, **kwargs):
    """Anisotropic GWB spatial correlation function."""
    anis_basis = kwargs["anis_basis"]
    psrs_pos = kwargs["psrs_pos"]
    lmax = kwargs["lmax"]

    # locate each pulsar in the position table by matching its unit vector
    psr1_index = next(ii for ii, p in enumerate(psrs_pos) if np.all(p == pos1))
    psr2_index = next(ii for ii, p in enumerate(psrs_pos) if np.all(p == pos2))

    # spherical-harmonic coefficients: isotropic monopole plus fitted terms
    clm = np.zeros((lmax + 1) ** 2)
    clm[0] = 2.0 * np.sqrt(np.pi)
    if lmax > 0:
        clm[1:] = params

    basis_vals = anis_basis[: (lmax + 1) ** 2, psr1_index, psr2_index]
    return sum(c * b for c, b in zip(clm, basis_vals))
@signal_base.function
def gw_monopole_orf(pos1, pos2):
    """
    GW-monopole Correlations. This phenomenological correlation pattern can be
    used in Bayesian runs as the simplest type of correlations.
    Author: N. Laal (2020)
    """
    if np.all(pos1 == pos2):
        return 1
    return 0.5
@signal_base.function
def gw_dipole_orf(pos1, pos2):
    """
    GW-dipole Correlations.
    Author: N. Laal (2020)
    """
    if np.all(pos1 == pos2):
        return 1
    return 0.5 * np.dot(pos1, pos2)
@signal_base.function
def st_orf(pos1, pos2):
    """
    Scalar tensor correlations as induced by the breathing polarization mode of gravity.
    Author: N. Laal (2020)
    """
    if np.all(pos1 == pos2):
        return 1
    return (3.0 + np.dot(pos1, pos2)) / 8
@signal_base.function
def gt_orf(pos1, pos2, tau):
    """
    General Transverse (GT) Correlations. This ORF is used to detect the relative
    significance of all possible correlation patterns induced by the most general
    family of transverse gravitational waves.

    :param: tau
        tau = 1 results in ST correlations while tau = -1 results in HD correlations.

    Author: N. Laal (2020)
    """
    if np.all(pos1 == pos2):
        return 1
    cosang = np.dot(pos1, pos2)
    k = 1/2*(1-cosang)
    return 1/8 * (3+cosang) + (1-tau)*3/4*k*np.log(k)
@signal_base.function
def generalized_gwpol_psd(f, log10_A_tt=-15, log10_A_st=-15, alpha_tt=-2/3, alpha_alt=-1,
                          log10_A_vl=-15, log10_A_sl=-15,
                          kappa=0, p_dist=1.0):
    '''
    General powerlaw spectrum allowing for existence of all possible modes of gravity as
    predicted by a general metric spacetime theory and generated by a binary system.
    The SL and VL modes' powerlaw relations are not normalized.

    :param: f
        A list of considered frequencies
    :param: log10_A_tt
        Amplitude of the tensor transverse mode
    :param: log10_A_st
        Amplitude of the scalar transverse mode
    :param: log10_A_vl
        Amplitude of the vector longitudinal mode
    :param: log10_A_sl
        Amplitude of the scalar longitudinal mode
    :param: kappa
        Relative amplitude of dipole radiation over quadrupolar radiation
    :param: p_dist
        Pulsar distance in kpc
    :param: alpha_tt
        spectral index of the TT mode.
    :param: alpha_alt
        spectral index of the non-Einsteinian modes.

    Reference: Cornish et al. (2017), https://arxiv.org/abs/1712.07132
    Author: S. R. Taylor, N. Laal (2020)
    '''
    # per-frequency bin widths (two Fourier components per frequency)
    df = np.diff(np.concatenate((np.array([0]), f[::2])))
    euler_e = 0.5772156649
    # pulsar distance converted from kpc to light-seconds
    pdist = p_dist * const.kpc / const.c

    # per-mode auto-correlation factors (frequency-dependent for the
    # longitudinal modes)
    orf_aa_tt = (2/3) * np.ones(len(f))
    orf_aa_st = (2/3) * np.ones(len(f))
    orf_aa_vl = 2*np.log(4*np.pi*f*pdist) - 14/3 + 2*euler_e
    orf_aa_sl = np.pi**2*f*pdist/4 - \
        np.log(4*np.pi*f*pdist) + 37/24 - euler_e

    # dipole-vs-quadrupole radiation weighting
    prefactor = (1 + kappa**2) / (1 + kappa**2 * (f / const.fyr)**(-2/3))
    gwpol_amps = 10**(2*np.array([log10_A_tt, log10_A_st,
                                  log10_A_vl, log10_A_sl]))
    gwpol_factors = np.array([orf_aa_tt*gwpol_amps[0],
                              orf_aa_st*gwpol_amps[1],
                              orf_aa_vl*gwpol_amps[2],
                              orf_aa_sl*gwpol_amps[3]])

    # TT mode keeps its own spectral index; all alternative modes share
    # alpha_alt
    S_psd = prefactor * (gwpol_factors[0, :] * (f / const.fyr)**(2 * alpha_tt) +
                         np.sum(gwpol_factors[1:, :], axis=0) *
                         (f / const.fyr)**(2 * alpha_alt)) / \
        (8*np.pi**2*f**3)

    return S_psd * np.repeat(df, 2)
| 11,164 | 29.757576 | 113 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/models.py | # -*- coding: utf-8 -*-
import functools
from collections import OrderedDict
import numpy as np
from enterprise import constants as const
from enterprise.signals import (deterministic_signals, gp_signals, parameter,
selections, signal_base, white_signals)
from enterprise.signals.signal_base import LogLikelihood
from enterprise_extensions import chromatic as chrom
from enterprise_extensions import deterministic
from enterprise_extensions import dropout as do
from enterprise_extensions import model_utils
from enterprise_extensions.blocks import (bwm_block, bwm_sglpsr_block,
chromatic_noise_block,
common_red_noise_block,
dm_noise_block, red_noise_block,
white_noise_block)
from enterprise_extensions.chromatic.solar_wind import solar_wind_block
from enterprise_extensions.timing import timing_block
# from enterprise.signals.signal_base import LookupLikelihood
def model_singlepsr_noise(psr, tm_var=False, tm_linear=False,
                          tmparam_list=None,
                          red_var=True, psd='powerlaw', red_select=None,
                          noisedict=None, tm_svd=False, tm_norm=True,
                          white_vary=True, components=30, upper_limit=False,
                          is_wideband=False, use_dmdata=False, tnequad=False,
                          dmjump_var=False, gamma_val=None, dm_var=False,
                          dm_type='gp', dmgp_kernel='diag', dm_psd='powerlaw',
                          dm_nondiag_kernel='periodic', dmx_data=None,
                          dm_annual=False, gamma_dm_val=None,
                          dm_dt=15, dm_df=200,
                          chrom_gp=False, chrom_gp_kernel='nondiag',
                          chrom_psd='powerlaw', chrom_idx=4, chrom_quad=False,
                          chrom_kernel='periodic',
                          chrom_dt=15, chrom_df=200,
                          dm_expdip=False, dmexp_sign='negative',
                          dm_expdip_idx=2,
                          dm_expdip_tmin=None, dm_expdip_tmax=None,
                          num_dmdips=1, dmdip_seqname=None,
                          dm_cusp=False, dm_cusp_sign='negative',
                          dm_cusp_idx=2, dm_cusp_sym=False,
                          dm_cusp_tmin=None, dm_cusp_tmax=None,
                          num_dm_cusps=1, dm_cusp_seqname=None,
                          dm_dual_cusp=False, dm_dual_cusp_tmin=None,
                          dm_dual_cusp_tmax=None, dm_dual_cusp_sym=False,
                          dm_dual_cusp_idx1=2, dm_dual_cusp_idx2=4,
                          dm_dual_cusp_sign='negative', num_dm_dual_cusps=1,
                          dm_dual_cusp_seqname=None,
                          dm_sw_deter=False, dm_sw_gp=False,
                          swgp_prior=None, swgp_basis=None,
                          coefficients=False, extra_sigs=None,
                          psr_model=False, factorized_like=False,
                          Tspan=None, fact_like_gamma=13./3, gw_components=10,
                          fact_like_logmin=None, fact_like_logmax=None,
                          select='backend', tm_marg=False, dense_like=False, ng_twg_setup=False, wb_efac_sigma=0.25):
    """
    Single pulsar noise model.

    :param psr: enterprise pulsar object
    :param tm_var: explicitly vary the timing model parameters
    :param tm_linear: vary the timing model in the linear approximation
    :param tmparam_list: an explicit list of timing model parameters to vary
    :param red_var: include red noise in the model
    :param psd: red noise psd model
    :param red_select: selection properties for red noise
    :param noisedict: dictionary of noise parameters
    :param tm_svd: boolean for svd-stabilised timing model design matrix
    :param tm_norm: normalize the timing model, or provide custom normalization
    :param white_vary: boolean for varying white noise or keeping fixed
    :param components: number of modes in Fourier domain processes
    :param upper_limit: whether to do an upper-limit analysis
    :param is_wideband: whether input TOAs are wideband TOAs; will exclude
           ecorr from the white noise model
    :param use_dmdata: whether to use DM data (WidebandTimingModel) if
           is_wideband
    :param tnequad: use the TempoNest definition of EQUAD
    :param dmjump_var: vary the DMJUMP parameter of the wideband timing model
    :param gamma_val: red noise spectral index to fix
    :param dm_var: whether to explicitly model DM-variations
    :param dm_type: gaussian process ('gp') or dmx ('dmx')
    :param dmgp_kernel: diagonal in frequency or non-diagonal
    :param dm_psd: power-spectral density of DM variations
    :param dm_nondiag_kernel: type of time-domain DM GP kernel
    :param dmx_data: supply the DMX data from par files
    :param dm_annual: include an annual DM signal
    :param gamma_dm_val: spectral index of power-law DM variations
    :param dm_dt: time-scale for DM linear interpolation basis (days)
    :param dm_df: frequency-scale for DM linear interpolation basis (MHz)
    :param chrom_gp: include general chromatic noise
    :param chrom_gp_kernel: GP kernel type to use in chrom ['diag','nondiag']
    :param chrom_psd: power-spectral density of chromatic noise
           ['powerlaw','tprocess','free_spectrum']
    :param chrom_idx: frequency scaling of chromatic noise
    :param chrom_kernel: Type of 'nondiag' time-domain chrom GP kernel to use
           ['periodic', 'sq_exp','periodic_rfband', 'sq_exp_rfband']
    :param chrom_quad: Whether to add a quadratic chromatic term. Boolean
    :param chrom_dt: time-scale for chromatic linear interpolation basis (days)
    :param chrom_df: frequency-scale for chromatic linear interpolation basis (MHz)
    :param dm_expdip: include a DM exponential dip
    :param dmexp_sign: set the sign parameter for dip
    :param dm_expdip_idx: chromatic index of exponential dip
    :param dm_expdip_tmin: sampling minimum of DM dip epoch
    :param dm_expdip_tmax: sampling maximum of DM dip epoch
    :param num_dmdips: number of dm exponential dips
    :param dmdip_seqname: name of dip sequence
    :param dm_cusp: include a DM exponential cusp
    :param dm_cusp_sign: set the sign parameter for cusp
    :param dm_cusp_idx: chromatic index of exponential cusp
    :param dm_cusp_tmin: sampling minimum of DM cusp epoch
    :param dm_cusp_tmax: sampling maximum of DM cusp epoch
    :param dm_cusp_sym: make exponential cusp symmetric
    :param num_dm_cusps: number of dm exponential cusps
    :param dm_cusp_seqname: name of cusp sequence
    :param dm_dual_cusp: include a DM cusp with two chromatic indices
    :param dm_dual_cusp_tmin: sampling minimum of DM dual cusp epoch
    :param dm_dual_cusp_tmax: sampling maximum of DM dual cusp epoch
    :param dm_dual_cusp_idx1: first chromatic index of DM dual cusp
    :param dm_dual_cusp_idx2: second chromatic index of DM dual cusp
    :param dm_dual_cusp_sym: make dual cusp symmetric
    :param dm_dual_cusp_sign: set the sign parameter for dual cusp
    :param num_dm_dual_cusps: number of DM dual cusps
    :param dm_dual_cusp_seqname: name of dual cusp sequence
    :param dm_sw_deter: use the deterministic solar wind model
    :param dm_sw_gp: add a Gaussian process perturbation to the deterministic
           solar wind model.
    :param swgp_prior: prior is currently set automatically
    :param swgp_basis: ['powerlaw', 'periodic', 'sq_exp']
    :param coefficients: explicitly include latent coefficients in model
    :param extra_sigs: Any additional `enterprise` signals to be added to the
           model.
    :param psr_model: Return the enterprise model instantiated on the pulsar
           rather than an instantiated PTA object, i.e. model(psr) rather than
           PTA(model(psr)).
    :param factorized_like: Whether to run a factorized likelihood analyis Boolean
    :param gw_components: number of modes in Fourier domain for a common
           process in a factorized likelihood calculation.
    :param fact_like_gamma: fixed common process spectral index
    :param fact_like_logmin: specify lower prior for common psd. This is a prior on log10_rho
           if common_psd is 'spectrum', else it is a prior on log10 amplitude
    :param fact_like_logmax: specify upper prior for common psd. This is a prior on log10_rho
           if common_psd is 'spectrum', else it is a prior on log10 amplitude
    :param Tspan: time baseline used to determine Fourier GP frequencies
    :param select: white noise selection ('backend' by default)
    :param tm_marg: Use marginalized timing model. In many cases this will speed
           up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evalute lnlikelihood
    :param ng_twg_setup: use a Normal(1.0, wb_efac_sigma) prior on DMEFAC
           (NANOGrav timing working group setup)
    :param wb_efac_sigma: width of the DMEFAC prior used when ng_twg_setup

    :return s: single pulsar noise model
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'

    # timing model
    if not tm_var:
        if (is_wideband and use_dmdata):
            # wideband timing model absorbs the DM measurements directly
            if dmjump_var:
                dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
            else:
                dmjump = parameter.Constant()
            if white_vary:
                if ng_twg_setup:
                    dmefac = parameter.Normal(1.0, wb_efac_sigma)
                else:
                    dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
                log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
            else:
                dmefac = parameter.Constant()
                log10_dmequad = parameter.Constant()
            s = gp_signals.WidebandTimingModel(dmefac=dmefac,
                                               log10_dmequad=log10_dmequad, dmjump=dmjump,
                                               selection=selections.Selection(
                                                   selections.by_backend),
                                               dmjump_selection=selections.Selection(
                                                   selections.by_frontend))
        else:
            if tm_marg:
                s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
            else:
                s = gp_signals.TimingModel(use_svd=tm_svd, normed=tm_norm,
                                           coefficients=coefficients)
    else:
        # create new attribute for enterprise pulsar object, recording the
        # original (value, error) of every timing-model parameter
        psr.tmparams_orig = OrderedDict.fromkeys(psr.t2pulsar.pars())
        for key in psr.tmparams_orig:
            psr.tmparams_orig[key] = (psr.t2pulsar[key].val,
                                      psr.t2pulsar[key].err)
        if not tm_linear:
            s = timing_block(tmparam_list=tmparam_list)
        else:
            # NOTE(review): the linear non-varying timing model is not
            # implemented; this branch leaves `s` undefined and the first
            # `s += ...` below will raise NameError.
            pass

    # red noise and common process
    if factorized_like:
        if Tspan is None:
            msg = 'Must specify Tspan to match amongst all pulsars when doing '
            msg += 'a factorized likelihood analysis.'
            raise ValueError(msg)
        s += common_red_noise_block(psd=psd, prior=amp_prior,
                                    Tspan=Tspan, components=gw_components,
                                    gamma_val=fact_like_gamma, delta_val=None,
                                    orf=None, name='gw',
                                    coefficients=coefficients,
                                    pshift=False, pseed=None,
                                    logmin=fact_like_logmin, logmax=fact_like_logmax)
    if red_var:
        s += red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                             components=components, gamma_val=gamma_val,
                             coefficients=coefficients, select=red_select)

    # DM variations
    if dm_var:
        if dm_type == 'gp':
            if dmgp_kernel == 'diag':
                s += dm_noise_block(gp_kernel=dmgp_kernel, psd=dm_psd,
                                    prior=amp_prior, components=components,
                                    gamma_val=gamma_dm_val,
                                    coefficients=coefficients)
            elif dmgp_kernel == 'nondiag':
                s += dm_noise_block(gp_kernel=dmgp_kernel,
                                    nondiag_kernel=dm_nondiag_kernel,
                                    dt=dm_dt, df=dm_df,
                                    coefficients=coefficients)
        elif dm_type == 'dmx':
            s += chrom.dmx_signal(dmx_data=dmx_data[psr.name])
        if dm_annual:
            s += chrom.dm_annual_signal()
        if chrom_gp:
            s += chromatic_noise_block(gp_kernel=chrom_gp_kernel,
                                       psd=chrom_psd, idx=chrom_idx,
                                       components=components,
                                       nondiag_kernel=chrom_kernel,
                                       dt=chrom_dt, df=chrom_df,
                                       include_quadratic=chrom_quad,
                                       coefficients=coefficients)

        if dm_expdip:
            # epoch bounds default to the full data span of this pulsar
            if dm_expdip_tmin is None and dm_expdip_tmax is None:
                tmin = [psr.toas.min() / const.day for ii in range(num_dmdips)]
                tmax = [psr.toas.max() / const.day for ii in range(num_dmdips)]
            else:
                tmin = (dm_expdip_tmin if isinstance(dm_expdip_tmin, list)
                        else [dm_expdip_tmin])
                tmax = (dm_expdip_tmax if isinstance(dm_expdip_tmax, list)
                        else [dm_expdip_tmax])
            if dmdip_seqname is not None:
                dmdipname_base = (['dmexp_' + nm for nm in dmdip_seqname]
                                  if isinstance(dmdip_seqname, list)
                                  else ['dmexp_' + dmdip_seqname])
            else:
                dmdipname_base = ['dmexp_{0}'.format(ii+1)
                                  for ii in range(num_dmdips)]
            dm_expdip_idx = (dm_expdip_idx if isinstance(dm_expdip_idx, list)
                             else [dm_expdip_idx])
            for dd in range(num_dmdips):
                s += chrom.dm_exponential_dip(tmin=tmin[dd], tmax=tmax[dd],
                                              idx=dm_expdip_idx[dd],
                                              sign=dmexp_sign,
                                              name=dmdipname_base[dd])
        if dm_cusp:
            if dm_cusp_tmin is None and dm_cusp_tmax is None:
                tmin = [psr.toas.min() / const.day for ii in range(num_dm_cusps)]
                tmax = [psr.toas.max() / const.day for ii in range(num_dm_cusps)]
            else:
                tmin = (dm_cusp_tmin if isinstance(dm_cusp_tmin, list)
                        else [dm_cusp_tmin])
                tmax = (dm_cusp_tmax if isinstance(dm_cusp_tmax, list)
                        else [dm_cusp_tmax])
            if dm_cusp_seqname is not None:
                cusp_name_base = 'dm_cusp_'+dm_cusp_seqname+'_'
            else:
                cusp_name_base = 'dm_cusp_'
            dm_cusp_idx = (dm_cusp_idx if isinstance(dm_cusp_idx, list)
                           else [dm_cusp_idx])
            dm_cusp_sign = (dm_cusp_sign if isinstance(dm_cusp_sign, list)
                            else [dm_cusp_sign])
            for dd in range(1, num_dm_cusps+1):
                s += chrom.dm_exponential_cusp(tmin=tmin[dd-1],
                                               tmax=tmax[dd-1],
                                               idx=dm_cusp_idx[dd-1],
                                               sign=dm_cusp_sign[dd-1],
                                               symmetric=dm_cusp_sym,
                                               name=cusp_name_base+str(dd))
        if dm_dual_cusp:
            # BUGFIX: the epoch-bound check previously tested dm_cusp_tmax
            # and the name used dm_cusp_seqname, i.e. the *single*-cusp
            # options, so dual-cusp settings were silently ignored (and a
            # lone dm_dual_cusp_seqname raised TypeError).
            if dm_dual_cusp_tmin is None and dm_dual_cusp_tmax is None:
                tmin = psr.toas.min() / const.day
                tmax = psr.toas.max() / const.day
            else:
                tmin = dm_dual_cusp_tmin
                tmax = dm_dual_cusp_tmax
            if dm_dual_cusp_seqname is not None:
                dual_cusp_name_base = 'dm_dual_cusp_'+dm_dual_cusp_seqname+'_'
            else:
                dual_cusp_name_base = 'dm_dual_cusp_'
            for dd in range(1, num_dm_dual_cusps+1):
                s += chrom.dm_dual_exp_cusp(tmin=tmin, tmax=tmax,
                                            idx1=dm_dual_cusp_idx1,
                                            idx2=dm_dual_cusp_idx2,
                                            sign=dm_dual_cusp_sign,
                                            symmetric=dm_dual_cusp_sym,
                                            name=dual_cusp_name_base+str(dd))
        if dm_sw_deter:
            # the solar wind block always uses this pulsar's own data span;
            # use a local so the user-supplied Tspan is not clobbered
            sw_Tspan = psr.toas.max() - psr.toas.min()
            s += solar_wind_block(ACE_prior=True, include_swgp=dm_sw_gp,
                                  swgp_prior=swgp_prior, swgp_basis=swgp_basis,
                                  Tspan=sw_Tspan)

    if extra_sigs is not None:
        s += extra_sigs

    # adding white-noise, and acting on psr objects
    # (ECORR only for narrowband NANOGrav/CHIME data)
    if ('NANOGrav' in psr.flags['pta'] or 'CHIME' in psr.flags['f']) and not is_wideband:
        s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True,
                                   tnequad=tnequad, select=select)
        model = s2(psr)
        if psr_model:
            Model = s2
    else:
        s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False,
                                   tnequad=tnequad, select=select, ng_twg_setup=ng_twg_setup, wb_efac_sigma=wb_efac_sigma)
        model = s3(psr)
        if psr_model:
            Model = s3

    if psr_model:
        # return the (un-instantiated) signal model rather than a PTA
        return Model
    else:
        # set up PTA
        if dense_like:
            pta = signal_base.PTA([model], lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
        else:
            pta = signal_base.PTA([model])

        # set white noise parameters
        if not white_vary or (is_wideband and use_dmdata):
            if noisedict is None:
                print('No noise dictionary provided!...')
            else:
                pta.set_default_params(noisedict)

        return pta
def model_1(psrs, psd='powerlaw', noisedict=None, white_vary=False,
            components=30, upper_limit=False, bayesephem=False, tnequad=False,
            be_type='orbel', is_wideband=False, use_dmdata=False, Tspan=None,
            select='backend', tm_marg=False, dense_like=False, tm_svd=False):
    """
    Build a PTA containing only per-pulsar white and red noise.

    Per pulsar the model comprises fixed EFAC/EQUAD (and ECORR for
    narrowband NANOGrav data) per backend/receiver system, a power-law
    red-noise process with ``components`` sampling frequencies, and a
    linear timing model. Globally, an optional BayesEphem physical
    ephemeris model can be added.

    :param psrs: list of enterprise Pulsar instances
    :param psd: PSD for the red noise [e.g. powerlaw (default), turnover,
        tprocess]
    :param noisedict: dictionary of pulsar noise properties; applied when
        white noise is held fixed
    :param white_vary: vary the white noise instead of keeping it fixed
    :param components: number of Fourier modes for the red-noise process
    :param upper_limit: use uniform amplitude priors for an upper-limit
        analysis (log-uniform otherwise)
    :param bayesephem: include the BayesEphem model (default False)
    :param tnequad: use the TempoNest definition of EQUAD
    :param be_type: BayesEphem flavor [orbel, orbel-v2, setIII]
    :param is_wideband: TOAs are wideband; ECORR is excluded from the
        white-noise model
    :param use_dmdata: use DM data (WidebandTimingModel) if is_wideband
    :param Tspan: time baseline for GP frequencies; derived from the data
        when not given
    :param select: white-noise selection (default 'backend')
    :param tm_marg: use the marginalized timing model (often much faster)
    :param dense_like: use the dense-Cholesky likelihood
    :param tm_svd: SVD-stabilise the timing-model design matrix
    :return: signal_base.PTA object
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'

    # GP frequencies are spaced by the longest data span unless overridden.
    if Tspan is None:
        Tspan = model_utils.get_tspan(psrs)

    # Timing model: wideband variant absorbs the DM data, otherwise a
    # (possibly marginalized) linear timing model.
    if is_wideband and use_dmdata:
        dmjump = parameter.Constant()
        if white_vary:
            dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
            log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
        else:
            dmefac = parameter.Constant()
            log10_dmequad = parameter.Constant()
        base = gp_signals.WidebandTimingModel(
            dmefac=dmefac, log10_dmequad=log10_dmequad, dmjump=dmjump,
            selection=selections.Selection(selections.by_backend),
            dmjump_selection=selections.Selection(selections.by_frontend))
    elif tm_marg:
        base = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
    else:
        base = gp_signals.TimingModel(use_svd=tm_svd)

    # Per-pulsar achromatic red noise.
    base += red_noise_block(psd=psd, prior=amp_prior,
                            Tspan=Tspan, components=components)

    # Optional physical ephemeris model.
    if bayesephem:
        base += deterministic_signals.PhysicalEphemerisSignal(
            use_epoch_toas=True, model=be_type)

    # White noise per pulsar: ECORR only for narrowband NANOGrav data.
    models = []
    for p in psrs:
        use_ecorr = 'NANOGrav' in p.flags['pta'] and not is_wideband
        wn = white_noise_block(vary=white_vary, inc_ecorr=use_ecorr,
                               tnequad=tnequad, select=select)
        models.append((base + wn)(p))

    # Assemble the PTA with the requested likelihood implementation.
    if dense_like:
        pta = signal_base.PTA(
            models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
    else:
        pta = signal_base.PTA(models)

    # Fixed white-noise values come from the noise dictionary.
    if not white_vary or (is_wideband and use_dmdata):
        if noisedict is None:
            print('No noise dictionary provided!...')
        else:
            pta.set_default_params(noisedict)

    return pta
def model_2a(psrs, psd='powerlaw', noisedict=None, components=30,
             n_rnfreqs=None, n_gwbfreqs=None, gamma_common=None,
             delta_common=None, upper_limit=False, bayesephem=False,
             be_type='setIII', white_vary=False, is_wideband=False,
             use_dmdata=False, Tspan=None, select='backend', tnequad=False,
             pshift=False, pseed=None, psr_models=False,
             tm_marg=False, dense_like=False, tm_svd=False):
    """
    Reads in list of enterprise Pulsar instance and returns a PTA
    instantiated with model 2A from the analysis paper:

    per pulsar:
        1. fixed EFAC per backend/receiver system
        2. fixed EQUAD per backend/receiver system
        3. fixed ECORR per backend/receiver system
        4. Red noise modeled as a power-law with 30 sampling frequencies
        5. Linear timing model.

    global:
        1.Common red noise modeled with user defined PSD with
        30 sampling frequencies. Available PSDs are
        ['powerlaw', 'turnover' 'spectrum']
        2. Optional physical ephemeris modeling.

    :param psd:
        PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover' 'spectrum']. 'powerlaw' is default
        value.
    :param noisedict:
        Dictionary of pulsar noise properties. Can provide manually,
        or the code will attempt to find it.
    :param white_vary:
        boolean for varying white noise or keeping fixed.
    :param gamma_common:
        Fixed common red process spectral index value. By default we
        vary the spectral index over the range [0, 7].
    :param delta_common:
        Fixed common red process spectral-index offset (passed through to
        the common red noise block; only used by broken-power-law PSDs).
    :param upper_limit:
        Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when perfoming upper limits it
        is recommended that the spectral index also be fixed to a specific
        value.
    :param bayesephem:
        Include BayesEphem model. Set to False by default
    :param be_type:
        orbel, orbel-v2, setIII
    :param is_wideband:
        Whether input TOAs are wideband TOAs; will exclude ecorr from the white
        noise model.
    :param use_dmdata: whether to use DM data (WidebandTimingModel) if
        is_wideband.
    :param Tspan: time baseline used to determine Fourier GP frequencies;
        derived from data if not specified
    :param psr_models:
        Return list of psr models rather than signal_base.PTA object.
    :param n_rnfreqs:
        Number of frequencies to use in achromatic rednoise model.
    :param n_gwbfreqs:
        Number of frequencies to use in the GWB model.
    :param pshift:
        Option to use a random phase shift in design matrix. For testing the
        null hypothesis.
    :param pseed:
        Option to provide a seed for the random phase shift.
    :param tm_marg: Use marginalized timing model. In many cases this will speed
        up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evalute lnlikelihood
    :param tm_svd: boolean for svd-stabilised timing model design matrix
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'

    # find the maximum time span to set GW frequency sampling
    if Tspan is None:
        Tspan = model_utils.get_tspan(psrs)

    # both mode counts default to the shared `components` value
    if n_gwbfreqs is None:
        n_gwbfreqs = components

    if n_rnfreqs is None:
        n_rnfreqs = components

    # timing model
    if (is_wideband and use_dmdata):
        dmjump = parameter.Constant()
        if white_vary:
            dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
            log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
        else:
            dmefac = parameter.Constant()
            log10_dmequad = parameter.Constant()
        s = gp_signals.WidebandTimingModel(dmefac=dmefac,
                                           log10_dmequad=log10_dmequad, dmjump=dmjump,
                                           selection=selections.Selection(selections.by_backend),
                                           dmjump_selection=selections.Selection(selections.by_frontend))
    else:
        if tm_marg:
            s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
        else:
            s = gp_signals.TimingModel(use_svd=tm_svd)

    # red noise
    s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=n_rnfreqs)

    # common red noise block
    s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                                components=n_gwbfreqs, gamma_val=gamma_common,
                                delta_val=delta_common, name='gw',
                                pshift=pshift, pseed=pseed)

    # ephemeris model
    if bayesephem:
        s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
                                                           model=be_type)

    # adding white-noise, and acting on psr objects
    models = []
    for p in psrs:
        if 'NANOGrav' in p.flags['pta'] and not is_wideband:
            s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True,
                                       tnequad=tnequad, select=select)
            models.append(s2(p))
        else:
            s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False,
                                       tnequad=tnequad, select=select)
            models.append(s3(p))

    if psr_models:
        # caller only wants the per-pulsar signal models; skip the
        # (expensive) PTA construction entirely.
        # BUGFIX: previously a PTA was built unconditionally *and* rebuilt
        # in the else-branch below, doing the costly setup twice and
        # discarding it when psr_models=True.
        return models

    # set up PTA
    if dense_like:
        pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
    else:
        pta = signal_base.PTA(models)

    # set white noise parameters
    if noisedict is None:
        print('No noise dictionary provided!...')
    else:
        pta.set_default_params(noisedict)

    return pta
def model_general(psrs, tm_var=False, tm_linear=False, tmparam_list=None,
                  tm_svd=False, tm_norm=True, noisedict=None, white_vary=False,
                  Tspan=None, modes=None, wgts=None, logfreq=False, nmodes_log=10,
                  common_psd='powerlaw', common_components=30, tnequad=False,
                  log10_A_common=None, gamma_common=None,
                  common_logmin=None, common_logmax=None,
                  orf='crn', orf_names=None, orf_ifreq=0, leg_lmax=5,
                  upper_limit_common=None, upper_limit=False,
                  red_var=True, red_psd='powerlaw', red_components=30, upper_limit_red=None,
                  red_select=None, red_breakflat=False, red_breakflat_fq=None,
                  bayesephem=False, be_type='setIII_1980', is_wideband=False, use_dmdata=False,
                  dm_var=False, dm_type='gp', dm_psd='powerlaw', dm_components=30,
                  upper_limit_dm=None, dm_annual=False, dm_chrom=False, dmchrom_psd='powerlaw',
                  dmchrom_idx=4, gequad=False, coefficients=False, pshift=False,
                  select='backend', tm_marg=False, dense_like=False,
                  delta_common=None):
    """
    Reads in list of enterprise Pulsar instances and returns a PTA
    object instantiated with user-supplied options.

    :param tm_var: boolean to vary timing model coefficients.
        [default = False]
    :param tm_linear: boolean to vary timing model under linear approximation.
        [default = False]
    :param tmparam_list: list of timing model parameters to vary.
        [default = None]
    :param tm_svd: stabilize timing model designmatrix with SVD.
        [default = False]
    :param tm_norm: normalize the timing model design matrix, or provide custom
        normalization. Alternative to 'tm_svd'.
        [default = True]
    :param noisedict: Dictionary of pulsar noise properties. Can provide manually,
        or the code will attempt to find it.
        [default = None]
    :param white_vary: boolean for varying white noise or keeping fixed.
        [default = False]
    :param Tspan: timespan assumed for describing stochastic processes,
        in units of seconds. If None provided will find span of pulsars.
        [default = None]
    :param modes: list of frequencies on which to describe red processes.
        [default = None]
    :param wgts: sqrt summation weights for each frequency bin, i.e. sqrt(delta f).
        [default = None]
    :param logfreq: boolean for including log-spaced bins.
        [default = False]
    :param nmodes_log: number of log-spaced bins below 1/T.
        [default = 10]
    :param common_psd: psd of common process.
        ['powerlaw', 'spectrum', 'turnover', 'turnover_knee,', 'broken_powerlaw']
        [default = 'powerlaw']
    :param common_components: number of frequencies starting at 1/T for common process.
        [default = 30]
    :param log10_A_common: value of fixed log10_A_common parameter for
        fixed amplitude analyses.
        [default = None]
    :param gamma_common: fixed common red process spectral index value. By default we
        vary the spectral index over the range [0, 7].
        [default = None]
    :param common_logmin: specify lower prior for common psd. This is a prior on log10_rho
        if common_psd is 'spectrum', else it is a prior on log amplitude
    :param common_logmax: specify upper prior for common psd. This is a prior on log10_rho
        if common_psd is 'spectrum', else it is a prior on log amplitude
    :param orf: comma de-limited string of multiple common processes with different orfs.
        [default = crn]
    :param orf_names: comma de-limited string of process names for different orfs. Manual
        control of these names is useful for embedding model_general within a hypermodel
        analysis for a process with and without hd correlations where we want to avoid
        parameter duplication.
        [default = None]
    :param orf_ifreq:
        Frequency bin at which to start the Hellings & Downs function with
        numbering beginning at 0. Currently only works with freq_hd orf.
        [default = 0]
    :param leg_lmax:
        Maximum multipole of a Legendre polynomial series representation
        of the overlap reduction function.
        [default = 5]
    :param upper_limit_common: perform upper limit on common red noise amplitude. Note
        that when perfoming upper limits it is recommended that the spectral index also
        be fixed to a specific value.
        [default = False]
    :param upper_limit: apply upper limit priors to all red processes.
        [default = False]
    :param red_var: boolean to switch on/off intrinsic red noise.
        [default = True]
    :param red_psd: psd of intrinsic red process.
        ['powerlaw', 'spectrum', 'turnover', 'tprocess', 'tprocess_adapt']
        [default = 'powerlaw']
    :param red_components: number of frequencies starting at 1/T for intrinsic red process.
        [default = 30]
    :param upper_limit_red: perform upper limit on intrinsic red noise amplitude. Note
        that when perfoming upper limits it is recommended that the spectral index also
        be fixed to a specific value.
        [default = False]
    :param red_select: selection properties for intrinsic red noise.
        ['backend', 'band', 'band+', None]
        [default = None]
    :param red_breakflat: break red noise spectrum and make flat above certain frequency.
        [default = False]
    :param red_breakflat_fq: break frequency for 'red_breakflat'.
        [default = None]
    :param bayesephem: boolean to include BayesEphem model.
        [default = False]
    :param be_type: flavor of bayesephem model based on how partials are computed.
        ['orbel', 'orbel-v2', 'setIII', 'setIII_1980']
        [default = 'setIII_1980']
    :param is_wideband: boolean for whether input TOAs are wideband TOAs. Will exclude
        ecorr from the white noise model.
        [default = False]
    :param use_dmdata: whether to use DM data (WidebandTimingModel) if is_wideband.
        [default = False]
    :param dm_var: boolean for explicitly searching for DM variations.
        [default = False]
    :param dm_type: type of DM variations.
        ['gp', other choices selected with additional options; see below]
        [default = 'gp']
    :param dm_psd: psd of DM GP.
        ['powerlaw', 'spectrum', 'turnover', 'tprocess', 'tprocess_adapt']
        [default = 'powerlaw']
    :param dm_components: number of frequencies starting at 1/T for DM GP.
        [default = 30]
    :param upper_limit_dm: perform upper limit on DM GP. Note that when perfoming
        upper limits it is recommended that the spectral index also be
        fixed to a specific value.
        [default = False]
    :param dm_annual: boolean to search for an annual DM trend.
        [default = False]
    :param dm_chrom: boolean to search for a generic chromatic GP.
        [default = False]
    :param dmchrom_psd: psd of generic chromatic GP.
        ['powerlaw', 'spectrum', 'turnover']
        [default = 'powerlaw']
    :param dmchrom_idx: spectral index of generic chromatic GP.
        [default = 4]
    :param gequad: boolean to search for a global EQUAD.
        [default = False]
    :param coefficients: boolean to form full hierarchical PTA object;
        (no analytic latent-coefficient marginalization)
        [default = False]
    :param pshift: boolean to add random phase shift to red noise Fourier design
        matrices for false alarm rate studies.
        [default = False]
    :param tm_marg: Use marginalized timing model. In many cases this will speed
        up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evalute lnlikelihood

    Default PTA object composition:
        1. fixed EFAC per backend/receiver system (per pulsar)
        2. fixed EQUAD per backend/receiver system (per pulsar)
        3. fixed ECORR per backend/receiver system (per pulsar)
        4. Red noise modeled as a power-law with 30 sampling frequencies
        (per pulsar)
        5. Linear timing model (per pulsar)
        6. Common-spectrum uncorrelated process modeled as a power-law with
        30 sampling frequencies. (global)
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'

    # per-process priors fall back to the global `upper_limit` choice only
    # when none of the per-process overrides is given
    gp_priors = [upper_limit_red, upper_limit_dm, upper_limit_common]
    if all(ii is None for ii in gp_priors):
        amp_prior_red = amp_prior
        amp_prior_dm = amp_prior
        amp_prior_common = amp_prior
    else:
        amp_prior_red = 'uniform' if upper_limit_red else 'log-uniform'
        amp_prior_dm = 'uniform' if upper_limit_dm else 'log-uniform'
        amp_prior_common = 'uniform' if upper_limit_common else 'log-uniform'

    # timing model
    if not tm_var and not use_dmdata:
        if tm_marg:
            s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
        else:
            s = gp_signals.TimingModel(use_svd=tm_svd, normed=tm_norm,
                                       coefficients=coefficients)
    elif not tm_var and use_dmdata:
        # wideband timing model absorbing the DM measurements
        dmjump = parameter.Constant()
        if white_vary:
            dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
            log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
            # dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
        else:
            dmefac = parameter.Constant()
            log10_dmequad = parameter.Constant()
            # dmjump = parameter.Constant()
        s = gp_signals.WidebandTimingModel(dmefac=dmefac,
                                           log10_dmequad=log10_dmequad, dmjump=dmjump,
                                           selection=selections.Selection(selections.by_backend),
                                           dmjump_selection=selections.Selection(selections.by_frontend))
    else:
        # create new attribute for enterprise pulsar object, recording the
        # original (value, error) of every timing-model parameter
        for p in psrs:
            p.tmparams_orig = OrderedDict.fromkeys(p.t2pulsar.pars())
            for key in p.tmparams_orig:
                p.tmparams_orig[key] = (p.t2pulsar[key].val,
                                        p.t2pulsar[key].err)
        if not tm_linear:
            s = timing_block(tmparam_list=tmparam_list)
        else:
            # NOTE(review): the linear varying-timing-model option is not
            # implemented; this branch leaves `s` undefined and the first
            # `s += ...` below will raise NameError.
            pass

    # find the maximum time span to set GW frequency sampling
    if Tspan is not None:
        Tspan = Tspan
    else:
        Tspan = model_utils.get_tspan(psrs)

    if logfreq:
        # replace linear-only bins with a mixed log+linear frequency grid
        fmin = 10.0
        modes, wgts = model_utils.linBinning(Tspan, nmodes_log,
                                             1.0 / fmin / Tspan,
                                             common_components, nmodes_log)
        wgts = wgts**2.0

    # red noise
    if red_var:
        s += red_noise_block(psd=red_psd, prior=amp_prior_red, Tspan=Tspan,
                             components=red_components, modes=modes, wgts=wgts,
                             coefficients=coefficients,
                             select=red_select, break_flat=red_breakflat,
                             break_flat_fq=red_breakflat_fq)

    # common red noise block: one block per comma-separated ORF entry
    crn = []
    if orf_names is None:
        orf_names = orf
    for elem, elem_name in zip(orf.split(','), orf_names.split(',')):
        # the amplitude is only pinned to log10_A_common for the
        # zero-diagonal ORF variants
        if elem == 'zero_diag_bin_orf' or elem == 'zero_diag_legendre_orf':
            log10_A_val = log10_A_common
        else:
            log10_A_val = None
        crn.append(common_red_noise_block(psd=common_psd, prior=amp_prior_common, Tspan=Tspan,
                                          components=common_components,
                                          log10_A_val=log10_A_val, gamma_val=gamma_common,
                                          delta_val=None, orf=elem, name='gw_{}'.format(elem_name),
                                          orf_ifreq=orf_ifreq, leg_lmax=leg_lmax,
                                          coefficients=coefficients, pshift=pshift, pseed=None,
                                          logmin=common_logmin, logmax=common_logmax))
        # orf_ifreq only affects freq_hd model.
        # leg_lmax only affects (zero_diag_)legendre_orf model.
    # sum the per-ORF blocks into a single signal
    crn = functools.reduce((lambda x, y: x+y), crn)
    s += crn

    # DM variations
    if dm_var:
        if dm_type == 'gp':
            s += dm_noise_block(gp_kernel='diag', psd=dm_psd,
                                prior=amp_prior_dm,
                                components=dm_components, gamma_val=None,
                                coefficients=coefficients)
        if dm_annual:
            s += chrom.dm_annual_signal()
        if dm_chrom:
            s += chromatic_noise_block(psd=dmchrom_psd, idx=dmchrom_idx,
                                       name='chromatic',
                                       components=dm_components,
                                       coefficients=coefficients)

    # ephemeris model
    if bayesephem:
        s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
                                                           model=be_type)

    # adding white-noise, and acting on psr objects
    # (ECORR only for narrowband NANOGrav data)
    models = []
    for p in psrs:
        if 'NANOGrav' in p.flags['pta'] and not is_wideband:
            s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True,
                                       tnequad=tnequad, select=select)
            if gequad:
                # NOTE(review): this branch uses EquadNoise while the
                # non-ECORR branch below uses TNEquadNoise — confirm the
                # asymmetry is intentional.
                s2 += white_signals.EquadNoise(log10_equad=parameter.Uniform(-8.5, -5),
                                               selection=selections.Selection(selections.no_selection),
                                               name='gequad')
            if '1713' in p.name and dm_var:
                # hard-coded DM exponential dip for PSR J1713+0747 over its
                # full data span; sign=False is passed where other call
                # sites use 'negative'/'positive' — presumably treated as
                # non-positive by dm_exponential_dip (verify).
                tmin = p.toas.min() / const.day
                tmax = p.toas.max() / const.day
                s3 = s2 + chrom.dm_exponential_dip(tmin=tmin, tmax=tmax, idx=2,
                                                   sign=False, name='dmexp')
                models.append(s3(p))
            else:
                models.append(s2(p))
        else:
            s4 = s + white_noise_block(vary=white_vary, inc_ecorr=False,
                                       tnequad=tnequad, select=select)
            if gequad:
                s4 += white_signals.TNEquadNoise(log10_tnequad=parameter.Uniform(-8.5, -5),
                                                 selection=selections.Selection(selections.no_selection),
                                                 name='gequad')
            if '1713' in p.name and dm_var:
                # same hard-coded J1713 dip as above
                tmin = p.toas.min() / const.day
                tmax = p.toas.max() / const.day
                s5 = s4 + chrom.dm_exponential_dip(tmin=tmin, tmax=tmax, idx=2,
                                                   sign=False, name='dmexp')
                models.append(s5(p))
            else:
                models.append(s4(p))

    # set up PTA
    if dense_like:
        pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
    else:
        pta = signal_base.PTA(models)

    # set white noise parameters
    if not white_vary or (is_wideband and use_dmdata):
        if noisedict is None:
            print('No noise dictionary provided!...')
        else:
            noisedict = noisedict
            pta.set_default_params(noisedict)

    return pta
def model_2b(psrs, psd='powerlaw', noisedict=None, white_vary=False,
             bayesephem=False, be_type='orbel', is_wideband=False, components=30,
             use_dmdata=False, Tspan=None, select='backend', pshift=False, tnequad=False,
             tm_marg=False, dense_like=False, tm_svd=False, upper_limit=False,
             gamma_common=None):
    """
    Reads in list of enterprise Pulsar instance and returns a PTA
    instantiated with model 2B from the analysis paper:
    per pulsar:
        1. fixed EFAC per backend/receiver system
        2. fixed EQUAD per backend/receiver system
        3. fixed ECORR per backend/receiver system
        4. Red noise modeled as a power-law with 30 sampling frequencies
        5. Linear timing model.
    global:
        1. Dipole spatially correlated signal modeled with PSD.
           Default PSD is powerlaw. Available options
           ['powerlaw', 'turnover', 'spectrum']
        2. Optional physical ephemeris modeling.
    :param psd:
        PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover' 'spectrum']. 'powerlaw' is default
        value.
    :param noisedict:
        Dictionary of pulsar noise properties. Can provide manually,
        or the code will attempt to find it.
    :param white_vary:
        boolean for varying white noise or keeping fixed.
    :param gamma_common:
        Fixed common red process spectral index value. By default we
        vary the spectral index over the range [0, 7].
    :param upper_limit:
        Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
        is recommended that the spectral index also be fixed to a specific
        value.
    :param bayesephem:
        Include BayesEphem model. Set to False by default
    :param be_type:
        orbel, orbel-v2, setIII
    :param is_wideband:
        Whether input TOAs are wideband TOAs; will exclude ecorr from the white
        noise model.
    :param use_dmdata: whether to use DM data (WidebandTimingModel) if
        is_wideband.
    :param Tspan: time baseline used to determine Fourier GP frequencies;
        derived from data if not specified
    :param pshift:
        Option to use a random phase shift in the common-signal design matrix.
    :param tm_marg: Use marginalized timing model. In many cases this will speed
        up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
    :param tm_svd: boolean for svd-stabilised timing model design matrix
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'

    # find the maximum time span to set GW frequency sampling
    if Tspan is None:
        Tspan = model_utils.get_tspan(psrs)

    # timing model: wideband DM-data variant or (optionally marginalized) standard one
    if is_wideband and use_dmdata:
        dmjump = parameter.Constant()
        if white_vary:
            dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
            log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
            # dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
        else:
            dmefac = parameter.Constant()
            log10_dmequad = parameter.Constant()
            # dmjump = parameter.Constant()
        s = gp_signals.WidebandTimingModel(dmefac=dmefac,
                                           log10_dmequad=log10_dmequad, dmjump=dmjump,
                                           selection=selections.Selection(selections.by_backend),
                                           dmjump_selection=selections.Selection(selections.by_frontend))
    else:
        if tm_marg:
            s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
        else:
            s = gp_signals.TimingModel(use_svd=tm_svd)

    # per-pulsar red noise
    s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)

    # dipole-correlated common signal
    s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                                components=components, gamma_val=gamma_common,
                                orf='dipole', name='dipole', pshift=pshift)

    # ephemeris model
    if bayesephem:
        s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
                                                           model=be_type)

    # adding white-noise, and acting on psr objects
    models = []
    for p in psrs:
        # ECORR applies only to NANOGrav narrowband data
        inc_ecorr = 'NANOGrav' in p.flags['pta'] and not is_wideband
        s2 = s + white_noise_block(vary=white_vary, inc_ecorr=inc_ecorr,
                                   tnequad=tnequad, select=select)
        models.append(s2(p))

    # set up PTA
    if dense_like:
        pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
    else:
        pta = signal_base.PTA(models)

    # set white noise parameters from the noise dictionary when they are held fixed
    if not white_vary or (is_wideband and use_dmdata):
        if noisedict is None:
            print('No noise dictionary provided!...')
        else:
            pta.set_default_params(noisedict)

    return pta
def model_2c(psrs, psd='powerlaw', noisedict=None, white_vary=False,
             components=30, gamma_common=None, upper_limit=False, tnequad=False,
             bayesephem=False, be_type='orbel', is_wideband=False,
             use_dmdata=False, Tspan=None, select='backend', tm_marg=False,
             dense_like=False, tm_svd=False):
    """
    Reads in list of enterprise Pulsar instance and returns a PTA
    instantiated with model 2C from the analysis paper:
    per pulsar:
        1. fixed EFAC per backend/receiver system
        2. fixed EQUAD per backend/receiver system
        3. fixed ECORR per backend/receiver system
        4. Red noise modeled as a power-law with 30 sampling frequencies
        5. Linear timing model.
    global:
        1. Dipole spatially correlated signal modeled with PSD.
           Default PSD is powerlaw. Available options
           ['powerlaw', 'turnover', 'spectrum']
        2. Monopole spatially correlated signal modeled with PSD.
           Default PSD is powerlaw. Available options
           ['powerlaw', 'turnover', 'spectrum']
        3. Optional physical ephemeris modeling.
    :param psd:
        PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover' 'spectrum']. 'powerlaw' is default
        value.
    :param noisedict:
        Dictionary of pulsar noise properties. Can provide manually,
        or the code will attempt to find it.
    :param white_vary:
        boolean for varying white noise or keeping fixed.
    :param gamma_common:
        Fixed common red process spectral index value. By default we
        vary the spectral index over the range [0, 7].
    :param upper_limit:
        Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
        is recommended that the spectral index also be fixed to a specific
        value.
    :param bayesephem:
        Include BayesEphem model. Set to False by default
    :param be_type:
        orbel, orbel-v2, setIII
    :param is_wideband:
        Whether input TOAs are wideband TOAs; will exclude ecorr from the white
        noise model.
    :param use_dmdata: whether to use DM data (WidebandTimingModel) if
        is_wideband.
    :param Tspan: time baseline used to determine Fourier GP frequencies;
        derived from data if not specified
    :param tm_marg: Use marginalized timing model. In many cases this will speed
        up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
    :param tm_svd: boolean for svd-stabilised timing model design matrix
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'

    # find the maximum time span to set GW frequency sampling
    if Tspan is None:
        Tspan = model_utils.get_tspan(psrs)

    # timing model: wideband DM-data variant or (optionally marginalized) standard one
    if is_wideband and use_dmdata:
        dmjump = parameter.Constant()
        if white_vary:
            dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
            log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
            # dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
        else:
            dmefac = parameter.Constant()
            log10_dmequad = parameter.Constant()
            # dmjump = parameter.Constant()
        s = gp_signals.WidebandTimingModel(dmefac=dmefac,
                                           log10_dmequad=log10_dmequad, dmjump=dmjump,
                                           selection=selections.Selection(selections.by_backend),
                                           dmjump_selection=selections.Selection(selections.by_frontend))
    else:
        if tm_marg:
            s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
        else:
            s = gp_signals.TimingModel(use_svd=tm_svd)

    # per-pulsar red noise
    s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)

    # dipole-correlated common signal
    s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                                components=components, gamma_val=gamma_common,
                                orf='dipole', name='dipole')

    # monopole-correlated common signal
    s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                                components=components, gamma_val=gamma_common,
                                orf='monopole', name='monopole')

    # ephemeris model
    if bayesephem:
        s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
                                                           model=be_type)

    # adding white-noise, and acting on psr objects
    models = []
    for p in psrs:
        # ECORR applies only to NANOGrav narrowband data
        inc_ecorr = 'NANOGrav' in p.flags['pta'] and not is_wideband
        s2 = s + white_noise_block(vary=white_vary, inc_ecorr=inc_ecorr,
                                   tnequad=tnequad, select=select)
        models.append(s2(p))

    # set up PTA
    if dense_like:
        pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
    else:
        pta = signal_base.PTA(models)

    # set white noise parameters from the noise dictionary when they are held fixed
    if not white_vary or (is_wideband and use_dmdata):
        if noisedict is None:
            print('No noise dictionary provided!...')
        else:
            pta.set_default_params(noisedict)

    return pta
def model_2d(psrs, psd='powerlaw', noisedict=None, white_vary=False,
             components=30, n_rnfreqs=None, n_gwbfreqs=None,
             gamma_common=None, upper_limit=False, tnequad=False,
             bayesephem=False, be_type='orbel', is_wideband=False,
             use_dmdata=False, Tspan=None, select='backend', pshift=False,
             tm_marg=False, dense_like=False, tm_svd=False):
    """
    Reads in list of enterprise Pulsar instance and returns a PTA
    instantiated with model 2D from the analysis paper:
    per pulsar:
        1. fixed EFAC per backend/receiver system
        2. fixed EQUAD per backend/receiver system
        3. fixed ECORR per backend/receiver system
        4. Red noise modeled as a power-law with 30 sampling frequencies
        5. Linear timing model.
    global:
        1. Monopole spatially correlated signal modeled with PSD.
           Default PSD is powerlaw. Available options
           ['powerlaw', 'turnover', 'spectrum']
        2. Optional physical ephemeris modeling.
    :param psd:
        PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover' 'spectrum']. 'powerlaw' is default
        value.
    :param noisedict:
        Dictionary of pulsar noise properties. Can provide manually,
        or the code will attempt to find it.
    :param white_vary:
        boolean for varying white noise or keeping fixed.
    :param n_rnfreqs:
        Number of frequencies for per-pulsar red noise; defaults to `components`.
    :param n_gwbfreqs:
        Number of frequencies for the common signal; defaults to `components`.
    :param gamma_common:
        Fixed common red process spectral index value. By default we
        vary the spectral index over the range [0, 7].
    :param upper_limit:
        Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
        is recommended that the spectral index also be fixed to a specific
        value.
    :param bayesephem:
        Include BayesEphem model. Set to False by default
    :param be_type:
        orbel, orbel-v2, setIII
    :param is_wideband:
        Whether input TOAs are wideband TOAs; will exclude ecorr from the white
        noise model.
    :param use_dmdata: whether to use DM data (WidebandTimingModel) if
        is_wideband.
    :param Tspan: time baseline used to determine Fourier GP frequencies;
        derived from data if not specified
    :param pshift:
        Option to use a random phase shift in the common-signal design matrix.
    :param tm_marg: Use marginalized timing model. In many cases this will speed
        up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
    :param tm_svd: boolean for svd-stabilised timing model design matrix
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'

    # find the maximum time span to set GW frequency sampling
    if Tspan is None:
        Tspan = model_utils.get_tspan(psrs)

    if n_gwbfreqs is None:
        n_gwbfreqs = components
    if n_rnfreqs is None:
        n_rnfreqs = components

    # timing model: wideband DM-data variant or (optionally marginalized) standard one
    if is_wideband and use_dmdata:
        dmjump = parameter.Constant()
        if white_vary:
            dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
            log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
            # dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
        else:
            dmefac = parameter.Constant()
            log10_dmequad = parameter.Constant()
            # dmjump = parameter.Constant()
        s = gp_signals.WidebandTimingModel(dmefac=dmefac,
                                           log10_dmequad=log10_dmequad, dmjump=dmjump,
                                           selection=selections.Selection(selections.by_backend),
                                           dmjump_selection=selections.Selection(selections.by_frontend))
    else:
        if tm_marg:
            s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
        else:
            s = gp_signals.TimingModel(use_svd=tm_svd)

    # per-pulsar red noise
    s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=n_rnfreqs)

    # monopole-correlated common signal
    s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                                components=n_gwbfreqs, gamma_val=gamma_common,
                                orf='monopole', name='monopole', pshift=pshift)

    # ephemeris model
    if bayesephem:
        s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
                                                           model=be_type)

    # adding white-noise, and acting on psr objects
    models = []
    for p in psrs:
        # ECORR applies only to NANOGrav narrowband data
        inc_ecorr = 'NANOGrav' in p.flags['pta'] and not is_wideband
        s2 = s + white_noise_block(vary=white_vary, inc_ecorr=inc_ecorr,
                                   tnequad=tnequad, select=select)
        models.append(s2(p))

    # set up PTA
    if dense_like:
        pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
    else:
        pta = signal_base.PTA(models)

    # set white noise parameters from the noise dictionary when they are held fixed
    if not white_vary or (is_wideband and use_dmdata):
        if noisedict is None:
            print('No noise dictionary provided!...')
        else:
            pta.set_default_params(noisedict)

    return pta
def model_3a(psrs, psd='powerlaw', noisedict=None, white_vary=False,
             components=30, n_rnfreqs=None, n_gwbfreqs=None,
             gamma_common=None, delta_common=None, upper_limit=False,
             bayesephem=False, be_type='setIII', is_wideband=False,
             use_dmdata=False, Tspan=None, select='backend',
             tnequad=False,
             pshift=False, pseed=None, psr_models=False,
             tm_marg=False, dense_like=False, tm_svd=False):
    """
    Reads in list of enterprise Pulsar instance and returns a PTA
    instantiated with model 3A from the analysis paper:
    per pulsar:
        1. fixed EFAC per backend/receiver system
        2. fixed EQUAD per backend/receiver system
        3. fixed ECORR per backend/receiver system
        4. Red noise modeled as a power-law with 30 sampling frequencies
        5. Linear timing model.
    global:
        1. GWB with HD correlations modeled with user defined PSD with
           30 sampling frequencies. Available PSDs are
           ['powerlaw', 'turnover' 'spectrum']
        2. Optional physical ephemeris modeling.
    :param psd:
        PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover' 'spectrum'] 'powerlaw' is default
        value.
    :param noisedict:
        Dictionary of pulsar noise properties. Can provide manually,
        or the code will attempt to find it.
    :param white_vary:
        boolean for varying white noise or keeping fixed.
    :param gamma_common:
        Fixed common red process spectral index value. By default we
        vary the spectral index over the range [0, 7].
    :param delta_common:
        Fixed common red process spectral index value for higher frequencies in
        broken power law model.
        By default we vary the spectral index over the range [0, 7].
    :param upper_limit:
        Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
        is recommended that the spectral index also be fixed to a specific
        value.
    :param bayesephem:
        Include BayesEphem model. Set to False by default
    :param be_type:
        orbel, orbel-v2, setIII
    :param is_wideband:
        Whether input TOAs are wideband TOAs; will exclude ecorr from the white
        noise model.
    :param use_dmdata: whether to use DM data (WidebandTimingModel) if
        is_wideband.
    :param Tspan: time baseline used to determine Fourier GP frequencies;
        derived from data if not specified
    :param pshift:
        Option to use a random phase shift in design matrix. For testing the
        null hypothesis.
    :param pseed:
        Option to provide a seed for the random phase shift.
    :param psr_models:
        Return list of psr models rather than signal_base.PTA object.
    :param tm_marg: Use marginalized timing model. In many cases this will speed
        up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
    :param tm_svd: boolean for svd-stabilised timing model design matrix
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'

    # find the maximum time span to set GW frequency sampling
    if Tspan is None:
        Tspan = model_utils.get_tspan(psrs)

    if n_gwbfreqs is None:
        n_gwbfreqs = components
    if n_rnfreqs is None:
        n_rnfreqs = components

    # timing model: wideband DM-data variant or (optionally marginalized) standard one
    if is_wideband and use_dmdata:
        dmjump = parameter.Constant()
        if white_vary:
            dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
            log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
            # dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
        else:
            dmefac = parameter.Constant()
            log10_dmequad = parameter.Constant()
            # dmjump = parameter.Constant()
        s = gp_signals.WidebandTimingModel(dmefac=dmefac,
                                           log10_dmequad=log10_dmequad, dmjump=dmjump,
                                           selection=selections.Selection(selections.by_backend),
                                           dmjump_selection=selections.Selection(selections.by_frontend))
    else:
        if tm_marg:
            s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
        else:
            s = gp_signals.TimingModel(use_svd=tm_svd)

    # per-pulsar red noise
    s += red_noise_block(psd='powerlaw',
                         prior=amp_prior,
                         Tspan=Tspan, components=n_rnfreqs)

    # common red noise block with Hellings-Downs correlations (GWB)
    s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                                components=n_gwbfreqs, gamma_val=gamma_common,
                                delta_val=delta_common,
                                orf='hd', name='gw', pshift=pshift, pseed=pseed)

    # ephemeris model
    if bayesephem:
        s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
                                                           model=be_type)

    # adding white-noise, and acting on psr objects
    models = []
    for p in psrs:
        # ECORR applies only to NANOGrav narrowband data
        inc_ecorr = 'NANOGrav' in p.flags['pta'] and not is_wideband
        s2 = s + white_noise_block(vary=white_vary, inc_ecorr=inc_ecorr,
                                   tnequad=tnequad, select=select)
        models.append(s2(p))

    if psr_models:
        return models

    # set up PTA
    if dense_like:
        pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
    else:
        pta = signal_base.PTA(models)

    # set white noise parameters from the noise dictionary when they are held fixed
    if not white_vary or (is_wideband and use_dmdata):
        if noisedict is None:
            print('No noise dictionary provided!...')
        else:
            pta.set_default_params(noisedict)

    return pta
def model_3b(psrs, psd='powerlaw', noisedict=None, white_vary=False,
             components=30, gamma_common=None, upper_limit=False, tnequad=False,
             bayesephem=False, be_type='setIII', is_wideband=False,
             use_dmdata=False, Tspan=None, select='backend', tm_marg=False,
             dense_like=False, tm_svd=False):
    """
    Reads in list of enterprise Pulsar instance and returns a PTA
    instantiated with model 3B from the analysis paper:
    per pulsar:
        1. fixed EFAC per backend/receiver system
        2. fixed EQUAD per backend/receiver system
        3. fixed ECORR per backend/receiver system
        4. Red noise modeled as a power-law with 30 sampling frequencies
        5. Linear timing model.
    global:
        1. GWB with HD correlations modeled with user defined PSD with
           30 sampling frequencies. Available PSDs are
           ['powerlaw', 'turnover' 'spectrum']
        2. Dipole signal modeled with user defined PSD with
           30 sampling frequencies. Available PSDs are
           ['powerlaw', 'turnover' 'spectrum']
        3. Optional physical ephemeris modeling.
    :param psd:
        PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover' 'spectrum'] 'powerlaw' is default
        value.
    :param noisedict:
        Dictionary of pulsar noise properties. Can provide manually,
        or the code will attempt to find it.
    :param white_vary:
        boolean for varying white noise or keeping fixed.
    :param gamma_common:
        Fixed common red process spectral index value. By default we
        vary the spectral index over the range [0, 7].
    :param upper_limit:
        Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
        is recommended that the spectral index also be fixed to a specific
        value.
    :param bayesephem:
        Include BayesEphem model. Set to False by default
    :param be_type:
        orbel, orbel-v2, setIII
    :param is_wideband:
        Whether input TOAs are wideband TOAs; will exclude ecorr from the white
        noise model.
    :param use_dmdata: whether to use DM data (WidebandTimingModel) if
        is_wideband.
    :param Tspan: time baseline used to determine Fourier GP frequencies;
        derived from data if not specified
    :param tm_marg: Use marginalized timing model. In many cases this will speed
        up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
    :param tm_svd: boolean for svd-stabilised timing model design matrix
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'

    # find the maximum time span to set GW frequency sampling
    if Tspan is None:
        Tspan = model_utils.get_tspan(psrs)

    # timing model: wideband DM-data variant or (optionally marginalized) standard one
    if is_wideband and use_dmdata:
        dmjump = parameter.Constant()
        if white_vary:
            dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
            log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
            # dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
        else:
            dmefac = parameter.Constant()
            log10_dmequad = parameter.Constant()
            # dmjump = parameter.Constant()
        s = gp_signals.WidebandTimingModel(dmefac=dmefac,
                                           log10_dmequad=log10_dmequad, dmjump=dmjump,
                                           selection=selections.Selection(selections.by_backend),
                                           dmjump_selection=selections.Selection(selections.by_frontend))
    else:
        if tm_marg:
            s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
        else:
            s = gp_signals.TimingModel(use_svd=tm_svd)

    # per-pulsar red noise
    s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)

    # common red noise block with Hellings-Downs correlations (GWB)
    s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                                components=components, gamma_val=gamma_common,
                                orf='hd', name='gw')

    # dipole-correlated common signal
    s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                                components=components, gamma_val=gamma_common,
                                orf='dipole', name='dipole')

    # ephemeris model
    if bayesephem:
        s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
                                                           model=be_type)

    # adding white-noise, and acting on psr objects
    models = []
    for p in psrs:
        # ECORR applies only to NANOGrav narrowband data
        inc_ecorr = 'NANOGrav' in p.flags['pta'] and not is_wideband
        s2 = s + white_noise_block(vary=white_vary, inc_ecorr=inc_ecorr,
                                   tnequad=tnequad, select=select)
        models.append(s2(p))

    # set up PTA
    if dense_like:
        pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
    else:
        pta = signal_base.PTA(models)

    # set white noise parameters from the noise dictionary when they are held fixed
    if not white_vary or (is_wideband and use_dmdata):
        if noisedict is None:
            print('No noise dictionary provided!...')
        else:
            pta.set_default_params(noisedict)

    return pta
def model_3c(psrs, psd='powerlaw', noisedict=None, white_vary=False,
             components=30, gamma_common=None, upper_limit=False, tnequad=False,
             bayesephem=False, be_type='orbel', is_wideband=False,
             use_dmdata=False, Tspan=None, select='backend', tm_marg=False,
             dense_like=False, tm_svd=False):
    """
    Reads in list of enterprise Pulsar instance and returns a PTA
    instantiated with model 3C from the analysis paper:
    per pulsar:
        1. fixed EFAC per backend/receiver system
        2. fixed EQUAD per backend/receiver system
        3. fixed ECORR per backend/receiver system
        4. Red noise modeled as a power-law with 30 sampling frequencies
        5. Linear timing model.
    global:
        1. GWB with HD correlations modeled with user defined PSD with
           30 sampling frequencies. Available PSDs are
           ['powerlaw', 'turnover' 'spectrum']
        2. Dipole signal modeled with user defined PSD with
           30 sampling frequencies. Available PSDs are
           ['powerlaw', 'turnover' 'spectrum']
        3. Monopole signal modeled with user defined PSD with
           30 sampling frequencies. Available PSDs are
           ['powerlaw', 'turnover' 'spectrum']
        4. Optional physical ephemeris modeling.
    :param psd:
        PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover' 'spectrum'] 'powerlaw' is default
        value.
    :param noisedict:
        Dictionary of pulsar noise properties. Can provide manually,
        or the code will attempt to find it.
    :param white_vary:
        boolean for varying white noise or keeping fixed.
    :param gamma_common:
        Fixed common red process spectral index value. By default we
        vary the spectral index over the range [0, 7].
    :param upper_limit:
        Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
        is recommended that the spectral index also be fixed to a specific
        value.
    :param bayesephem:
        Include BayesEphem model. Set to False by default
    :param be_type:
        orbel, orbel-v2, setIII
    :param is_wideband:
        Whether input TOAs are wideband TOAs; will exclude ecorr from the white
        noise model.
    :param use_dmdata: whether to use DM data (WidebandTimingModel) if
        is_wideband.
    :param Tspan: time baseline used to determine Fourier GP frequencies;
        derived from data if not specified
    :param tm_marg: Use marginalized timing model. In many cases this will speed
        up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
    :param tm_svd: boolean for svd-stabilised timing model design matrix
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'

    # find the maximum time span to set GW frequency sampling
    if Tspan is None:
        Tspan = model_utils.get_tspan(psrs)

    # timing model: wideband DM-data variant or (optionally marginalized) standard one
    if is_wideband and use_dmdata:
        dmjump = parameter.Constant()
        if white_vary:
            dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
            log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
            # dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
        else:
            dmefac = parameter.Constant()
            log10_dmequad = parameter.Constant()
            # dmjump = parameter.Constant()
        s = gp_signals.WidebandTimingModel(dmefac=dmefac,
                                           log10_dmequad=log10_dmequad, dmjump=dmjump,
                                           selection=selections.Selection(selections.by_backend),
                                           dmjump_selection=selections.Selection(selections.by_frontend))
    else:
        if tm_marg:
            s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
        else:
            s = gp_signals.TimingModel(use_svd=tm_svd)

    # per-pulsar red noise
    s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)

    # common red noise block with Hellings-Downs correlations (GWB)
    s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                                components=components, gamma_val=gamma_common,
                                orf='hd', name='gw')

    # dipole-correlated common signal
    s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                                components=components, gamma_val=gamma_common,
                                orf='dipole', name='dipole')

    # monopole-correlated common signal
    s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                                components=components, gamma_val=gamma_common,
                                orf='monopole', name='monopole')

    # ephemeris model
    if bayesephem:
        s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
                                                           model=be_type)

    # adding white-noise, and acting on psr objects
    models = []
    for p in psrs:
        # ECORR applies only to NANOGrav narrowband data
        inc_ecorr = 'NANOGrav' in p.flags['pta'] and not is_wideband
        s2 = s + white_noise_block(vary=white_vary, inc_ecorr=inc_ecorr,
                                   tnequad=tnequad, select=select)
        models.append(s2(p))

    # set up PTA
    if dense_like:
        pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
    else:
        pta = signal_base.PTA(models)

    # set white noise parameters from the noise dictionary when they are held fixed
    if not white_vary or (is_wideband and use_dmdata):
        if noisedict is None:
            print('No noise dictionary provided!...')
        else:
            pta.set_default_params(noisedict)

    return pta
def model_3d(psrs, psd='powerlaw', noisedict=None, white_vary=False,
             components=30, gamma_common=None, upper_limit=False, tnequad=False,
             bayesephem=False, be_type='orbel', is_wideband=False,
             use_dmdata=False, Tspan=None, select='backend', tm_marg=False,
             dense_like=False, tm_svd=False):
    """
    Reads in list of enterprise Pulsar instance and returns a PTA
    instantiated with model 3D from the analysis paper:
    per pulsar:
        1. fixed EFAC per backend/receiver system
        2. fixed EQUAD per backend/receiver system
        3. fixed ECORR per backend/receiver system
        4. Red noise modeled as a power-law with 30 sampling frequencies
        5. Linear timing model.
    global:
        1. GWB with HD correlations modeled with user defined PSD with
           30 sampling frequencies. Available PSDs are
           ['powerlaw', 'turnover' 'spectrum']
        2. Monopole signal modeled with user defined PSD with
           30 sampling frequencies. Available PSDs are
           ['powerlaw', 'turnover' 'spectrum']
        3. Optional physical ephemeris modeling.
    :param psd:
        PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover' 'spectrum'] 'powerlaw' is default
        value.
    :param noisedict:
        Dictionary of pulsar noise properties. Can provide manually,
        or the code will attempt to find it.
    :param white_vary:
        boolean for varying white noise or keeping fixed.
    :param gamma_common:
        Fixed common red process spectral index value. By default we
        vary the spectral index over the range [0, 7].
    :param upper_limit:
        Perform upper limit on common red noise amplitude. By default
        this is set to False. Note that when performing upper limits it
        is recommended that the spectral index also be fixed to a specific
        value.
    :param bayesephem:
        Include BayesEphem model. Set to False by default
    :param be_type:
        orbel, orbel-v2, setIII
    :param is_wideband:
        Whether input TOAs are wideband TOAs; will exclude ecorr from the white
        noise model.
    :param use_dmdata: whether to use DM data (WidebandTimingModel) if
        is_wideband.
    :param Tspan: time baseline used to determine Fourier GP frequencies;
        derived from data if not specified
    :param tm_marg: Use marginalized timing model. In many cases this will speed
        up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
    :param tm_svd: boolean for svd-stabilised timing model design matrix
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'

    # find the maximum time span to set GW frequency sampling
    if Tspan is None:
        Tspan = model_utils.get_tspan(psrs)

    # timing model: wideband DM-data variant or (optionally marginalized) standard one
    if is_wideband and use_dmdata:
        dmjump = parameter.Constant()
        if white_vary:
            dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
            log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
            # dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
        else:
            dmefac = parameter.Constant()
            log10_dmequad = parameter.Constant()
            # dmjump = parameter.Constant()
        s = gp_signals.WidebandTimingModel(dmefac=dmefac,
                                           log10_dmequad=log10_dmequad, dmjump=dmjump,
                                           selection=selections.Selection(selections.by_backend),
                                           dmjump_selection=selections.Selection(selections.by_frontend))
    else:
        if tm_marg:
            s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
        else:
            s = gp_signals.TimingModel(use_svd=tm_svd)

    # per-pulsar red noise
    s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)

    # common red noise block with Hellings-Downs correlations (GWB)
    s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                                components=components, gamma_val=gamma_common,
                                orf='hd', name='gw')

    # monopole-correlated common signal
    s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                                components=components, gamma_val=gamma_common,
                                orf='monopole', name='monopole')

    # ephemeris model
    if bayesephem:
        s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True,
                                                           model=be_type)

    # adding white-noise, and acting on psr objects
    models = []
    for p in psrs:
        # ECORR applies only to NANOGrav narrowband data
        inc_ecorr = 'NANOGrav' in p.flags['pta'] and not is_wideband
        s2 = s + white_noise_block(vary=white_vary, inc_ecorr=inc_ecorr,
                                   tnequad=tnequad, select=select)
        models.append(s2(p))

    # set up PTA
    if dense_like:
        pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
    else:
        pta = signal_base.PTA(models)

    # set white noise parameters from the noise dictionary when they are held fixed
    if not white_vary or (is_wideband and use_dmdata):
        if noisedict is None:
            print('No noise dictionary provided!...')
        else:
            pta.set_default_params(noisedict)

    return pta
def model_2a_drop_be(psrs, psd='powerlaw', noisedict=None, white_vary=False,
                     components=30, gamma_common=None, upper_limit=False,
                     is_wideband=False, use_dmdata=False, k_threshold=0.5,
                     pshift=False, tm_marg=False, dense_like=False, tm_svd=False,
                     tnequad=False,):
    """
    Reads in list of enterprise Pulsar instance and returns a PTA
    instantiated with model 2A from the analysis paper:
    per pulsar:
    1. fixed EFAC per backend/receiver system
    2. fixed EQUAD per backend/receiver system
    3. fixed ECORR per backend/receiver system
    4. Red noise modeled as a power-law with 30 sampling frequencies
    5. Linear timing model.
    global:
    1.Common red noise modeled with user defined PSD with
    30 sampling frequencies. Available PSDs are
    ['powerlaw', 'turnover' 'spectrum']
    2. Physical ephemeris modeling with a dropout parameter
    ('drop_be' = dropout on BayesEphem).
    :param psd:
    PSD to use for common red noise signal. Available options
    are ['powerlaw', 'turnover' 'spectrum']. 'powerlaw' is default
    value.
    :param noisedict:
    Dictionary of pulsar noise properties. Can provide manually,
    or the code will attempt to find it.
    :param white_vary:
    boolean for varying white noise or keeping fixed.
    :param gamma_common:
    Fixed common red process spectral index value. By default we
    vary the spectral index over the range [0, 7].
    :param upper_limit:
    Perform upper limit on common red noise amplitude. By default
    this is set to False. Note that when performing upper limits it
    is recommended that the spectral index also be fixed to a specific
    value.
    :param is_wideband:
    Whether input TOAs are wideband TOAs; will exclude ecorr from the white
    noise model.
    :param use_dmdata: whether to use DM data (WidebandTimingModel) if
    is_wideband.
    :param k_threshold:
    Define threshold for dropout parameter 'k'.
    :param tm_marg: Use marginalized timing model. In many cases this will speed
    up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
    :param tm_svd: boolean for svd-stabilised timing model design matrix
    """
    # upper-limit runs use a uniform (LinearExp) amplitude prior; detection
    # runs use a log-uniform amplitude prior
    amp_prior = 'uniform' if upper_limit else 'log-uniform'
    # find the maximum time span to set GW frequency sampling
    Tspan = model_utils.get_tspan(psrs)
    # timing model
    if (is_wideband and use_dmdata):
        # wideband TOAs with DM data: model DM jointly with the timing model
        dmjump = parameter.Constant()
        if white_vary:
            dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
            log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
            # dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
        else:
            dmefac = parameter.Constant()
            log10_dmequad = parameter.Constant()
            # dmjump = parameter.Constant()
        s = gp_signals.WidebandTimingModel(dmefac=dmefac,
                                           log10_dmequad=log10_dmequad, dmjump=dmjump,
                                           selection=selections.Selection(selections.by_backend),
                                           dmjump_selection=selections.Selection(selections.by_frontend))
    else:
        if tm_marg:
            s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
        else:
            s = gp_signals.TimingModel(use_svd=tm_svd)
    # red noise
    s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)
    # common red noise block
    s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                                components=components, gamma_val=gamma_common,
                                name='gw', pshift=pshift)
    # ephemeris model: always included in this model; the dropout parameter
    # with threshold `k_threshold` decides per-sample whether BayesEphem is on
    s += do.Dropout_PhysicalEphemerisSignal(use_epoch_toas=True,
                                            k_threshold=k_threshold)
    # adding white-noise, and acting on psr objects
    models = []
    for p in psrs:
        # ECORR only for channelized NANOGrav narrowband TOAs
        if 'NANOGrav' in p.flags['pta'] and not is_wideband:
            s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True, tnequad=tnequad)
            models.append(s2(p))
        else:
            s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False, tnequad=tnequad)
            models.append(s3(p))
    # set up PTA
    if dense_like:
        pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
    else:
        pta = signal_base.PTA(models)
    # set white noise parameters (only meaningful when they are held constant)
    if not white_vary or (is_wideband and use_dmdata):
        if noisedict is None:
            print('No noise dictionary provided!...')
        else:
            # no-op self-assignment kept from upstream; dict applied below
            noisedict = noisedict
            pta.set_default_params(noisedict)
    return pta
def model_2a_drop_crn(psrs, psd='powerlaw', noisedict=None, white_vary=False,
                      components=30, gamma_common=None, upper_limit=False,
                      bayesephem=False, is_wideband=False, use_dmdata=False,
                      k_threshold=0.5, pshift=False, tm_marg=False,
                      dense_like=False, tm_svd=False, tnequad=False,):
    """
    Reads in list of enterprise Pulsar instance and returns a PTA
    instantiated with model 2A from the analysis paper:
    per pulsar:
    1. fixed EFAC per backend/receiver system
    2. fixed EQUAD per backend/receiver system
    3. fixed ECORR per backend/receiver system
    4. Red noise modeled as a power-law with 30 sampling frequencies
    5. Linear timing model.
    global:
    1.Common red noise modeled with user defined PSD with
    30 sampling frequencies, with a dropout parameter on its
    amplitude ('drop_crn'). Available PSDs are
    ['powerlaw', 'turnover' 'spectrum']
    2. Optional physical ephemeris modeling.
    :param psd:
    PSD to use for common red noise signal. Available options
    are ['powerlaw', 'turnover' 'spectrum']. 'powerlaw' is default
    value.
    :param noisedict:
    Dictionary of pulsar noise properties. Can provide manually,
    or the code will attempt to find it.
    :param white_vary:
    boolean for varying white noise or keeping fixed.
    :param gamma_common:
    Fixed common red process spectral index value. By default we
    vary the spectral index over the range [0, 7].
    :param upper_limit:
    Perform upper limit on common red noise amplitude. By default
    this is set to False. Note that when performing upper limits it
    is recommended that the spectral index also be fixed to a specific
    value.
    :param bayesephem:
    Include BayesEphem model. Set to False by default
    :param is_wideband:
    Whether input TOAs are wideband TOAs; will exclude ecorr from the white
    noise model.
    :param use_dmdata: whether to use DM data (WidebandTimingModel) if
    is_wideband.
    :param tm_marg: Use marginalized timing model. In many cases this will speed
    up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
    :param tm_svd: boolean for svd-stabilised timing model design matrix
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'
    # find the maximum time span to set GW frequency sampling
    Tspan = model_utils.get_tspan(psrs)
    # timing model
    if (is_wideband and use_dmdata):
        dmjump = parameter.Constant()
        if white_vary:
            dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
            log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
            # dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
        else:
            dmefac = parameter.Constant()
            log10_dmequad = parameter.Constant()
            # dmjump = parameter.Constant()
        s = gp_signals.WidebandTimingModel(dmefac=dmefac,
                                           log10_dmequad=log10_dmequad, dmjump=dmjump,
                                           selection=selections.Selection(selections.by_backend),
                                           dmjump_selection=selections.Selection(selections.by_frontend))
    else:
        if tm_marg:
            s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
        else:
            s = gp_signals.TimingModel(use_svd=tm_svd)
    # red noise
    s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)
    # common red noise block (built inline here, rather than via
    # common_red_noise_block, so the dropout power law can be used)
    amp_name = '{}_log10_A'.format('gw')
    if amp_prior == 'uniform':
        log10_Agw = parameter.LinearExp(-18, -11)(amp_name)
    elif amp_prior == 'log-uniform' and gamma_common is not None:
        # narrower amplitude prior when gamma is fixed near 4.33
        # (presumably the 13/3 SMBHB expectation — confirm)
        if np.abs(gamma_common - 4.33) < 0.1:
            log10_Agw = parameter.Uniform(-18, -14)(amp_name)
        else:
            log10_Agw = parameter.Uniform(-18, -11)(amp_name)
    else:
        log10_Agw = parameter.Uniform(-18, -11)(amp_name)
    gam_name = '{}_gamma'.format('gw')
    if gamma_common is not None:
        gamma_gw = parameter.Constant(gamma_common)(gam_name)
    else:
        gamma_gw = parameter.Uniform(0, 7)(gam_name)
    k_drop = parameter.Uniform(0.0, 1.0)  # per-pulsar dropout indicator
    drop_pl = do.dropout_powerlaw(log10_A=log10_Agw, gamma=gamma_gw,
                                  k_drop=k_drop, k_threshold=k_threshold)
    crn = gp_signals.FourierBasisGP(drop_pl, components=components,
                                    Tspan=Tspan, name='gw', pshift=pshift)
    s += crn
    # ephemeris model
    # NOTE(review): the `bayesephem` parameter is accepted and documented but
    # never referenced in this body; the dropout ephemeris signal is always
    # added (with its default threshold) — confirm this is intentional.
    s += do.Dropout_PhysicalEphemerisSignal(use_epoch_toas=True)
    # adding white-noise, and acting on psr objects
    models = []
    for p in psrs:
        # ECORR only for channelized NANOGrav narrowband TOAs
        if 'NANOGrav' in p.flags['pta'] and not is_wideband:
            s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True, tnequad=tnequad)
            models.append(s2(p))
        else:
            s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False, tnequad=tnequad)
            models.append(s3(p))
    # set up PTA
    if dense_like:
        pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
    else:
        pta = signal_base.PTA(models)
    # set white noise parameters (only meaningful when they are held constant)
    if not white_vary or (is_wideband and use_dmdata):
        if noisedict is None:
            print('No noise dictionary provided!...')
        else:
            # no-op self-assignment kept from upstream; dict applied below
            noisedict = noisedict
            pta.set_default_params(noisedict)
    return pta
# Does not yet work with IPTA datasets due to white-noise modeling issues.
def model_chromatic(psrs, psd='powerlaw', noisedict=None, white_vary=False,
                    components=30, gamma_common=None, upper_limit=False,
                    bayesephem=False, is_wideband=False, use_dmdata=False,
                    pshift=False, idx=4, chromatic_psd='powerlaw',
                    c_psrs=['J1713+0747'], tm_marg=False, dense_like=False,
                    tm_svd=False, tnequad=False):
    """
    Reads in list of enterprise Pulsar instance and returns a PTA
    instantiated with model 2A from the analysis paper + additional
    chromatic noise for given pulsars
    per pulsar:
    1. fixed EFAC per backend/receiver system
    2. fixed EQUAD per backend/receiver system
    3. fixed ECORR per backend/receiver system
    4. Red noise modeled as a power-law with 30 sampling frequencies
    5. Linear timing model.
    6. Chromatic noise for given pulsar list
    global:
    1.Common red noise modeled with user defined PSD with
    30 sampling frequencies. Available PSDs are
    ['powerlaw', 'turnover' 'spectrum']
    2. Optional physical ephemeris modeling.
    :param psd:
    PSD to use for common red noise signal. Available options
    are ['powerlaw', 'turnover' 'spectrum']. 'powerlaw' is default
    value.
    :param noisedict:
    Dictionary of pulsar noise properties. Can provide manually,
    or the code will attempt to find it.
    :param white_vary:
    boolean for varying white noise or keeping fixed.
    :param gamma_common:
    Fixed common red process spectral index value. By default we
    vary the spectral index over the range [0, 7].
    :param upper_limit:
    Perform upper limit on common red noise amplitude. By default
    this is set to False. Note that when performing upper limits it
    is recommended that the spectral index also be fixed to a specific
    value.
    :param bayesephem:
    Include BayesEphem model. Set to False by default
    :param is_wideband:
    Whether input TOAs are wideband TOAs; will exclude ecorr from the white
    noise model.
    :param use_dmdata: whether to use DM data (WidebandTimingModel) if
    is_wideband.
    :param idx:
    Index of chromatic process (i.e DM is 2, scattering would be 4). If
    set to `vary` then will vary from 0 - 6 (This will be VERY slow!)
    :param chromatic_psd:
    PSD to use for chromatic noise. Available options
    are ['powerlaw', 'turnover' 'spectrum']. 'powerlaw' is default
    value.
    :param c_psrs:
    List of pulsars to use chromatic noise. 'all' will use all pulsars.
    An empty list adds chromatic noise to no pulsar.
    :param tm_marg: Use marginalized timing model. In many cases this will speed
    up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
    :param tm_svd: boolean for svd-stabilised timing model design matrix
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'
    # find the maximum time span to set GW frequency sampling
    Tspan = model_utils.get_tspan(psrs)
    # timing model
    if (is_wideband and use_dmdata):
        # wideband TOAs with DM data: model DM jointly with the timing model
        dmjump = parameter.Constant()
        if white_vary:
            dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
            log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
            # dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
        else:
            dmefac = parameter.Constant()
            log10_dmequad = parameter.Constant()
            # dmjump = parameter.Constant()
        s = gp_signals.WidebandTimingModel(dmefac=dmefac,
                                           log10_dmequad=log10_dmequad, dmjump=dmjump,
                                           selection=selections.Selection(selections.by_backend),
                                           dmjump_selection=selections.Selection(selections.by_frontend))
    else:
        if tm_marg:
            s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
        else:
            s = gp_signals.TimingModel(use_svd=tm_svd)
    # white noise (added to the base model here, unlike the per-pulsar
    # branching used by sibling models)
    s += white_noise_block(vary=white_vary, inc_ecorr=not is_wideband,
                           tnequad=tnequad)
    # red noise
    s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components)
    # common red noise block
    s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan,
                                components=components, gamma_val=gamma_common,
                                name='gw', pshift=pshift)
    # ephemeris model
    if bayesephem:
        s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)
    # chromatic noise
    sc = chromatic_noise_block(psd=chromatic_psd, idx=idx)
    if c_psrs == 'all':
        s += sc
        models = [s(psr) for psr in psrs]
    elif len(c_psrs) > 0:
        models = []
        for psr in psrs:
            if psr.name in c_psrs:
                print('Adding chromatic model to PSR {}'.format(psr.name))
                snew = s + sc
                models.append(snew(psr))
            else:
                models.append(s(psr))
    else:
        # BUGFIX: previously an empty c_psrs list left `models` undefined,
        # raising NameError below; now no pulsar gets the chromatic term.
        models = [s(psr) for psr in psrs]
    # set up PTA
    if dense_like:
        pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
    else:
        pta = signal_base.PTA(models)
    # set white noise parameters (only meaningful when they are held constant)
    if not white_vary or (is_wideband and use_dmdata):
        if noisedict is None:
            print('No noise dictionary provided!...')
        else:
            pta.set_default_params(noisedict)
    return pta
def model_bwm(psrs, likelihood=LogLikelihood, lookupdir=None, noisedict=None, tm_svd=False,
              Tmin_bwm=None, Tmax_bwm=None, skyloc=None, logmin=None, logmax=None,
              burst_logmin=-17, burst_logmax=-12, red_psd='powerlaw', components=30,
              dm_var=False, dm_psd='powerlaw', dm_annual=False, tnequad=False,
              upper_limit=False, bayesephem=False, wideband=False, tm_marg=False, dense_like=False):
    """
    Reads in list of enterprise Pulsar instance and returns a PTA
    instantiated with BWM model:
    per pulsar:
    1. fixed EFAC per backend/receiver system
    2. fixed EQUAD per backend/receiver system
    3. fixed ECORR per backend/receiver system (if NG channelized)
    4. Red noise modeled by a specified psd
    5. Linear timing model.
    6. Optional DM-variation modeling
    global:
    1. Deterministic GW burst with memory signal.
    2. Optional physical ephemeris modeling.
    :param psrs:
    list of enterprise.Pulsar objects for PTA
    :param likelihood:
    currently unused in this body (see NOTE below)
    :param lookupdir:
    currently unused in this body (see NOTE below)
    :param noisedict:
    Dictionary of pulsar noise properties for fixed white noise.
    Can provide manually, or the code will attempt to find it.
    :param tm_svd:
    boolean for svd-stabilised timing model design matrix
    :param Tmin_bwm:
    Min time to search for BWM (MJD). If omitted, uses first TOA.
    :param Tmax_bwm:
    Max time to search for BWM (MJD). If omitted, uses last TOA.
    :param skyloc:
    Fixed sky location of BWM signal search as [cos(theta), phi].
    Search over sky location if ``None`` given.
    :param logmin:
    Lower bound on log10_A of the red noise process in each pulsar`
    :param logmax:
    Upper bound on log10_A of the red noise process in each pulsar
    :param burst_logmin:
    Lower bound on the log10_A of the burst amplitude in each pulsar
    :param burst_logmax:
    Upper bound on the log10_A of the burst amplitude in each pulsar
    :param red_psd:
    PSD to use for per pulsar red noise. Available options
    are ['powerlaw', 'turnover', tprocess, 'spectrum'].
    :param components:
    number of modes in Fourier domain processes (red noise, DM
    variations, etc)
    :param dm_var:
    include gaussian process DM variations
    :param dm_psd:
    power-spectral density for gp DM variations
    :param dm_annual:
    include a yearly period DM variation
    :param upper_limit:
    Perform upper limit on BWM amplitude. By default this is
    set to False for a 'detection' run.
    :param bayesephem:
    Include BayesEphem model.
    :param tm_marg: Use marginalized timing model. In many cases this will speed
    up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
    :return: instantiated enterprise.PTA object
    """
    # NOTE(review): `likelihood` and `lookupdir` are accepted but never
    # referenced in this body — confirm whether they should be wired through.
    amp_prior = 'uniform' if upper_limit else 'log-uniform'
    # find the maximum time span to set frequency sampling
    tmin = np.min([p.toas.min() for p in psrs])
    tmax = np.max([p.toas.max() for p in psrs])
    Tspan = tmax - tmin
    # default burst search window: full data span, converted from seconds
    # (TOA units) to MJD via const.day
    if Tmin_bwm is None:
        Tmin_bwm = tmin/const.day
    if Tmax_bwm is None:
        Tmax_bwm = tmax/const.day
    if tm_marg:
        s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
    else:
        s = gp_signals.TimingModel(use_svd=tm_svd)
    # red noise
    s += red_noise_block(prior=amp_prior, psd=red_psd, Tspan=Tspan, components=components, logmin=logmin, logmax=logmax)
    # DM variations
    if dm_var:
        s += dm_noise_block(psd=dm_psd, prior=amp_prior, components=components,
                            gamma_val=None)
        if dm_annual:
            s += chrom.dm_annual_signal()
        # DM exponential dip for J1713's DM event
        dmexp = chrom.dm_exponential_dip(tmin=54500, tmax=54900)
    # GW BWM signal block
    s += bwm_block(Tmin_bwm, Tmax_bwm, logmin=burst_logmin, logmax=burst_logmax,
                   amp_prior=amp_prior,
                   skyloc=skyloc, name='bwm')
    # ephemeris model
    if bayesephem:
        s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)
    # adding white-noise, and acting on psr objects
    models = []
    for p in psrs:
        # ECORR only for channelized NANOGrav narrowband TOAs; white noise is
        # always fixed (vary=False) in this model
        if 'NANOGrav' in p.flags['pta'] and not wideband:
            s2 = s + white_noise_block(vary=False, inc_ecorr=True, tnequad=tnequad)
            if dm_var and 'J1713+0747' == p.name:
                s2 += dmexp
            models.append(s2(p))
        else:
            s3 = s + white_noise_block(vary=False, inc_ecorr=False, tnequad=tnequad)
            if dm_var and 'J1713+0747' == p.name:
                s3 += dmexp
            models.append(s3(p))
    # set up PTA
    if dense_like:
        pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
    else:
        pta = signal_base.PTA(models)
    # set white noise parameters
    if noisedict is None:
        print('No noise dictionary provided!...')
    else:
        # no-op self-assignment kept from upstream; dict applied below
        noisedict = noisedict
        pta.set_default_params(noisedict)
    return pta
def model_bwm_sglpsr(psr, likelihood=LogLikelihood, lookupdir=None,
                     noisedict=None, tm_svd=False, tnequad=False,
                     Tmin_bwm=None, Tmax_bwm=None,
                     burst_logmin=-17, burst_logmax=-12, fixed_sign=None,
                     red_psd='powerlaw', logmin=None,
                     logmax=None, components=30,
                     dm_var=False, dm_psd='powerlaw', dm_annual=False,
                     upper_limit=False, bayesephem=False,
                     wideband=False, tm_marg=False, dense_like=False):
    """
    Burst-With-Memory model for single pulsar runs
    Because all of the geometric parameters (pulsar_position, source_position, gw_pol) are all degenerate with each other in a single pulsar BWM search,
    this model can only search over burst epoch and residual-space ramp amplitude (t0, ramp_amplitude)
    Reads in list of enterprise Pulsar instance and returns a PTA
    instantiated with single-pulsar BWM model (called a ramp):
    per pulsar:
    1. fixed EFAC per backend/receiver system
    2. fixed EQUAD per backend/receiver system
    3. fixed ECORR per backend/receiver system (if NG channelized)
    4. Red noise modeled by a specified psd
    5. Linear timing model.
    6. Optional DM-variation modeling
    7. Deterministic GW burst with memory signal for this pulsar
    :param psr:
    enterprise.Pulsar objects for PTA. This model is only for one pulsar at a time.
    :param likelihood:
    The likelihood function to use. The options are [enterprise.signals.signal_base.LogLikelihood, enterprise.signals.signal_base.LookupLikelihood]
    :param noisedict:
    Dictionary of pulsar noise properties for fixed white noise.
    Can provide manually, or the code will attempt to find it.
    :param tm_svd:
    boolean for svd-stabilised timing model design matrix
    :param Tmin_bwm:
    Min time to search for BWM (MJD). If omitted, uses first TOA.
    :param Tmax_bwm:
    Max time to search for BWM (MJD). If omitted, uses last TOA.
    :param red_psd:
    PSD to use for per pulsar red noise. Available options
    are ['powerlaw', 'turnover', tprocess, 'spectrum'].
    :param components:
    number of modes in Fourier domain processes (red noise, DM
    variations, etc)
    :param dm_var:
    include gaussian process DM variations
    :param dm_psd:
    power-spectral density for gp DM variations
    :param dm_annual:
    include a yearly period DM variation
    :param upper_limit:
    Perform upper limit on BWM amplitude. By default this is
    set to False for a 'detection' run.
    :param bayesephem:
    Include BayesEphem model.
    :param tm_marg: Use marginalized timing model. In many cases this will speed
    up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
    :return: instantiated enterprise.PTA object
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'
    # find the maximum time span to set frequency sampling
    tmin = psr.toas.min()
    tmax = psr.toas.max()
    Tspan = tmax - tmin
    # default burst search window: full data span, converted from seconds
    # (TOA units) to MJD via const.day
    if Tmin_bwm is None:
        Tmin_bwm = tmin/const.day
    if Tmax_bwm is None:
        Tmax_bwm = tmax/const.day
    if tm_marg:
        # BUGFIX: pass use_svd=tm_svd for consistency with model_bwm and the
        # other models; previously the tm_svd flag was silently ignored when
        # tm_marg was set.
        s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
    else:
        s = gp_signals.TimingModel(use_svd=tm_svd)
    # red noise
    s += red_noise_block(prior=amp_prior, psd=red_psd, Tspan=Tspan, components=components, logmin=logmin, logmax=logmax)
    # DM variations
    if dm_var:
        s += dm_noise_block(psd=dm_psd, prior=amp_prior, components=components,
                            gamma_val=None)
        if dm_annual:
            s += chrom.dm_annual_signal()
        # DM exponential dip for J1713's DM event
        dmexp = chrom.dm_exponential_dip(tmin=54500, tmax=54900)
    # GW BWM signal block (single-pulsar "ramp" parameterization)
    s += bwm_sglpsr_block(Tmin_bwm, Tmax_bwm, amp_prior=amp_prior, name='ramp',
                          logmin=burst_logmin, logmax=burst_logmax, fixed_sign=fixed_sign)
    # ephemeris model
    if bayesephem:
        s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)
    # adding white-noise, and acting on psr objects
    models = []
    if 'NANOGrav' in psr.flags['pta'] and not wideband:
        s2 = s + white_noise_block(vary=False, inc_ecorr=True, tnequad=tnequad)
        if dm_var and 'J1713+0747' == psr.name:
            s2 += dmexp
        models.append(s2(psr))
    else:
        s3 = s + white_noise_block(vary=False, inc_ecorr=False, tnequad=tnequad)
        if dm_var and 'J1713+0747' == psr.name:
            s3 += dmexp
        models.append(s3(psr))
    # set up PTA
    # TODO: decide on a way to handle likelihood (the `likelihood` and
    # `lookupdir` arguments are currently unused)
    if dense_like:
        pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
    else:
        pta = signal_base.PTA(models)
    # set white noise parameters
    if noisedict is None:
        print('No noise dictionary provided!...')
    else:
        pta.set_default_params(noisedict)
    return pta
def model_fdm(psrs, noisedict=None, white_vary=False, tm_svd=False,
              Tmin_fdm=None, Tmax_fdm=None, gw_psd='powerlaw',
              red_psd='powerlaw', components=30, n_rnfreqs=None,
              n_gwbfreqs=None, gamma_common=None, delta_common=None,
              dm_var=False, dm_psd='powerlaw', dm_annual=False,
              upper_limit=False, bayesephem=False, wideband=False,
              pshift=False, pseed=None, model_CRN=False,
              amp_upper=-11, amp_lower=-18, tnequad=False,
              freq_upper=-7, freq_lower=-9,
              use_fixed_freq=False, fixed_freq=-8, tm_marg=False,
              dense_like=False):
    """
    Reads in list of enterprise Pulsar instance and returns a PTA
    instantiated with FDM model:
    per pulsar:
    1. fixed EFAC per backend/receiver system
    2. fixed EQUAD per backend/receiver system
    3. fixed ECORR per backend/receiver system (if NG channelized)
    4. Red noise modeled by a specified psd
    5. Linear timing model.
    6. Optional DM-variation modeling
    7. The pulsar phase term.
    global:
    1. Deterministic GW FDM signal.
    2. Optional physical ephemeris modeling.
    :param psrs:
    list of enterprise.Pulsar objects for PTA
    :param noisedict:
    Dictionary of pulsar noise properties for fixed white noise.
    Can provide manually, or the code will attempt to find it.
    :param white_vary:
    boolean for varying white noise or keeping fixed.
    :param tm_svd:
    boolean for svd-stabilised timing model design matrix
    :param Tmin_fdm:
    Min time to search for FDM (MJD). If omitted, uses first TOA.
    :param Tmax_fdm:
    Max time to search for FDM (MJD). If omitted, uses last TOA.
    :param gw_psd:
    PSD to use for the per pulsar GWB.
    :param red_psd:
    PSD to use for per pulsar red noise. Available options
    are ['powerlaw', 'turnover', tprocess, 'spectrum'].
    :param components:
    number of modes in Fourier domain processes (red noise, DM
    variations, etc)
    :param n_rnfreqs:
    Number of frequencies to use in achromatic rednoise model.
    :param n_gwbfreqs:
    Number of frequencies to use in the GWB model.
    :param gamma_common:
    Fixed common red process spectral index value. By default we
    vary the spectral index over the range [0, 7].
    :param dm_var:
    include gaussian process DM variations
    :param dm_psd:
    power-spectral density for gp DM variations
    :param dm_annual:
    include a yearly period DM variation
    :param upper_limit:
    Perform upper limit on FDM amplitude. By default this is
    set to False for a 'detection' run.
    :param bayesephem:
    Include BayesEphem model.
    :param wideband:
    Whether input TOAs are wideband TOAs; will exclude ecorr from the white
    noise model.
    :param pshift:
    Option to use a random phase shift in design matrix. For testing the
    null hypothesis.
    :param pseed:
    Option to provide a seed for the random phase shift.
    :param model_CRN:
    Option to model the common red process in addition to the
    FDM signal.
    :param amp_upper, amp_lower, freq_upper, freq_lower:
    The log-space bounds on the amplitude and frequency priors.
    :param use_fixed_freq:
    Whether to do a fixed-frequency run and not search over the frequency.
    :param fixed_freq:
    The frequency value to do a fixed-frequency run with.
    :param tm_marg: Use marginalized timing model. In many cases this will speed
    up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
    :return: instantiated enterprise.PTA object
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'
    # frequency counts default to the shared `components` value
    if n_gwbfreqs is None:
        n_gwbfreqs = components
    if n_rnfreqs is None:
        n_rnfreqs = components
    # find the maximum time span to set frequency sampling
    tmin = np.min([p.toas.min() for p in psrs])
    tmax = np.max([p.toas.max() for p in psrs])
    Tspan = tmax - tmin
    # default FDM search window: full data span, converted from seconds
    # (TOA units) to MJD via const.day
    if Tmin_fdm is None:
        Tmin_fdm = tmin/const.day
    if Tmax_fdm is None:
        Tmax_fdm = tmax/const.day
    # timing model
    if tm_marg:
        s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
    else:
        s = gp_signals.TimingModel(use_svd=tm_svd)
    # red noise
    s += red_noise_block(prior=amp_prior, psd=red_psd, Tspan=Tspan, components=n_rnfreqs)
    # DM variations
    if dm_var:
        s += dm_noise_block(psd=dm_psd, prior=amp_prior, components=components,
                            gamma_val=None)
        if dm_annual:
            s += chrom.dm_annual_signal()
        # DM exponential dip for J1713's DM event
        dmexp = chrom.dm_exponential_dip(tmin=54500, tmax=54900)
    if model_CRN is True:
        # common red noise block
        s += common_red_noise_block(psd=gw_psd, prior=amp_prior, Tspan=Tspan,
                                    components=n_gwbfreqs, gamma_val=gamma_common,
                                    delta_val=delta_common, name='gw',
                                    pshift=pshift, pseed=pseed)
    # GW FDM signal block
    s += deterministic.fdm_block(Tmin_fdm, Tmax_fdm,
                                 amp_prior=amp_prior, name='fdm',
                                 amp_lower=amp_lower, amp_upper=amp_upper,
                                 freq_lower=freq_lower, freq_upper=freq_upper,
                                 use_fixed_freq=use_fixed_freq, fixed_freq=fixed_freq)
    # ephemeris model
    if bayesephem:
        s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)
    # adding white-noise, and acting on psr objects
    models = []
    for p in psrs:
        # ECORR only for channelized NANOGrav narrowband TOAs
        # BUGFIX: honor the documented `white_vary` flag (previously white
        # noise was hard-coded to vary=False, silently ignoring the argument;
        # backward compatible since white_vary defaults to False)
        if 'NANOGrav' in p.flags['pta'] and not wideband:
            s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True, tnequad=tnequad)
            if dm_var and 'J1713+0747' == p.name:
                s2 += dmexp
            models.append(s2(p))
        else:
            s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False, tnequad=tnequad)
            if dm_var and 'J1713+0747' == p.name:
                s3 += dmexp
            models.append(s3(p))
    # set up PTA
    if dense_like:
        pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
    else:
        pta = signal_base.PTA(models)
    # set white noise parameters; only meaningful when they are held constant
    # (guard added for consistency with the other models in this module)
    if not white_vary:
        if noisedict is None:
            print('No noise dictionary provided!...')
        else:
            pta.set_default_params(noisedict)
    return pta
def model_cw(psrs, upper_limit=False, rn_psd='powerlaw', noisedict=None,
             white_vary=False, components=30, bayesephem=False, skyloc=None,
             log10_F=None, ecc=False, psrTerm=False, is_wideband=False,
             use_dmdata=False, gp_ecorr='basis_ecorr', tnequad=False,
             tm_marg=False, dense_like=False, tm_svd=False):
    """
    Reads in list of enterprise Pulsar instance and returns a PTA
    instantiated with CW model:
    per pulsar:
    1. fixed EFAC per backend/receiver system
    2. fixed EQUAD per backend/receiver system
    3. fixed ECORR per backend/receiver system
    4. Red noise modeled as a power-law with 30 sampling frequencies
    5. Linear timing model.
    global:
    1. Deterministic CW signal.
    2. Optional physical ephemeris modeling.
    :param upper_limit:
    Perform upper limit on common red noise amplitude. By default
    this is set to False. Note that when performing upper limits it
    is recommended that the spectral index also be fixed to a specific
    value.
    :param rn_psd:
    psd to use in red_noise_block()
    :param noisedict:
    Dictionary of pulsar noise properties. Can provide manually,
    or the code will attempt to find it.
    :param white_vary:
    boolean for varying white noise or keeping fixed.
    :param bayesephem:
    Include BayesEphem model. Set to False by default
    :param skyloc:
    Fixed sky location of CW signal search as [cos(theta), phi].
    Search over sky location if ``None`` given.
    :param log10_F:
    Fixed frequency of CW signal search.
    Search over frequency if ``None`` given.
    :param ecc:
    boolean or float
    if boolean: include/exclude eccentricity in search
    if float: use fixed eccentricity with eccentric model
    :param psrTerm:
    boolean, include/exclude pulsar term in search
    :param is_wideband:
    Whether input TOAs are wideband TOAs; will exclude ecorr from the white
    noise model.
    :param use_dmdata: whether to use DM data (WidebandTimingModel) if
    is_wideband.
    :param tm_marg: Use marginalized timing model. In many cases this will speed
    up the likelihood calculation significantly.
    :param dense_like: Use dense or sparse functions to evaluate lnlikelihood
    :param tm_svd: boolean for svd-stabilised timing model design matrix
    """
    amp_prior = 'uniform' if upper_limit else 'log-uniform'
    # find the maximum time span to set GW frequency sampling
    tmin = np.min([p.toas.min() for p in psrs])
    tmax = np.max([p.toas.max() for p in psrs])
    Tspan = tmax - tmin
    # timing model
    if (is_wideband and use_dmdata):
        # wideband TOAs with DM data: model DM jointly with the timing model
        dmjump = parameter.Constant()
        if white_vary:
            dmefac = parameter.Uniform(pmin=0.1, pmax=10.0)
            log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0)
            # dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005)
        else:
            dmefac = parameter.Constant()
            log10_dmequad = parameter.Constant()
            # dmjump = parameter.Constant()
        s = gp_signals.WidebandTimingModel(dmefac=dmefac,
                                           log10_dmequad=log10_dmequad, dmjump=dmjump,
                                           selection=selections.Selection(selections.by_backend),
                                           dmjump_selection=selections.Selection(selections.by_frontend))
    else:
        if tm_marg:
            s = gp_signals.MarginalizingTimingModel(use_svd=tm_svd)
        else:
            s = gp_signals.TimingModel(use_svd=tm_svd)
    # red noise
    s += red_noise_block(prior=amp_prior,
                         psd=rn_psd, Tspan=Tspan, components=components)
    # GW CW signal block; tref=tmin anchors the waveform phase to the
    # earliest TOA in the array
    if not ecc:
        s += deterministic.cw_block_circ(amp_prior=amp_prior,
                                         skyloc=skyloc,
                                         log10_fgw=log10_F,
                                         psrTerm=psrTerm, tref=tmin,
                                         name='cw')
    else:
        # ecc=True (bool) -> search over eccentricity; ecc=<float> -> fix it
        if type(ecc) is not float:
            ecc = None
        s += deterministic.cw_block_ecc(amp_prior=amp_prior,
                                        skyloc=skyloc, log10_F=log10_F,
                                        ecc=ecc, psrTerm=psrTerm,
                                        tref=tmin, name='cw')
    # ephemeris model
    if bayesephem:
        s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)
    # adding white-noise, and acting on psr objects
    models = []
    for p in psrs:
        # ECORR only for channelized NANOGrav narrowband TOAs; optionally
        # modeled as a Gaussian process when gp_ecorr is truthy
        if 'NANOGrav' in p.flags['pta'] and not is_wideband:
            if gp_ecorr:
                s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True,
                                           gp_ecorr=True, name=gp_ecorr,
                                           tnequad=tnequad)
            else:
                s2 = s + white_noise_block(vary=white_vary, inc_ecorr=True, tnequad=tnequad)
            models.append(s2(p))
        else:
            s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False, tnequad=tnequad)
            models.append(s3(p))
    # set up PTA
    if dense_like:
        pta = signal_base.PTA(models, lnlikelihood=signal_base.LogLikelihoodDenseCholesky)
    else:
        pta = signal_base.PTA(models)
    # set white noise parameters (only meaningful when they are held constant)
    if not white_vary or (is_wideband and use_dmdata):
        if noisedict is None:
            print('No noise dictionary provided!...')
        else:
            # no-op self-assignment kept from upstream; dict applied below
            noisedict = noisedict
            pta.set_default_params(noisedict)
    return pta
| 122,774 | 41.973399 | 152 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/__init__.py | __version__ = "2.4.3"
| 22 | 10.5 | 21 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/blocks.py | # -*- coding: utf-8 -*-
import types
import numpy as np
from enterprise import constants as const
from enterprise.signals import deterministic_signals
from enterprise.signals import gp_bases as gpb
from enterprise.signals import gp_priors as gpp
from enterprise.signals import (gp_signals, parameter, selections, utils,
white_signals)
from enterprise_extensions import deterministic as ee_deterministic
from . import chromatic as chrom
from . import dropout as drop
from . import gp_kernels as gpk
from . import model_orfs
__all__ = ['white_noise_block',
'red_noise_block',
'bwm_block',
'bwm_sglpsr_block',
'dm_noise_block',
'chromatic_noise_block',
'common_red_noise_block',
]
def channelized_backends(backend_flags):
    """Selection splitting TOAs by channelized backend flags only (used for ECORR)."""
    channelized = ('ASP', 'GASP', 'GUPPI', 'PUPPI', 'YUPPI', 'CHIME')
    masks = {}
    # keep only the unique flag values that name a channelized backend
    for val in np.unique(backend_flags):
        if any(tag in val for tag in channelized):
            masks[val] = backend_flags == val
    return masks
def white_noise_block(vary=False, inc_ecorr=False, gp_ecorr=False,
                      efac1=False, select='backend', tnequad=False, name=None, ng_twg_setup=False, wb_efac_sigma=0.25):
    """
    Returns the white noise block of the model:

        1. EFAC per backend/receiver system
        2. EQUAD per backend/receiver system
        3. ECORR per backend/receiver system

    :param vary:
        If set to true we vary these parameters
        with uniform priors. Otherwise they are set to constants
        with values to be set later.
    :param inc_ecorr:
        include ECORR, needed for NANOGrav channelized TOAs
    :param gp_ecorr:
        whether to use the Gaussian process model for ECORR
    :param efac1:
        use a strong prior on EFAC = Normal(mu=1, stdev=0.1)
    :param select:
        'backend' splits the white-noise parameters by observing
        backend; any other value applies one set of parameters to all TOAs.
    :param tnequad:
        Whether to use the TempoNest definition of EQUAD. Defaults to False to
        follow Tempo, Tempo2 and Pint definition.
    :param name: optional name prefix for the white-noise signals.
    :param ng_twg_setup:
        use a Normal(1.0, wb_efac_sigma) prior on EFAC instead of the
        default Uniform prior.
    :param wb_efac_sigma:
        standard deviation of the EFAC prior when ``ng_twg_setup`` is True.
    """
    if select == 'backend':
        # define selection by observing backend
        backend = selections.Selection(selections.by_backend)
        # define selection by nanograv backends
        backend_ng = selections.Selection(selections.nanograv_backends)
        # backend_ch = selections.Selection(channelized_backends)
    else:
        # define no selection
        backend = selections.Selection(selections.no_selection)
        # BUGFIX: previously `backend_ng` was undefined on this path, so
        # requesting ECORR with select != 'backend' raised a NameError.
        # Fall back to the same (no) selection for ECORR.
        backend_ng = backend

    # white noise parameters
    if vary:
        if efac1:
            efac = parameter.Normal(1.0, 0.1)
        elif ng_twg_setup:
            efac = parameter.Normal(1.0, wb_efac_sigma)
        else:
            efac = parameter.Uniform(0.01, 10.0)
        equad = parameter.Uniform(-8.5, -5)
        if inc_ecorr:
            ecorr = parameter.Uniform(-8.5, -5)
    else:
        # constants whose values are supplied later via set_default_params
        efac = parameter.Constant()
        equad = parameter.Constant()
        if inc_ecorr:
            ecorr = parameter.Constant()

    # white noise signals
    if tnequad:
        # TempoNest convention: EQUAD added independently of EFAC
        efeq = white_signals.MeasurementNoise(efac=efac,
                                              selection=backend, name=name)
        efeq += white_signals.TNEquadNoise(log10_tnequad=equad,
                                           selection=backend, name=name)
    else:
        # Tempo/Tempo2/Pint convention: EQUAD scaled inside MeasurementNoise
        efeq = white_signals.MeasurementNoise(efac=efac, log10_t2equad=equad,
                                              selection=backend, name=name)

    if inc_ecorr:
        if gp_ecorr:
            if name is None:
                ec = gp_signals.EcorrBasisModel(log10_ecorr=ecorr,
                                                selection=backend_ng)
            else:
                ec = gp_signals.EcorrBasisModel(log10_ecorr=ecorr,
                                                selection=backend_ng, name=name)
        else:
            ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr,
                                                selection=backend_ng,
                                                name=name)

    # combine signals
    if inc_ecorr:
        s = efeq + ec
    else:
        s = efeq

    return s
def red_noise_block(psd='powerlaw', prior='log-uniform', Tspan=None,
                    components=30, gamma_val=None, coefficients=False,
                    select=None, modes=None, wgts=None, combine=True,
                    break_flat=False, break_flat_fq=None,
                    logmin=None, logmax=None, dropout=False, k_threshold=0.5):
    """
    Returns red noise model:

        Red noise modeled as a power-law with 30 sampling frequencies

    :param psd:
        PSD function [e.g. powerlaw (default), turnover, spectrum, tprocess]
    :param prior:
        Prior on log10_A. Default is "log-uniform". Use "uniform" for
        upper limits.
    :param Tspan:
        Sets frequency sampling f_i = i / Tspan. Default will
        use overall time span for individual pulsar.
    :param components:
        Number of frequencies in sampling of red noise
    :param gamma_val:
        If given, this is the fixed slope of the power-law for
        powerlaw, turnover, or tprocess red noise
    :param coefficients: include latent coefficients in GP model?
    :param select:
        'backend' splits red noise by observing backend; 'band' or 'band+'
        split by observing band; anything else applies no selection.
    :param modes: explicit sampling frequencies forwarded to FourierBasisGP.
    :param wgts: weights, used only by psd='powerlaw_genmodes'.
    :param combine: forwarded to FourierBasisGP.
    :param break_flat:
        If True, model a power law below `break_flat_fq` plus a flat
        (gamma=0) power law above it.
    :param break_flat_fq: break frequency for `break_flat` (same units as 1/Tspan).
    :param logmin, logmax:
        If both given, they override the default bounds of the log10_A prior.
    :param dropout: Use a dropout analysis for intrinsic red noise models.
        Currently only supports power law option.
    :param k_threshold: Threshold for dropout analysis.
    """
    # red noise parameters that are common
    if psd in ['powerlaw', 'powerlaw_genmodes', 'turnover',
               'tprocess', 'tprocess_adapt']:
        # parameters shared by PSD functions
        if logmin is not None and logmax is not None:
            # caller-supplied amplitude prior bounds
            if prior == 'uniform':
                log10_A = parameter.LinearExp(logmin, logmax)
            elif prior == 'log-uniform':
                log10_A = parameter.Uniform(logmin, logmax)
        else:
            if prior == 'uniform':
                log10_A = parameter.LinearExp(-20, -11)
            elif prior == 'log-uniform' and gamma_val is not None:
                # NOTE(review): both branches of this gamma_val check are
                # identical (Uniform(-20, -11)); structure kept for parity
                # with common_red_noise_block, where the bounds differ.
                if np.abs(gamma_val - 4.33) < 0.1:
                    log10_A = parameter.Uniform(-20, -11)
                else:
                    log10_A = parameter.Uniform(-20, -11)
            else:
                log10_A = parameter.Uniform(-20, -11)

        # spectral index: fixed if gamma_val given, otherwise varied
        if gamma_val is not None:
            gamma = parameter.Constant(gamma_val)
        else:
            gamma = parameter.Uniform(0, 7)

        # different PSD function parameters
        if psd == 'powerlaw' and dropout:
            k_drop = parameter.Uniform(0, 1)
            pl = drop.dropout_powerlaw(log10_A=log10_A, gamma=gamma,
                                       dropout_psr='all', k_drop=k_drop,
                                       k_threshold=k_threshold)
        elif psd == 'powerlaw':
            pl = utils.powerlaw(log10_A=log10_A, gamma=gamma)
        elif psd == 'powerlaw_genmodes':
            pl = gpp.powerlaw_genmodes(log10_A=log10_A, gamma=gamma, wgts=wgts)
        elif psd == 'turnover':
            kappa = parameter.Uniform(0, 7)
            lf0 = parameter.Uniform(-9, -7)
            pl = utils.turnover(log10_A=log10_A, gamma=gamma,
                                lf0=lf0, kappa=kappa)
        elif psd == 'tprocess':
            df = 2
            alphas = gpp.InvGamma(df/2, df/2, size=components)
            pl = gpp.t_process(log10_A=log10_A, gamma=gamma, alphas=alphas)
        elif psd == 'tprocess_adapt':
            df = 2
            alpha_adapt = gpp.InvGamma(df/2, df/2, size=1)
            nfreq = parameter.Uniform(-0.5, 10-0.5)
            pl = gpp.t_process_adapt(log10_A=log10_A, gamma=gamma,
                                     alphas_adapt=alpha_adapt, nfreq=nfreq)

    if psd == 'spectrum':
        # free-spectral model: one log10_rho per frequency bin
        if prior == 'uniform':
            log10_rho = parameter.LinearExp(-10, -4, size=components)
        elif prior == 'log-uniform':
            log10_rho = parameter.Uniform(-10, -4, size=components)
        pl = gpp.free_spectrum(log10_rho=log10_rho)

    if select == 'backend':
        # define selection by observing backend
        selection = selections.Selection(selections.by_backend)
    elif select == 'band' or select == 'band+':
        # define selection by observing band
        selection = selections.Selection(selections.by_band)
    else:
        # define no selection
        selection = selections.Selection(selections.no_selection)

    if break_flat:
        # flat (gamma=0) extension above the break frequency
        log10_A_flat = parameter.Uniform(-20, -11)
        gamma_flat = parameter.Constant(0)
        pl_flat = utils.powerlaw(log10_A=log10_A_flat, gamma=gamma_flat)

        freqs = 1.0 * np.arange(1, components+1) / Tspan
        components_low = sum(f < break_flat_fq for f in freqs)
        # keep at least two low-frequency components
        if components_low < 1.5:
            components_low = 2

        rn = gp_signals.FourierBasisGP(pl, components=components_low,
                                       Tspan=Tspan, coefficients=coefficients,
                                       combine=combine, selection=selection)
        rn_flat = gp_signals.FourierBasisGP(pl_flat,
                                            modes=freqs[components_low:],
                                            coefficients=coefficients,
                                            selection=selection,
                                            combine=combine,
                                            name='red_noise_hf')
        rn = rn + rn_flat
    else:
        rn = gp_signals.FourierBasisGP(pl, components=components,
                                       Tspan=Tspan,
                                       combine=combine,
                                       coefficients=coefficients,
                                       selection=selection,
                                       modes=modes)

    if select == 'band+':  # Add the common component as well
        rn = rn + gp_signals.FourierBasisGP(pl, components=components,
                                            Tspan=Tspan, combine=combine,
                                            coefficients=coefficients)

    return rn
def bwm_block(Tmin, Tmax, amp_prior='log-uniform',
              skyloc=None, logmin=-18, logmax=-11,
              name='bwm'):
    """
    Returns a deterministic GW burst-with-memory (BWM) model: a single
    burst event parameterized by epoch, sky location, polarization angle
    and amplitude.

    :param Tmin:
        Earliest burst epoch to search, probably the first TOA (MJD).
    :param Tmax:
        Latest burst epoch to search, probably the last TOA (MJD).
    :param amp_prior:
        Prior on log10_A. Default is "log-uniform"; use "uniform" for
        upper limits.
    :param skyloc:
        Fixed sky location of the BWM source as [cos(theta), phi].
        The sky location is searched over when ``None``.
    :param logmin:
        log10 of the minimum BWM amplitude for the prior.
    :param logmax:
        log10 of the maximum BWM amplitude for the prior.
    :param name:
        Name of the BWM signal.
    """
    # amplitude prior, chosen by `amp_prior`
    amp_name = '{}_log10_A'.format(name)
    if amp_prior == 'uniform':
        log10_A_bwm = parameter.LinearExp(logmin, logmax)(amp_name)
    elif amp_prior == 'log-uniform':
        log10_A_bwm = parameter.Uniform(logmin, logmax)(amp_name)

    # polarization angle and burst epoch
    pol = parameter.Uniform(0, np.pi)('{}_pol'.format(name))
    t0 = parameter.Uniform(Tmin, Tmax)('{}_t0'.format(name))

    # sky position: searched over, or pinned to the supplied location
    costh_name = '{}_costheta'.format(name)
    phi_name = '{}_phi'.format(name)
    if skyloc is None:
        costh = parameter.Uniform(-1, 1)(costh_name)
        phi = parameter.Uniform(0, 2*np.pi)(phi_name)
    else:
        costh = parameter.Constant(skyloc[0])(costh_name)
        phi = parameter.Constant(skyloc[1])(phi_name)

    # deterministic BWM waveform wrapped as an enterprise signal
    bwm_wf = ee_deterministic.bwm_delay(log10_h=log10_A_bwm, t0=t0,
                                        cos_gwtheta=costh, gwphi=phi,
                                        gwpol=pol)
    return deterministic_signals.Deterministic(bwm_wf, name=name)
def bwm_sglpsr_block(Tmin, Tmax, amp_prior='log-uniform',
                     logmin=-17, logmax=-12, name='ramp', fixed_sign=None):
    """
    Returns a deterministic single-pulsar burst-with-memory model: a
    ramp in the residuals parameterized by epoch, amplitude and sign.

    :param Tmin: earliest ramp epoch to search (MJD).
    :param Tmax: latest ramp epoch to search (MJD).
    :param amp_prior:
        Prior on log10_A. Default is "log-uniform"; use "uniform" for
        upper limits.
    :param logmin: log10 of the minimum ramp amplitude for the prior.
    :param logmax: log10 of the maximum ramp amplitude for the prior.
    :param name: name of the ramp signal.
    :param fixed_sign:
        If ``None`` the ramp sign is a free parameter; otherwise only
        its sign (+1/-1) is used.
    """
    # ramp sign: free parameter unless fixed by the caller
    if fixed_sign is None:
        sign = parameter.Uniform(-1, 1)("sign")
    else:
        sign = np.sign(fixed_sign)

    # ramp epoch
    t0 = parameter.Uniform(Tmin, Tmax)('{}_t0'.format(name))

    # ramp amplitude prior, chosen by `amp_prior`
    amp_name = '{}_log10_A'.format(name)
    if amp_prior == 'uniform':
        log10_A_ramp = parameter.LinearExp(logmin, logmax)(amp_name)
    elif amp_prior == 'log-uniform':
        log10_A_ramp = parameter.Uniform(logmin, logmax)(amp_name)

    wf = ee_deterministic.bwm_sglpsr_delay(log10_A=log10_A_ramp, t0=t0,
                                           sign=sign)
    return deterministic_signals.Deterministic(wf, name=name)
def dm_noise_block(gp_kernel='diag', psd='powerlaw', nondiag_kernel='periodic',
                   prior='log-uniform', dt=15, df=200,
                   Tspan=None, components=30,
                   gamma_val=None, coefficients=False):
    """
    Returns DM noise model:

        1. DM noise modeled as a power-law with 30 sampling frequencies

    :param gp_kernel:
        Whether to use a diagonal kernel for the GP. ['diag', 'nondiag']
    :param psd:
        PSD function [e.g. powerlaw (default), spectrum, tprocess]
    :param nondiag_kernel:
        Which nondiagonal kernel to use for the GP.
        ['periodic', 'periodic_rfband', 'sq_exp', 'sq_exp_rfband', 'dmx_like']
    :param prior:
        Prior on log10_A. Default is "log-uniform". Use "uniform" for
        upper limits.
    :param dt:
        time-scale for linear interpolation basis (days)
    :param df:
        frequency-scale for linear interpolation basis (MHz)
    :param Tspan:
        Sets frequency sampling f_i = i / Tspan. Default will
        use overall time span for individual pulsar.
    :param components:
        Number of frequencies in sampling of DM-variations.
    :param gamma_val:
        If given, this is the fixed slope of the power-law for
        powerlaw, turnover, or tprocess DM-variations
    :param coefficients: include latent coefficients in GP model?
    """
    # dm noise parameters that are common
    if gp_kernel == 'diag':
        if psd in ['powerlaw', 'turnover', 'tprocess', 'tprocess_adapt']:
            # parameters shared by PSD functions
            if prior == 'uniform':
                log10_A_dm = parameter.LinearExp(-20, -11)
            elif prior == 'log-uniform' and gamma_val is not None:
                if np.abs(gamma_val - 4.33) < 0.1:
                    log10_A_dm = parameter.Uniform(-20, -11)
                else:
                    log10_A_dm = parameter.Uniform(-20, -11)
            else:
                log10_A_dm = parameter.Uniform(-20, -11)

            # spectral index: fixed if gamma_val given, otherwise varied
            if gamma_val is not None:
                gamma_dm = parameter.Constant(gamma_val)
            else:
                gamma_dm = parameter.Uniform(0, 7)

            # different PSD function parameters
            if psd == 'powerlaw':
                dm_prior = utils.powerlaw(log10_A=log10_A_dm, gamma=gamma_dm)
            elif psd == 'turnover':
                kappa_dm = parameter.Uniform(0, 7)
                lf0_dm = parameter.Uniform(-9, -7)
                dm_prior = utils.turnover(log10_A=log10_A_dm, gamma=gamma_dm,
                                          lf0=lf0_dm, kappa=kappa_dm)
            elif psd == 'tprocess':
                # BUGFIX(clarity): use `dof` for the inverse-gamma degrees of
                # freedom instead of reassigning (shadowing) the `df`
                # frequency-scale argument.
                dof = 2
                alphas_dm = gpp.InvGamma(dof/2, dof/2, size=components)
                dm_prior = gpp.t_process(log10_A=log10_A_dm, gamma=gamma_dm,
                                         alphas=alphas_dm)
            elif psd == 'tprocess_adapt':
                dof = 2
                alpha_adapt_dm = gpp.InvGamma(dof/2, dof/2, size=1)
                nfreq_dm = parameter.Uniform(-0.5, 10-0.5)
                dm_prior = gpp.t_process_adapt(log10_A=log10_A_dm,
                                               gamma=gamma_dm,
                                               alphas_adapt=alpha_adapt_dm,
                                               nfreq=nfreq_dm)

        if psd == 'spectrum':
            # free-spectral model: one log10_rho per frequency bin
            if prior == 'uniform':
                log10_rho_dm = parameter.LinearExp(-10, -4, size=components)
            elif prior == 'log-uniform':
                log10_rho_dm = parameter.Uniform(-10, -4, size=components)
            dm_prior = gpp.free_spectrum(log10_rho=log10_rho_dm)

        dm_basis = utils.createfourierdesignmatrix_dm(nmodes=components,
                                                      Tspan=Tspan)

    elif gp_kernel == 'nondiag':
        if nondiag_kernel == 'periodic':
            # Periodic GP kernel for DM
            log10_sigma = parameter.Uniform(-10, -4)
            log10_ell = parameter.Uniform(1, 4)
            log10_p = parameter.Uniform(-4, 1)
            log10_gam_p = parameter.Uniform(-3, 2)

            dm_basis = gpk.linear_interp_basis_dm(dt=dt*const.day)
            dm_prior = gpk.periodic_kernel(log10_sigma=log10_sigma,
                                           log10_ell=log10_ell,
                                           log10_gam_p=log10_gam_p,
                                           log10_p=log10_p)
        elif nondiag_kernel == 'periodic_rfband':
            # Periodic GP kernel for DM with RQ radio-frequency dependence
            log10_sigma = parameter.Uniform(-10, -4)
            log10_ell = parameter.Uniform(1, 4)
            log10_ell2 = parameter.Uniform(2, 7)
            log10_alpha_wgt = parameter.Uniform(-4, 1)
            log10_p = parameter.Uniform(-4, 1)
            log10_gam_p = parameter.Uniform(-3, 2)

            dm_basis = gpk.get_tf_quantization_matrix(df=df, dt=dt*const.day,
                                                      dm=True)
            dm_prior = gpk.tf_kernel(log10_sigma=log10_sigma,
                                     log10_ell=log10_ell,
                                     log10_gam_p=log10_gam_p, log10_p=log10_p,
                                     log10_alpha_wgt=log10_alpha_wgt,
                                     log10_ell2=log10_ell2)
        elif nondiag_kernel == 'sq_exp':
            # squared-exponential GP kernel for DM
            log10_sigma = parameter.Uniform(-10, -4)
            log10_ell = parameter.Uniform(1, 4)

            dm_basis = gpk.linear_interp_basis_dm(dt=dt*const.day)
            dm_prior = gpk.se_dm_kernel(log10_sigma=log10_sigma,
                                        log10_ell=log10_ell)
        elif nondiag_kernel == 'sq_exp_rfband':
            # Sq-Exp GP kernel for DM with RQ radio-frequency dependence
            log10_sigma = parameter.Uniform(-10, -4)
            log10_ell = parameter.Uniform(1, 4)
            log10_ell2 = parameter.Uniform(2, 7)
            log10_alpha_wgt = parameter.Uniform(-4, 1)

            dm_basis = gpk.get_tf_quantization_matrix(df=df, dt=dt*const.day,
                                                      dm=True)
            dm_prior = gpk.sf_kernel(log10_sigma=log10_sigma,
                                     log10_ell=log10_ell,
                                     log10_alpha_wgt=log10_alpha_wgt,
                                     log10_ell2=log10_ell2)
        elif nondiag_kernel == 'dmx_like':
            # DMX-like signal
            log10_sigma = parameter.Uniform(-10, -4)

            dm_basis = gpk.linear_interp_basis_dm(dt=dt*const.day)
            dm_prior = gpk.dmx_ridge_prior(log10_sigma=log10_sigma)

    dmgp = gp_signals.BasisGP(dm_prior, dm_basis, name='dm_gp',
                              coefficients=coefficients)

    return dmgp
def chromatic_noise_block(gp_kernel='nondiag', psd='powerlaw',
                          nondiag_kernel='periodic',
                          prior='log-uniform', dt=15, df=200,
                          idx=4, include_quadratic=False,
                          Tspan=None, name='chrom', components=30,
                          coefficients=False):
    """
    Returns GP chromatic noise model :

        1. Chromatic modeled with user defined PSD with
        30 sampling frequencies. Available PSDs are
        ['powerlaw', 'turnover' 'spectrum']

    :param gp_kernel:
        Whether to use a diagonal kernel for the GP. ['diag','nondiag']
    :param nondiag_kernel:
        Which nondiagonal kernel to use for the GP.
        ['periodic','sq_exp','periodic_rfband','sq_exp_rfband']
    :param psd:
        PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover' 'spectrum']
    :param prior:
        What type of prior to use for amplitudes. ['log-uniform','uniform']
    :param dt:
        time-scale for linear interpolation basis (days)
    :param df:
        frequency-scale for linear interpolation basis (MHz)
    :param idx:
        Index of radio frequency dependence (i.e. DM is 2). Any float will work.
    :param include_quadratic:
        Whether to include a quadratic fit.
    :param name: Name of signal
    :param Tspan:
        Tspan from which to calculate frequencies for PSD-based GPs.
    :param components:
        Number of frequencies to use in 'diag' GPs.
    :param coefficients:
        Whether to keep coefficients of the GP.
    """
    if gp_kernel == 'diag':
        # Fourier-basis (diagonal) GP at the chromatic index
        chm_basis = gpb.createfourierdesignmatrix_chromatic(nmodes=components,
                                                            Tspan=Tspan)
        if psd in ['powerlaw', 'turnover']:
            if prior == 'uniform':
                log10_A = parameter.LinearExp(-18, -11)
            elif prior == 'log-uniform':
                log10_A = parameter.Uniform(-18, -11)
            gamma = parameter.Uniform(0, 7)

            # PSD
            if psd == 'powerlaw':
                chm_prior = utils.powerlaw(log10_A=log10_A, gamma=gamma)
            elif psd == 'turnover':
                kappa = parameter.Uniform(0, 7)
                lf0 = parameter.Uniform(-9, -7)
                chm_prior = utils.turnover(log10_A=log10_A, gamma=gamma,
                                           lf0=lf0, kappa=kappa)

        if psd == 'spectrum':
            # free-spectral model: one log10_rho per frequency bin
            if prior == 'uniform':
                log10_rho = parameter.LinearExp(-10, -4, size=components)
            elif prior == 'log-uniform':
                log10_rho = parameter.Uniform(-10, -4, size=components)
            chm_prior = gpp.free_spectrum(log10_rho=log10_rho)

    elif gp_kernel == 'nondiag':
        if nondiag_kernel == 'periodic':
            # Periodic GP kernel for DM
            log10_sigma = parameter.Uniform(-10, -4)
            log10_ell = parameter.Uniform(1, 4)
            log10_p = parameter.Uniform(-4, 1)
            log10_gam_p = parameter.Uniform(-3, 2)

            # NOTE(review): unlike the 'sq_exp' branch below, `idx` is not
            # forwarded to the basis here — confirm this is intended.
            chm_basis = gpk.linear_interp_basis_chromatic(dt=dt*const.day)
            chm_prior = gpk.periodic_kernel(log10_sigma=log10_sigma,
                                            log10_ell=log10_ell,
                                            log10_gam_p=log10_gam_p,
                                            log10_p=log10_p)
        elif nondiag_kernel == 'periodic_rfband':
            # Periodic GP kernel for DM with RQ radio-frequency dependence
            log10_sigma = parameter.Uniform(-10, -4)
            log10_ell = parameter.Uniform(1, 4)
            log10_ell2 = parameter.Uniform(2, 7)
            log10_alpha_wgt = parameter.Uniform(-4, 1)
            log10_p = parameter.Uniform(-4, 1)
            log10_gam_p = parameter.Uniform(-3, 2)

            chm_basis = gpk.get_tf_quantization_matrix(df=df, dt=dt*const.day,
                                                       dm=True, dm_idx=idx)
            chm_prior = gpk.tf_kernel(log10_sigma=log10_sigma,
                                      log10_ell=log10_ell,
                                      log10_gam_p=log10_gam_p,
                                      log10_p=log10_p,
                                      log10_alpha_wgt=log10_alpha_wgt,
                                      log10_ell2=log10_ell2)
        elif nondiag_kernel == 'sq_exp':
            # squared-exponential kernel for DM
            log10_sigma = parameter.Uniform(-10, -4)
            log10_ell = parameter.Uniform(1, 4)

            chm_basis = gpk.linear_interp_basis_chromatic(dt=dt*const.day, idx=idx)
            chm_prior = gpk.se_dm_kernel(log10_sigma=log10_sigma,
                                         log10_ell=log10_ell)
        elif nondiag_kernel == 'sq_exp_rfband':
            # Sq-Exp GP kernel for Chrom with RQ radio-frequency dependence
            log10_sigma = parameter.Uniform(-10, -4)
            log10_ell = parameter.Uniform(1, 4)
            log10_ell2 = parameter.Uniform(2, 7)
            log10_alpha_wgt = parameter.Uniform(-4, 1)

            chm_basis = gpk.get_tf_quantization_matrix(df=df, dt=dt*const.day,
                                                       dm=True, dm_idx=idx)
            chm_prior = gpk.sf_kernel(log10_sigma=log10_sigma,
                                      log10_ell=log10_ell,
                                      log10_alpha_wgt=log10_alpha_wgt,
                                      log10_ell2=log10_ell2)

    # NOTE(review): an unrecognized `psd`/`gp_kernel` combination leaves
    # chm_prior/chm_basis undefined and raises NameError here.
    cgp = gp_signals.BasisGP(chm_prior, chm_basis, name=name+'_gp',
                             coefficients=coefficients)

    if include_quadratic:
        # quadratic piece
        basis_quad = chrom.chromatic_quad_basis(idx=idx)
        prior_quad = chrom.chromatic_quad_prior()
        cquad = gp_signals.BasisGP(prior_quad, basis_quad, name=name+'_quad')
        cgp += cquad

    return cgp
def common_red_noise_block(psd='powerlaw', prior='log-uniform',
                           Tspan=None, components=30, combine=True,
                           log10_A_val=None, gamma_val=None, delta_val=None,
                           logmin=None, logmax=None,
                           orf=None, orf_ifreq=0, leg_lmax=5,
                           name='gw', coefficients=False,
                           pshift=False, pseed=None):
    """
    Returns common red noise model:

        1. Red noise modeled with user defined PSD with
        30 sampling frequencies. Available PSDs are
        ['powerlaw', 'turnover' 'spectrum']

    :param psd:
        PSD to use for common red noise signal. Available options
        are ['powerlaw', 'turnover' 'spectrum', 'broken_powerlaw']
    :param prior:
        Prior on log10_A. Default is "log-uniform". Use "uniform" for
        upper limits.
    :param Tspan:
        Sets frequency sampling f_i = i / Tspan. Default will
        use overall time span for individual pulsar.
    :param log10_A_val:
        Value of log10_A parameter for fixed amplitude analyses.
    :param gamma_val:
        Value of spectral index for power-law and turnover
        models. By default spectral index is varied of range [0,7]
    :param delta_val:
        Value of spectral index for high frequencies in broken power-law
        and turnover models. By default spectral index is varied in range [0,7].
    :param logmin:
        Specify the lower bound of the prior on the amplitude for all psd but 'spectrum'.
        If psd=='spectrum', then this specifies the lower prior on log10_rho_gw
    :param logmax:
        Specify the upper bound of the prior on the amplitude for all psd but 'spectrum'.
        If psd=='spectrum', then this specifies the upper prior on log10_rho_gw
    :param orf:
        String representing which overlap reduction function to use.
        By default we do not use any spatial correlations. Permitted
        values are the keys of the internal ``orfs`` dictionary
        ('crn', 'hd', 'gw_monopole', 'gw_dipole', 'st', 'gt', 'dipole',
        'monopole', 'param_hd', 'spline_orf', 'bin_orf', 'zero_diag_hd',
        'zero_diag_bin_orf', 'freq_hd', 'legendre_orf',
        'zero_diag_legendre_orf') or a user-supplied ORF function.
    :param orf_ifreq:
        Frequency bin at which to start the Hellings & Downs function with
        numbering beginning at 0. Currently only works with freq_hd orf.
    :param leg_lmax:
        Maximum multipole of a Legendre polynomial series representation
        of the overlap reduction function [default=5]
    :param pshift:
        Option to use a random phase shift in design matrix. For testing the
        null hypothesis.
    :param pseed:
        Option to provide a seed for the random phase shift.
    :param name: Name of common red process
    """
    # available overlap reduction functions, keyed by name
    orfs = {'crn': None, 'hd': model_orfs.hd_orf(),
            'gw_monopole': model_orfs.gw_monopole_orf(),
            'gw_dipole': model_orfs.gw_dipole_orf(),
            'st': model_orfs.st_orf(),
            'gt': model_orfs.gt_orf(tau=parameter.Uniform(-1.5, 1.5)('tau')),
            'dipole': model_orfs.dipole_orf(),
            'monopole': model_orfs.monopole_orf(),
            'param_hd': model_orfs.param_hd_orf(a=parameter.Uniform(-1.5, 3.0)('gw_orf_param0'),
                                                b=parameter.Uniform(-1.0, 0.5)('gw_orf_param1'),
                                                c=parameter.Uniform(-1.0, 1.0)('gw_orf_param2')),
            'spline_orf': model_orfs.spline_orf(params=parameter.Uniform(-0.9, 0.9, size=7)('gw_orf_spline')),
            'bin_orf': model_orfs.bin_orf(params=parameter.Uniform(-1.0, 1.0, size=7)('gw_orf_bin')),
            'zero_diag_hd': model_orfs.zero_diag_hd(),
            'zero_diag_bin_orf': model_orfs.zero_diag_bin_orf(params=parameter.Uniform(
                -1.0, 1.0, size=7)('gw_orf_bin_zero_diag')),
            'freq_hd': model_orfs.freq_hd(params=[components, orf_ifreq]),
            'legendre_orf': model_orfs.legendre_orf(params=parameter.Uniform(
                -1.0, 1.0, size=leg_lmax+1)('gw_orf_legendre')),
            'zero_diag_legendre_orf': model_orfs.zero_diag_legendre_orf(params=parameter.Uniform(
                -1.0, 1.0, size=leg_lmax+1)('gw_orf_legendre_zero_diag'))}

    # common red noise parameters
    if psd in ['powerlaw', 'turnover', 'turnover_knee', 'broken_powerlaw']:
        amp_name = '{}_log10_A'.format(name)
        if log10_A_val is not None:
            # BUGFIX: the fixed amplitude was previously overwritten
            # unconditionally by the prior assignments below, so
            # log10_A_val had no effect; it now takes precedence.
            log10_Agw = parameter.Constant(log10_A_val)(amp_name)
        elif logmin is not None and logmax is not None:
            # caller-supplied amplitude prior bounds
            if prior == 'uniform':
                log10_Agw = parameter.LinearExp(logmin, logmax)(amp_name)
            elif prior == 'log-uniform' and gamma_val is not None:
                if np.abs(gamma_val - 4.33) < 0.1:
                    log10_Agw = parameter.Uniform(logmin, logmax)(amp_name)
                else:
                    log10_Agw = parameter.Uniform(logmin, logmax)(amp_name)
            else:
                log10_Agw = parameter.Uniform(logmin, logmax)(amp_name)
        else:
            if prior == 'uniform':
                log10_Agw = parameter.LinearExp(-18, -11)(amp_name)
            elif prior == 'log-uniform' and gamma_val is not None:
                # narrower default prior when gamma is fixed at ~13/3
                if np.abs(gamma_val - 4.33) < 0.1:
                    log10_Agw = parameter.Uniform(-18, -14)(amp_name)
                else:
                    log10_Agw = parameter.Uniform(-18, -11)(amp_name)
            else:
                log10_Agw = parameter.Uniform(-18, -11)(amp_name)

        gam_name = '{}_gamma'.format(name)
        if gamma_val is not None:
            gamma_gw = parameter.Constant(gamma_val)(gam_name)
        else:
            gamma_gw = parameter.Uniform(0, 7)(gam_name)

        # common red noise PSD
        if psd == 'powerlaw':
            cpl = utils.powerlaw(log10_A=log10_Agw, gamma=gamma_gw)
        elif psd == 'broken_powerlaw':
            delta_name = '{}_delta'.format(name)
            kappa_name = '{}_kappa'.format(name)
            log10_fb_name = '{}_log10_fb'.format(name)
            kappa_gw = parameter.Uniform(0.01, 0.5)(kappa_name)
            log10_fb_gw = parameter.Uniform(-10, -7)(log10_fb_name)

            if delta_val is not None:
                delta_gw = parameter.Constant(delta_val)(delta_name)
            else:
                delta_gw = parameter.Uniform(0, 7)(delta_name)
            cpl = gpp.broken_powerlaw(log10_A=log10_Agw,
                                      gamma=gamma_gw,
                                      delta=delta_gw,
                                      log10_fb=log10_fb_gw,
                                      kappa=kappa_gw)
        elif psd == 'turnover':
            kappa_name = '{}_kappa'.format(name)
            lf0_name = '{}_log10_fbend'.format(name)
            kappa_gw = parameter.Uniform(0, 7)(kappa_name)
            lf0_gw = parameter.Uniform(-9, -7)(lf0_name)
            cpl = utils.turnover(log10_A=log10_Agw, gamma=gamma_gw,
                                 lf0=lf0_gw, kappa=kappa_gw)
        elif psd == 'turnover_knee':
            kappa_name = '{}_kappa'.format(name)
            lfb_name = '{}_log10_fbend'.format(name)
            delta_name = '{}_delta'.format(name)
            lfk_name = '{}_log10_fknee'.format(name)
            kappa_gw = parameter.Uniform(0, 7)(kappa_name)
            lfb_gw = parameter.Uniform(-9.3, -8)(lfb_name)
            delta_gw = parameter.Uniform(-2, 0)(delta_name)
            lfk_gw = parameter.Uniform(-8, -7)(lfk_name)
            cpl = gpp.turnover_knee(log10_A=log10_Agw, gamma=gamma_gw,
                                    lfb=lfb_gw, lfk=lfk_gw,
                                    kappa=kappa_gw, delta=delta_gw)

    if psd == 'spectrum':
        rho_name = '{}_log10_rho'.format(name)

        # checking if priors specified, otherwise give default values
        if logmin is None:
            logmin = -9
        if logmax is None:
            logmax = -4

        if prior == 'uniform':
            log10_rho_gw = parameter.LinearExp(logmin, logmax,
                                               size=components)(rho_name)
        elif prior == 'log-uniform':
            log10_rho_gw = parameter.Uniform(logmin, logmax, size=components)(rho_name)

        cpl = gpp.free_spectrum(log10_rho=log10_rho_gw)

    # build the common signal: uncorrelated unless an ORF is requested
    if orf is None:
        crn = gp_signals.FourierBasisGP(cpl, coefficients=coefficients, combine=combine,
                                        components=components, Tspan=Tspan,
                                        name=name, pshift=pshift, pseed=pseed)
    elif orf in orfs.keys():
        if orf == 'crn':
            crn = gp_signals.FourierBasisGP(cpl, coefficients=coefficients, combine=combine,
                                            components=components, Tspan=Tspan,
                                            name=name, pshift=pshift, pseed=pseed)
        else:
            crn = gp_signals.FourierBasisCommonGP(cpl, orfs[orf],
                                                  components=components, combine=combine,
                                                  Tspan=Tspan,
                                                  name=name, pshift=pshift,
                                                  pseed=pseed)
    elif isinstance(orf, types.FunctionType):
        # user-supplied ORF function
        crn = gp_signals.FourierBasisCommonGP(cpl, orf,
                                              components=components, combine=combine,
                                              Tspan=Tspan,
                                              name=name, pshift=pshift,
                                              pseed=pseed)
    else:
        raise ValueError('ORF {} not recognized'.format(orf))

    return crn
| 34,848 | 42.506866 | 119 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/chromatic/chromatic.py | # -*- coding: utf-8 -*-
import numpy as np
from enterprise import constants as const
from enterprise.signals import deterministic_signals, parameter, signal_base
__all__ = ['chrom_exp_decay',
'chrom_exp_cusp',
'chrom_dual_exp_cusp',
'chrom_yearly_sinusoid',
'chromatic_quad_basis',
'chromatic_quad_prior',
'dmx_delay',
'dm_exponential_dip',
'dm_exponential_cusp',
'dm_dual_exp_cusp',
'dmx_signal',
'dm_annual_signal',
]
@signal_base.function
def chrom_exp_decay(toas, freqs, log10_Amp=-7, sign_param=-1.0,
                    t0=54000, log10_tau=1.7, idx=2):
    """
    Chromatic exponential-dip delay term in TOAs.

    :param t0: time of exponential minimum [MJD]
    :param log10_tau: log10 of the 1/e decay time of the exponential [days]
    :param log10_Amp: amplitude of dip
    :param sign_param: sign of waveform
    :param idx: index of chromatic dependence

    :return wf: delay time-series [s]
    """
    epoch = t0 * const.day
    tau = 10**log10_tau * const.day
    # amplitude switches on at `epoch` and decays exponentially afterwards
    after = toas > epoch
    wf = 10**log10_Amp * np.heaviside(toas - epoch, 1)
    wf[after] *= np.exp(-(toas[after] - epoch) / tau)
    return np.sign(sign_param) * wf * (1400 / freqs) ** idx
@signal_base.function
def chrom_exp_cusp(toas, freqs, log10_Amp=-7, sign_param=-1.0,
                   t0=54000, log10_tau_pre=1.7, log10_tau_post=1.7,
                   symmetric=False, idx=2):
    """
    Chromatic exponential-cusp delay term in TOAs.

    :param t0: time of exponential minimum [MJD]
    :param log10_tau_pre: log10 of the 1/e time before the peak [days]
    :param log10_tau_post: log10 of the 1/e time after the peak [days];
        ignored when ``symmetric`` is True
    :param symmetric: whether or not tau_pre = tau_post
    :param log10_Amp: amplitude of cusp
    :param sign_param: sign of waveform
    :param idx: index of chromatic dependence

    :return wf: delay time-series [s]
    """
    t0 *= const.day
    tau_pre = 10**log10_tau_pre * const.day
    # a symmetric cusp decays at the same rate on both sides of the peak;
    # this collapses the previously duplicated symmetric/asymmetric branches
    tau_post = tau_pre if symmetric else 10**log10_tau_post * const.day

    ind_pre = np.where(toas < t0)[0]
    ind_post = np.where(toas > t0)[0]

    # rising side (before t0) and decaying side (after t0) of the cusp
    wf_pre = 10**log10_Amp * (1 - np.heaviside(toas - t0, 1))
    wf_pre[ind_pre] *= np.exp(- (t0 - toas[ind_pre]) / tau_pre)
    wf_post = 10**log10_Amp * np.heaviside(toas - t0, 1)
    wf_post[ind_post] *= np.exp(- (toas[ind_post] - t0) / tau_post)
    wf = wf_pre + wf_post

    return np.sign(sign_param) * wf * (1400 / freqs) ** idx
@signal_base.function
def chrom_dual_exp_cusp(toas, freqs, t0=54000, sign_param=-1.0,
                        log10_Amp_1=-7, log10_tau_pre_1=1.7,
                        log10_tau_post_1=1.7,
                        log10_Amp_2=-7, log10_tau_pre_2=1.7,
                        log10_tau_post_2=1.7,
                        symmetric=False, idx1=2, idx2=4):
    """
    Sum of two chromatic exponential-cusp delay terms in TOAs, each with
    its own amplitude, decay times, and chromatic index.

    :param t0: time of exponential minimum [MJD]
    :param log10_Amp_1, log10_Amp_2: amplitudes of the two cusps
    :param log10_tau_pre_1, log10_tau_pre_2:
        log10 of the 1/e times before the peak [days]
    :param log10_tau_post_1, log10_tau_post_2:
        log10 of the 1/e times after the peak [days];
        ignored when ``symmetric`` is True
    :param symmetric: whether or not tau_pre = tau_post for each cusp
    :param sign_param: sign of waveform
    :param idx1, idx2: chromatic indices of the two cusps

    :return wf: delay time-series [s]
    """
    t0 *= const.day
    ind_pre = np.where(toas < t0)[0]
    ind_post = np.where(toas > t0)[0]

    def _cusp(log10_Amp, log10_tau_pre, log10_tau_post):
        # one exponential cusp peaking at t0; the previously duplicated
        # symmetric/asymmetric branches are collapsed here
        tau_pre = 10**log10_tau_pre * const.day
        tau_post = tau_pre if symmetric else 10**log10_tau_post * const.day
        wf_pre = 10**log10_Amp * (1 - np.heaviside(toas - t0, 1))
        wf_pre[ind_pre] *= np.exp(- (t0 - toas[ind_pre]) / tau_pre)
        wf_post = 10**log10_Amp * np.heaviside(toas - t0, 1)
        wf_post[ind_post] *= np.exp(- (toas[ind_post] - t0) / tau_post)
        return wf_pre + wf_post

    wf_1 = _cusp(log10_Amp_1, log10_tau_pre_1, log10_tau_post_1)
    wf_2 = _cusp(log10_Amp_2, log10_tau_pre_2, log10_tau_post_2)

    return np.sign(sign_param) * (wf_1 * (1400 / freqs) ** idx1 + wf_2 * (1400 / freqs) ** idx2)
@signal_base.function
def chrom_yearly_sinusoid(toas, freqs, log10_Amp=-7, phase=0, idx=2):
    """
    Chromatic annual sinusoid.

    :param log10_Amp: amplitude of sinusoid
    :param phase: initial phase of sinusoid
    :param idx: index of chromatic dependence

    :return wf: delay time-series [s]
    """
    # sinusoid at one cycle per year, scaled by the chromatic factor
    sinusoid = np.sin(2 * np.pi * const.fyr * toas + phase)
    return 10**log10_Amp * sinusoid * (1400 / freqs) ** idx
@signal_base.function
def chromatic_quad_basis(toas, freqs, idx=4):
    """
    Basis for chromatic quadratic function.

    :param idx: index of chromatic dependence

    :return ret: normalized quadratic basis matrix [Ntoa, 3]
    """
    t_mid = (toas.max() + toas.min()) / 2
    # columns are (t - t_mid)^0, ^1, ^2, each chromatically scaled
    ret = np.column_stack([(toas - t_mid) ** power * (1400 / freqs) ** idx
                           for power in range(3)])
    norm = np.sqrt(np.sum(ret**2, axis=0))
    return ret / norm, np.ones(3)
@signal_base.function
def chromatic_quad_prior(toas):
    """
    Prior for chromatic quadratic function: effectively unconstrained
    (very large variance on each of the three coefficients).

    :return prior: prior-range for quadratic coefficients
    """
    return np.full(3, 1e80)
@signal_base.function
def dmx_delay(toas, freqs, dmx_ids, **kwargs):
    """
    Delay in DMX model of DM variations.

    :param dmx_ids: dictionary of DMX data for each pulsar from parfile
    :param kwargs: dictionary of enterprise DMX parameters

    :return wf: DMX signal
    """
    wf = np.zeros(len(toas))
    for dmx_id, epoch_data in dmx_ids.items():
        # TOAs inside this DMX bin, padded by 0.01 days on each side
        lo = (epoch_data['DMX_R1'] - 0.01) * 86400.
        hi = (epoch_data['DMX_R2'] + 0.01) * 86400.
        in_bin = (toas >= lo) & (toas <= hi)
        wf[in_bin] += kwargs[dmx_id] / freqs[in_bin]**2 / const.DM_K / 1e12
    return wf
def dm_exponential_dip(tmin, tmax, idx=2, sign='negative', name='dmexp'):
    """
    Returns chromatic exponential dip (i.e. TOA advance):

    :param tmin, tmax:
        search window for exponential dip time.
    :param idx:
        index of radio frequency dependence (i.e. DM is 2). If this is set
        to 'vary' then the index will vary from 1 - 6
    :param sign:
        set sign of dip: 'positive', 'negative', or 'vary'
    :param name: Name of signal

    :return dmexp:
        chromatic exponential dip waveform.
    """
    # sign of the dip: free parameter, or fixed +/-1
    if sign == 'vary':
        sign_param = parameter.Uniform(-1.0, 1.0)
    else:
        sign_param = 1.0 if sign == 'positive' else -1.0

    # priors on dip epoch, amplitude, and decay time
    t0_dmexp = parameter.Uniform(tmin, tmax)
    log10_Amp_dmexp = parameter.Uniform(-10, -2)
    log10_tau_dmexp = parameter.Uniform(0, 2.5)

    wf = chrom_exp_decay(log10_Amp=log10_Amp_dmexp,
                         t0=t0_dmexp, log10_tau=log10_tau_dmexp,
                         sign_param=sign_param, idx=idx)
    return deterministic_signals.Deterministic(wf, name=name)
def dm_exponential_cusp(tmin, tmax, idx=2, sign='negative',
                        symmetric=False, name='dm_cusp'):
    """
    Build a deterministic chromatic exponential-cusp (TOA advance) signal.

    :param tmin, tmax:
        search window for the epoch of the cusp.
    :param idx:
        index of radio-frequency dependence (i.e. DM is 2).
    :param sign:
        'positive', 'negative', or 'vary'; with 'vary' the sign becomes a
        free parameter sampled on [-1, 1].
    :param symmetric:
        if True, no independent post-cusp timescale is sampled.
    :param name: name given to the deterministic signal.

    :return dm_cusp:
        chromatic exponential cusp waveform.
    """
    # Priors on the cusp epoch, log-amplitude and pre-cusp log timescale.
    epoch = parameter.Uniform(tmin, tmax)
    log10_amp = parameter.Uniform(-10, -2)
    log10_tau_pre = parameter.Uniform(0, 2.5)

    # Map the requested sign convention to a constant or a free parameter.
    if sign == 'vary':
        sign_param = parameter.Uniform(-1.0, 1.0)
    else:
        sign_param = 1.0 if sign == 'positive' else -1.0

    # Symmetric cusps pass a fixed placeholder for the post-cusp timescale
    # (presumably ignored by chrom_exp_cusp when symmetric — TODO confirm).
    log10_tau_post = 1 if symmetric else parameter.Uniform(0, 2.5)

    wf = chrom_exp_cusp(log10_Amp=log10_amp, sign_param=sign_param,
                        t0=epoch, log10_tau_pre=log10_tau_pre,
                        log10_tau_post=log10_tau_post,
                        symmetric=symmetric, idx=idx)
    return deterministic_signals.Deterministic(wf, name=name)
def dm_dual_exp_cusp(tmin, tmax, idx1=2, idx2=4, sign='negative',
                     symmetric=False, name='dual_dm_cusp'):
    """
    Build a deterministic dual chromatic exponential-cusp signal: two cusp
    components sharing one epoch and sign but with independent amplitudes,
    timescales and chromatic indices.

    :param tmin, tmax:
        search window for the (shared) cusp epoch.
    :param idx1, idx2:
        chromatic indices of the two cusp components.
    :param sign:
        'positive', 'negative', or 'vary'; with 'vary' the sign becomes a
        free parameter sampled on [-1, 1].
    :param symmetric:
        if True, no independent post-cusp timescales are sampled.
    :param name: name given to the deterministic signal.

    :return dm_cusp:
        dual chromatic exponential cusp waveform.
    """
    # Shared epoch; per-component amplitude and pre-cusp timescale priors.
    epoch = parameter.Uniform(tmin, tmax)
    log10_amp_1 = parameter.Uniform(-10, -2)
    log10_amp_2 = parameter.Uniform(-10, -2)
    log10_tau_pre_1 = parameter.Uniform(0, 2.5)
    log10_tau_pre_2 = parameter.Uniform(0, 2.5)

    # Map the requested sign convention to a constant or a free parameter.
    if sign == 'vary':
        sign_param = parameter.Uniform(-1.0, 1.0)
    else:
        sign_param = 1.0 if sign == 'positive' else -1.0

    # Symmetric cusps pass fixed placeholders for the post-cusp timescales.
    if symmetric:
        log10_tau_post_1 = 1
        log10_tau_post_2 = 1
    else:
        log10_tau_post_1 = parameter.Uniform(0, 2.5)
        log10_tau_post_2 = parameter.Uniform(0, 2.5)

    wf = chrom_dual_exp_cusp(t0=epoch, sign_param=sign_param,
                             symmetric=symmetric,
                             log10_Amp_1=log10_amp_1,
                             log10_tau_pre_1=log10_tau_pre_1,
                             log10_tau_post_1=log10_tau_post_1,
                             log10_Amp_2=log10_amp_2,
                             log10_tau_pre_2=log10_tau_pre_2,
                             log10_tau_post_2=log10_tau_post_2,
                             idx1=idx1, idx2=idx2)
    return deterministic_signals.Deterministic(wf, name=name)
def dmx_signal(dmx_data, name='dmx_signal'):
    """
    Build the deterministic DMX signal from parfile DMX entries.

    :param dmx_data: dictionary of DMX data for each pulsar from parfile.
    :param name: name given to the deterministic signal.
    :return dmx_sig:
        dmx signal waveform.
    """
    # One Normal prior per DMX window, centered on the parfile value with
    # the parfile uncertainty as its width.
    dmx = {dmx_id: parameter.Normal(mu=entry['DMX_VAL'],
                                    sigma=entry['DMX_ERR'])
           for dmx_id, entry in sorted(dmx_data.items())}
    wf = dmx_delay(dmx_ids=dmx_data, **dmx)
    return deterministic_signals.Deterministic(wf, name=name)
def dm_annual_signal(idx=2, name='dm_s1yr'):
    """
    Build a chromatic annual sinusoid (TOA advance) signal.

    :param idx:
        index of radio-frequency dependence (i.e. DM is 2).
    :param name: name given to the deterministic signal.

    :return dm1yr:
        chromatic annual waveform.
    """
    # Priors on the log-amplitude and phase of the yearly sinusoid.
    log10_amp = parameter.Uniform(-10, -2)
    phase = parameter.Uniform(0, 2 * np.pi)
    wf = chrom_yearly_sinusoid(log10_Amp=log10_amp, phase=phase, idx=idx)
    return deterministic_signals.Deterministic(wf, name=name)
| 12,928 | 33.569519 | 96 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/chromatic/__init__.py | # -*- coding: utf-8 -*-
from .chromatic import * # noqa: F401, F403
| 70 | 16.75 | 44 | py |