# A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/prior/benchmark.py
import numpy as np
import pandas as pd
from blackbox.load_utils import evaluation_split_from_task, tasks
from optimizer.normalization_transforms import from_string
from prior.mlp_pytorch import ParametricPrior
from prior.mlp_sklearn import ParametricPriorSklearn
normalization = "gaussian"
rows = []
#tasks = [
# 'electricity',
# # 'australian',
# #'m4-Hourly',
# #'m4-Daily',
#]
for task in tasks:
Xys_train, (X_test, y_test) = evaluation_split_from_task(task)
X_train = np.concatenate([X for X, y in Xys_train], axis=0)
normalizer = from_string(normalization)
z_train = np.concatenate([normalizer(y).transform(y) for X, y in Xys_train], axis=0)
# y_test is only used for measuring RMSE on the prior as mentioned in the paper
z_test = normalizer(y_test).transform(y_test)
# todo normalization inside prior
prior = ParametricPrior(
X_train=X_train,
y_train=z_train,
num_gradient_updates=2000,
num_decays=2,
num_layers=3,
num_hidden=50,
dropout=0.1,
lr=0.001,
)
mu_pred, sigma_pred = prior.predict(X_test)
rmse = np.sqrt(np.square(mu_pred - z_test).mean())
mae = np.abs(mu_pred - z_test).mean()
row = {"task": task, "rmse": rmse, "mae": mae}
rows.append(row)
print(row)
df = pd.DataFrame(rows)
print(df.to_string())
# A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/optimizer_names.py
class names:
# put names into a class to add structure and avoid having lots of imports
RS = "RS"
# ablation
GP = "GP"
GCP_ho_prior = "GCP + homosk. prior"
GCP = "GCP"
GCP_prior = "GCP + prior (ours)"
GP_prior = "GP + prior"
CTS_ho_prior = "CTS + homosk. prior"
CTS_prior = "CTS (ours)"
TS_prior = "TS"
GP_prior = "GP + prior"
# multi-objectives
MO_suffix = " + MO"
GP_prior_mo = GP_prior + MO_suffix
GP_mo = GP + MO_suffix
GCP_prior_mo = "GCP + prior" + MO_suffix + " (ours)"
GCP_mo = GCP + MO_suffix
CTS_prior_mo = "CTS + prior" + MO_suffix + " (ours)"
TS_prior_mo = TS_prior + MO_suffix
# baselines
WS_BEST = 'WS GP'
AUTORANGE_GP = "AutoGP"
AUTORANGE_RS = "AutoRS"
BOHB = 'BOHB'
REA = 'R-EA'
REINFORCE = 'REINFORCE'
ABLR = "ABLR"
ABLR_COPULA = 'ABLR Copula'
SGPT = "SGPT"
SGPT_COPULA = "SGPT Copula"
EHI = "EHI"
SMS = "SMS"
SUR = "SUR"
EMI = "EMI"
def method_name(dataset_name):
for prefix in ["fcnet", "xgboost"]:
if prefix in dataset_name:
return prefix
if 'nas102' in dataset_name:
return 'NAS'
return "DeepAR"
def rename_results(df):
rename_dict = {
'ablr_norm_fixed_set_tr': names.ABLR,
'ablr_copula': names.ABLR_COPULA,
'copula_gp_1_5_random_fix_sigma_5_tr': names.GCP_ho_prior,
'copula_gp_1_5_random_pred_sigma_5_tr': names.GCP_prior,
'copula_gp_1_5_random_pred_sigma_std_5_tr': names.GP_prior,
'copula_rs_1_fix_sigma_tr': names.CTS_ho_prior,
'copula_rs_1_pred_sigma_std_tr': names.TS_prior,
'copula_rs_1_pred_sigma_tr': names.CTS_prior,
'gp_fixed_set_tr': names.GP,
'random_fixed_set_tr': names.RS,
'warm-start-gp-top1-1init': names.WS_BEST,
'auto-range-gp': names.AUTORANGE_GP,
'copula_gp_no_proir': names.GCP,
'sgpt_0.01': names.SGPT,
#'sgpt_0.10': names.SGPT_010,
#'sgpt_1.00': names.SGPT_100,
'sgpt_0.01_copula': names.SGPT_COPULA
}
df.method = df.method.apply(lambda name: rename_dict[name] if name in rename_dict else "")
df = df.loc[df.method != "", :]
df.dataset = df.dataset.apply(
lambda name: name.replace("xgboost_", "")
.replace("_max_resource", "")
.replace("fcnet_", "")
.replace("nas102_", "")
.replace("_lookup", "")
)
df = df[df.dataset != 'skin_nonskin']
    return df
# A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/table2.py
from typing import List, Optional
import pandas as pd
import numpy as np
from pathlib import Path
from blackbox.offline import deepar, fcnet, xgboost, nas102
from experiments.load_results import load_results_paper
from experiments.optimizer_names import names
path = Path(__file__).parent
def adtm_scores(df: pd.DataFrame, optimizers_to_plot: Optional[List[str]] = None, baseline: Optional[str] = "RS"):
# return adtm table per blackbox and per dataset
scores_df = df.groupby(["blackbox", "task", "optimizer", "iteration"])[
"ADTM"
].mean().reset_index().pivot_table(
values='ADTM',
columns=['optimizer'],
index=['blackbox', 'task', 'iteration'],
)
rel_scores = (scores_df[[baseline]].values - scores_df.values) / scores_df[[baseline]].values
rel_scores_df = pd.DataFrame(rel_scores, index=scores_df.index, columns=scores_df.columns).reset_index(
level=2).drop(
columns='iteration')
scores_per_task = rel_scores_df.groupby(['blackbox', 'task']).mean()
avg_scores_per_blackbox = rel_scores_df.groupby(['blackbox']).mean()
if optimizers_to_plot is not None:
avg_scores_per_blackbox = avg_scores_per_blackbox[optimizers_to_plot]
scores_per_task = scores_per_task[optimizers_to_plot]
scores_per_blackbox = avg_scores_per_blackbox.T[["DeepAR", "FCNET", "XGBoost", "nas_bench102"]]
return scores_per_blackbox, scores_per_task
def rank(scores_per_task: pd.DataFrame, blackboxes: List[str]):
ranks = {}
for b in blackboxes:
ranks[b] = scores_per_task.transpose()[b].rank(ascending=False).mean(axis=1)
return pd.DataFrame(ranks)
if __name__ == '__main__':
df_paper = load_results_paper()
print(df_paper.head())
baseline = names.RS
renamed_baseline = f"{names.RS} (baseline)"
df_paper.optimizer = df_paper.optimizer.apply(lambda name: renamed_baseline if name == baseline else name)
optimizers_to_plot = [
renamed_baseline,
names.TS_prior,
names.CTS_prior,
names.GP_prior,
names.GCP,
names.GCP_prior,
names.GP,
names.AUTORANGE_GP,
names.WS_BEST,
names.ABLR,
names.ABLR_COPULA,
names.SGPT,
names.SGPT_COPULA,
names.BOHB,
names.REA,
names.REINFORCE,
]
scores_per_blackbox, scores_per_task = adtm_scores(
df_paper,
optimizers_to_plot,
baseline=renamed_baseline,
)
print(scores_per_blackbox.to_string())
print(scores_per_blackbox.to_latex(float_format='%.2f', na_rep='-'))
rank_df = rank(scores_per_task=scores_per_task, blackboxes=[deepar, fcnet, xgboost, nas102])
print(rank_df.to_string())
print(rank_df.to_latex(float_format='%.1f', na_rep='-'))
# generates "dtm (rank)" numbers dataframe so that it can be exported easily in latex
dtm_and_rank_values = []
for x, y in zip(scores_per_blackbox.values.reshape(-1), rank_df.values.reshape(-1)):
dtm_and_rank_values.append("{:.2f}".format(x) + " (" + "{:.1f}".format(y) + ")")
dtm_and_rank = pd.DataFrame(
np.array(dtm_and_rank_values).reshape(rank_df.shape),
index=rank_df.index,
columns=rank_df.columns
)
print(dtm_and_rank.to_latex())
# A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/table2-new-implem.py
import os
import pandas as pd
from pathlib import Path
from experiments.load_results import load_results_paper, load_results_reimplem, add_adtm
from experiments.optimizer_names import names
from experiments.table2 import adtm_scores, rank
path = Path(__file__).parent
if __name__ == '__main__':
df_paper = load_results_paper(do_add_adtm=False)
df_reimplem = load_results_reimplem()
df = pd.concat([df_paper, df_reimplem], sort=False)
print(df.optimizer.unique())
optimizers_to_plot = [
"RS",
names.CTS_prior,
"CTS (sklearn)",
"CTS (pytorch)",
names.GCP_prior,
"GCP+prior (sklearn)",
"GCP+prior (pytorch)",
]
df = add_adtm(df)
scores_per_blackbox, scores_per_task = adtm_scores(df, optimizers_to_plot)
print(scores_per_blackbox.to_string())
    print(scores_per_blackbox.to_latex(float_format='%.2f', na_rep='-'))
# A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/experiments/figure1.py
from pathlib import Path
import matplotlib.pyplot as plt
from blackbox.offline import deepar, fcnet, xgboost, nas102
from experiments.load_results import load_results_paper
from experiments.optimizer_names import names
from experiments.optimizer_styles import optimizer_style
path = Path(__file__).parent
def plot_optimizers(df, ax, blackbox, optimizers, legend: bool = False):
df_plot = df.loc[df.optimizer.isin(optimizers), :]
pivot_df = df_plot.loc[df_plot.blackbox == blackbox, :].groupby(
['blackbox', 'optimizer', 'iteration']
)['ADTM'].mean().reset_index().pivot_table(
index='iteration', columns='optimizer', values='ADTM'
).dropna()
# reorder optimizers to original list order
optimizers = [m for m in optimizers if m in pivot_df]
style, color = zip(*[optimizer_style(optimizer) for optimizer in optimizers])
pivot_df[optimizers].plot(
ax=ax,
title=blackbox,
color=list(color),
style=[a + b for a, b in style],
# marker=list(marker),
markevery=20,
alpha=0.8,
lw=2.5,
)
ax.grid()
if blackbox == 'DeepAR':
ax.set_ylim([None, 1e-2])
if blackbox == 'fcnet':
ax.set_ylim([None, 0.3])
if blackbox == 'xgboost':
ax.set_ylim([1e-2, 0.3])
if blackbox == 'NAS':
ax.set_xlim([None, 65])
# ax.set_ylim([0.001, None])
ax.set_yscale('log')
ax.set_ylabel('ADTM')
if not legend:
ax.get_legend().remove()
else:
ax.legend(loc="upper right")
if __name__ == '__main__':
df = load_results_paper()
blackboxes = [deepar, fcnet, xgboost, nas102]
optimizers_to_plot = [
[
names.RS,
names.GP,
names.AUTORANGE_GP,
names.WS_BEST,
names.ABLR,
names.CTS_prior,
names.GCP_prior,
# 'BOHB', 'R-EA', 'REINFORCE',
],
[
names.GP,
names.GP_prior,
names.GCP,
names.GCP_prior,
names.TS_prior,
names.CTS_prior,
]
]
fig, axes = plt.subplots(4, 2, figsize=(10, 12), sharex='row', sharey='row')
for i, blackbox in enumerate(blackboxes):
for j, optimizers in enumerate(optimizers_to_plot):
plot_optimizers(df, blackbox=blackbox, ax=axes[i, j], optimizers=optimizers, legend=(i == 0))
plt.savefig("adtm.pdf")
plt.show()
# A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/optimizer/gaussian_process_functional_prior.py
from typing import Optional, Tuple, Callable, Union, List
import logging
import numpy as np
import torch
from gpytorch import ExactMarginalLogLikelihood
from gpytorch.constraints import GreaterThan
from gpytorch.likelihoods import GaussianLikelihood
from torch import Tensor
from torch.distributions import Normal
from botorch import fit_gpytorch_model
from botorch.acquisition import ExpectedImprovement, ScalarizedObjective
from botorch.models import SingleTaskGP
from botorch.models.model import Model
from botorch.optim import optimize_acqf
from botorch.utils.transforms import t_batch_mode_transform
from blackbox import Blackbox
from constants import num_gradient_updates
from misc.artificial_data import artificial_task1
from optimizer.gaussian_process import GP
from optimizer.thompson_sampling_functional_prior import TS
def residual_transform(y, mu_pred, sigma_pred):
return (y - mu_pred) / sigma_pred
def residual_transform_inv(z, mu_pred, sigma_pred):
return z * sigma_pred + mu_pred
def scale_posterior(mu_posterior, sigma_posterior, mu_est, sigma_est):
mean = mu_posterior * sigma_est + mu_est
sigma = (sigma_posterior * sigma_est)
return mean, sigma
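# Illustrative sketch (hypothetical values): residual_transform standardises
# observations with the prior's predicted mean/std, residual_transform_inv undoes it,
# and scale_posterior applies the same shift/scale to a GP posterior over residuals.
def _example_residual_round_trip() -> bool:
    y = T.tensor([[0.3], [0.7]])
    mu_pred = T.tensor([[0.5], [0.5]])
    sigma_pred = T.tensor([[2.0], [2.0]])
    z = residual_transform(y, mu_pred, sigma_pred)
    return bool(T.allclose(residual_transform_inv(z, mu_pred, sigma_pred), y))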
class ShiftedExpectedImprovement(ExpectedImprovement):
"""
Applies ExpectedImprovement taking care to shift residual posterior with the predicted
prior mean and variance
:param model:
:param best_f: best value observed (not residual but actual value)
:param mean_std_predictor:
:param objective:
:param maximize:
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
mean_std_predictor: Callable[[np.array], Tuple[np.array, np.array]],
objective: Optional[ScalarizedObjective] = None,
maximize: bool = True,
) -> None:
super(ShiftedExpectedImprovement, self).__init__(model=model, best_f=best_f, objective=objective,
maximize=maximize)
self.mean_std_predictor = mean_std_predictor
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
"""
:param X: A (..., 1, input_dim) batched tensor of input_dim design points.
Expected Improvement is computed for each point individually,
i.e., what is considered are the marginal posteriors, not the
joint.
:return: A (...) tensor of Expected Improvement values at the
given design points `X`.
"""
with torch.no_grad():
# both (..., 1,)
# (..., input_dim)
X_features = X.detach().numpy().squeeze(1)
mu_est, sigma_est = self.mean_std_predictor(X_features)
# both (..., 1, 1)
mu_est = torch.Tensor(mu_est).unsqueeze(1)
sigma_est = torch.Tensor(sigma_est).unsqueeze(1)
posterior = self._get_posterior(X=X)
mean, sigma = scale_posterior(
mu_posterior=posterior.mean,
sigma_posterior=posterior.variance.clamp_min(1e-6).sqrt(),
mu_est=mu_est,
sigma_est=sigma_est,
)
u = (mean - self.best_f.expand_as(mean)) / sigma
if not self.maximize:
u = -u
normal = Normal(torch.zeros_like(u), torch.ones_like(u))
ucdf = normal.cdf(u)
updf = torch.exp(normal.log_prob(u))
ei = sigma * (updf + u * ucdf)
return ei.squeeze(dim=-1).squeeze(dim=-1)
class ShiftedThompsonSampling(ExpectedImprovement):
"""
Applies Thompson sampling taking care to shift residual posterior with the predicted
prior mean and variance
:param model:
:param best_f:
:param mean_std_predictor:
:param objective:
:param maximize:
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
mean_std_predictor: Callable[[np.array], Tuple[np.array, np.array]],
objective: Optional[ScalarizedObjective] = None,
maximize: bool = True,
) -> None:
super(ShiftedThompsonSampling, self).__init__(model=model, best_f=best_f, objective=objective,
maximize=maximize)
self.mean_std_predictor = mean_std_predictor
@t_batch_mode_transform(expected_q=1)
def forward(self, X: Tensor) -> Tensor:
"""
:param X: A `... x 1 x d`-dim batched tensor of `d`-dim design points.
Expected Improvement is computed for each point individually,
i.e., what is considered are the marginal posteriors, not the
joint.
:return: A `...` tensor of Expected Improvement values at the
given design points `X`.
"""
with torch.no_grad():
# both (..., 1,)
mu_est, sigma_est = self.mean_std_predictor(X)
posterior = self._get_posterior(X=X)
mean, sigma = scale_posterior(
mu_posterior=posterior.mean,
sigma_posterior=posterior.variance.clamp_min(1e-9).sqrt(),
mu_est=mu_est,
sigma_est=sigma_est,
)
normal = Normal(torch.zeros_like(mean), torch.ones_like(mean))
u = normal.sample() * sigma + mean
if not self.maximize:
u = -u
return u.squeeze(dim=-1).squeeze(dim=-1)
class G3P(GP):
def __init__(
self,
input_dim: int,
output_dim: int,
bounds: Optional[np.array] = None,
evaluations_other_tasks: Optional[List[Tuple[np.array, np.array]]] = None,
num_gradient_updates: int = num_gradient_updates,
normalization: str = "standard",
prior: str = "pytorch",
):
super(G3P, self).__init__(
input_dim=input_dim,
output_dim=output_dim,
bounds=bounds,
normalization=normalization,
)
self.initial_sampler = TS(
input_dim=input_dim,
output_dim=output_dim,
evaluations_other_tasks=evaluations_other_tasks,
num_gradient_updates=num_gradient_updates,
normalization=normalization,
prior=prior,
)
def _sample(self, candidates: Optional[np.array] = None) -> np.array:
if len(self.X_observed) < self.num_initial_random_draws:
return self.initial_sampler.sample(candidates=candidates)
else:
z_observed = torch.Tensor(self.transform_outputs(self.y_observed.numpy()))
with torch.no_grad():
# both (n, 1)
#mu_pred, sigma_pred = self.thompson_sampling.prior(self.X_observed)
mu_pred, sigma_pred = self.initial_sampler.prior.predict(self.X_observed)
mu_pred = torch.Tensor(mu_pred)
sigma_pred = torch.Tensor(sigma_pred)
# (n, 1)
r_observed = residual_transform(z_observed, mu_pred, sigma_pred)
# build and fit GP on residuals
gp = SingleTaskGP(
train_X=self.X_observed,
train_Y=r_observed,
likelihood=GaussianLikelihood(noise_constraint=GreaterThan(1e-3)),
)
mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
fit_gpytorch_model(mll)
acq = ShiftedExpectedImprovement(
model=gp,
best_f=z_observed.min(dim=0).values,
mean_std_predictor=self.initial_sampler.prior.predict,
maximize=False,
)
if candidates is None:
candidate, acq_value = optimize_acqf(
acq,
bounds=self.bounds_tensor,
q=1,
num_restarts=5,
raw_samples=100,
)
# import matplotlib.pyplot as plt
# x = torch.linspace(-1, 1).unsqueeze(dim=-1)
# x = torch.cat((x, x * 0), dim=1)
# plt.plot(x[:, 0].flatten().tolist(), acq(x.unsqueeze(dim=1)).tolist())
# plt.show()
return candidate[0]
else:
# (N,)
ei = acq(torch.Tensor(candidates).unsqueeze(dim=-2))
return torch.Tensor(candidates[ei.argmax()])
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
num_evaluations = 10
Xy_train, X_test, y_test = artificial_task1()
blackbox = Blackbox(
input_dim=2,
output_dim=1,
eval_fun=lambda x: x.sum(axis=-1, keepdims=True),
)
optimizer = G3P(
input_dim=blackbox.input_dim,
output_dim=blackbox.output_dim,
evaluations_other_tasks=Xy_train,
num_gradient_updates=2,
)
candidates = X_test
for i in range(num_evaluations):
x = optimizer.sample(candidates)
#x = optimizer.sample()
y = blackbox(x)
logging.info(f"criterion {y} for arguments {x}")
optimizer.observe(x=x, y=y)
# A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/optimizer/thompson_sampling_functional_prior.py
import logging
from typing import Optional, List, Tuple
import numpy as np
from constants import num_gradient_updates
from optimizer import Optimizer
from optimizer.normalization_transforms import from_string
from optimizer.random_search import RS
from prior.mlp_pytorch import ParametricPrior
from prior.mlp_sklearn import ParametricPriorSklearn
class TS(Optimizer):
def __init__(
self,
input_dim: int,
output_dim: int,
bounds: Optional[np.array] = None,
evaluations_other_tasks: Optional[List[Tuple[np.array, np.array]]] = None,
num_gradient_updates: int = num_gradient_updates,
normalization: str = "standard",
prior: str = "pytorch",
):
super(TS, self).__init__(
input_dim=input_dim,
output_dim=output_dim,
evaluations_other_tasks=evaluations_other_tasks,
bounds=bounds,
)
# todo add option for data transform
assert evaluations_other_tasks is not None
X_train = np.concatenate([X for X, y in evaluations_other_tasks], axis=0)
normalizer = from_string(normalization)
z_train = np.concatenate([normalizer(y).transform(y) for X, y in evaluations_other_tasks], axis=0)
prior_dict = {
"sklearn": ParametricPriorSklearn,
"pytorch": ParametricPrior,
}
logging.info(f"fit prior {prior}")
self.prior = prior_dict[prior](
X_train=X_train,
y_train=z_train,
num_gradient_updates=num_gradient_updates,
)
logging.info("prior fitted")
def _sample(self, candidates: Optional[np.array] = None) -> np.array:
if candidates is None:
num_random_candidates = 10000
# since Thompson Sampling selects from discrete set of options,
# when no candidates are given we draw random candidates
candidates = self.draw_random_candidates(num_random_candidates)
mu_pred, sigma_pred = self.prior.predict(candidates)
samples = np.random.normal(loc=mu_pred, scale=sigma_pred)
return candidates[np.argmin(samples)]
def draw_random_candidates(self, num_random_candidates: int):
random_sampler = RS(
input_dim=self.input_dim,
output_dim=self.output_dim,
bounds=self.bounds,
)
candidates = np.stack([random_sampler.sample() for _ in range(num_random_candidates)])
return candidates
# A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/optimizer/gaussian_process.py
import logging
from typing import Optional
import numpy as np
import torch
from botorch import fit_gpytorch_model
from botorch.acquisition import ExpectedImprovement
from botorch.models import SingleTaskGP
from botorch.optim import optimize_acqf
from botorch.utils.transforms import normalize
from gpytorch import ExactMarginalLogLikelihood
from gpytorch.constraints import GreaterThan
from gpytorch.likelihoods import GaussianLikelihood
from blackbox import Blackbox, BlackboxOffline
from constants import num_initial_random_draws
from misc import set_seed
from misc.artificial_data import artificial_task1
from optimizer import Optimizer
from optimizer.normalization_transforms import from_string
from optimizer.random_search import RS
class GP(Optimizer):
def __init__(
self,
input_dim: int,
output_dim: int,
bounds: Optional[np.array] = None,
normalization: str = "standard",
evaluations_other_tasks=None,
):
super(GP, self).__init__(
input_dim=input_dim,
output_dim=output_dim,
evaluations_other_tasks=evaluations_other_tasks,
bounds=bounds,
)
# maintains observations
# (num_observations, input_dim)
self.X_observed = torch.empty(size=(0, input_dim))
# (num_observations, output_dim)
self.y_observed = torch.empty(size=(0, output_dim))
self.num_initial_random_draws = num_initial_random_draws
self.normalizer = from_string(normalization)
self.initial_sampler = RS(
input_dim=input_dim,
output_dim=output_dim,
bounds=bounds,
)
self.bounds_tensor = torch.Tensor(self.bounds)
def expected_improvement(self, model, best_f):
return ExpectedImprovement(
model=model,
best_f=best_f,
maximize=False,
)
def transform_outputs(self, y: np.array):
psi = self.normalizer(y)
z = psi.transform(y)
return z
def _sample(self, candidates: Optional[np.array] = None) -> np.array:
if len(self.X_observed) < self.num_initial_random_draws:
return self.initial_sampler.sample(candidates=candidates)
else:
z_observed = torch.Tensor(self.transform_outputs(self.y_observed.numpy()))
# build and fit GP
gp = SingleTaskGP(
train_X=self.X_observed,
train_Y=z_observed,
# special likelihood for numerical Cholesky errors, following advice from
# https://www.gitmemory.com/issue/pytorch/botorch/179/506276521
likelihood=GaussianLikelihood(noise_constraint=GreaterThan(1e-3)),
)
mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
fit_gpytorch_model(mll)
acq = self.expected_improvement(
model=gp,
best_f=z_observed.min(dim=0).values,
)
if candidates is None:
candidate, acq_value = optimize_acqf(
acq, bounds=self.bounds_tensor, q=1, num_restarts=5, raw_samples=100,
)
return candidate[0]
else:
# (N,)
ei = acq(torch.Tensor(candidates).unsqueeze(dim=-2))
return torch.Tensor(candidates[ei.argmax()])
def _observe(self, x: np.array, y: np.array):
# remark, we could fit the GP there so that sampling several times avoid the cost of refitting the GP
self.X_observed = torch.cat((self.X_observed, torch.Tensor(x).unsqueeze(dim=0)), dim=0)
self.y_observed = torch.cat((self.y_observed, torch.Tensor(y).unsqueeze(dim=0)), dim=0)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
num_evaluations = 10
Xy_train, X_test, y_test = artificial_task1(seed=0)
print(y_test[0])
set_seed(0)
blackbox = BlackboxOffline(
X=X_test,
y=y_test,
)
optimizer = GP(
input_dim=blackbox.input_dim,
output_dim=blackbox.output_dim,
)
candidates = X_test
for i in range(num_evaluations):
#x = optimizer.sample(candidates)
x = optimizer.sample()
y = blackbox(x)
logging.info(f"criterion {y} for arguments {x}")
optimizer.observe(x=x, y=y)
# A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/blackbox/offline.py
from pathlib import Path
import pandas as pd
import numpy as np
deepar = 'DeepAR'
fcnet = 'FCNET'
xgboost = 'XGBoost'
nas102 = 'nas_bench102'
metric_error = 'metric_error'
metric_time = 'metric_time'
def evaluations_df(blackbox: str) -> pd.DataFrame:
"""
:returns a dataframe where each row corresponds to one hyperparameter evaluated for one task.
    The hyperparameter columns are all prefixed by 'hp_', the metric columns (error, time, etc.) are
prefixed by 'metric_' and dataset information are prefixed by 'dataset_' (only available for
DeepAR). Two columns 'task' and 'blackbox' contains the name of the task and of the blackbox.
## DeepAR
Hyperparameters:
* num_layers
* num_cells
* context_length_ratio, context_length_ratio = context_length / prediction_length
* dropout_rate
* learning_rate
* num_batches_per_epoch
Constants:
* epochs = 100
* early_stopping_patience = 5
Dataset specific:
* time_freq
* prediction_length
Metrics:
* CRPS
* train_loss
* throughput
* RMSE
## FCNET
"""
assert blackbox in [deepar, fcnet, xgboost, nas102]
df = pd.read_csv(Path(__file__).parent / f"offline_evaluations/{blackbox}.csv.zip")
return df
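# Illustrative sketch of how the column prefixes described above are typically used:
# select the hyperparameter and metric columns of a single task. The task name
# "electricity" is one of the DeepAR tasks in the offline evaluations.
def _example_task_columns(task: str = "electricity") -> pd.DataFrame:
    df = evaluations_df(deepar)
    hp_cols = [c for c in df.columns if c.startswith("hp_")]
    metric_cols = [c for c in df.columns if c.startswith("metric_")]
    return df.loc[df.task == task, hp_cols + metric_cols]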
if __name__ == '__main__':
df = evaluations_df(deepar)
import seaborn as sns
import matplotlib.pyplot as plt
df["hp_learning_rate"] = df.hp_learning_rate_log.apply(np.exp)
df["hp_context_length_ratio"] = df.hp_context_length_ratio_log.apply(np.exp)
df["hp_num_batches_per_epoch"] = df.hp_num_batches_per_epoch_log.apply(np.exp)
ax = sns.scatterplot(data=df, x='hp_learning_rate', y='metric_CRPS', hue='task')
plt.show()
ax = sns.scatterplot(data=df, x='hp_learning_rate', y='metric_CRPS', hue='task')
ax.set(xscale="log", yscale="log")
plt.show()
ax = sns.scatterplot(data=df, x='hp_context_length_ratio', y='metric_CRPS', hue='task')
ax.set(yscale="log")
plt.show()
ax = sns.scatterplot(data=df, x='hp_num_batches_per_epoch', y='metric_time', hue='task')
ax.set(xscale="log", yscale="log")
    plt.show()
# A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/blackbox/load_utils.py
import logging
from typing import Tuple, List
import numpy as np
from blackbox.offline import evaluations_df, deepar, fcnet, nas102, xgboost
blackbox_tasks = {
nas102: [
'cifar10',
'cifar100',
'ImageNet16-120'
],
fcnet: [
'naval',
'parkinsons',
'protein',
'slice',
],
deepar: [
'm4-Hourly',
'm4-Daily',
'm4-Weekly',
'm4-Monthly',
'm4-Quarterly',
'm4-Yearly',
'electricity',
'exchange-rate',
'solar',
'traffic',
],
xgboost: [
'a6a',
'australian',
'german.numer',
'heart',
'ijcnn1',
'madelon',
'skin_nonskin',
'spambase',
'svmguide1',
'w6a'
],
}
error_metric = {
deepar: 'metric_CRPS',
fcnet: 'metric_error',
nas102: 'metric_error',
xgboost: 'metric_error',
}
tasks = [task for bb, tasks in blackbox_tasks.items() for task in tasks]
def evaluations_np(
blackbox: str,
test_task: str,
metric_cols: List[str],
min_max_features: bool = False
) -> Tuple[List[Tuple[np.array, np.array]], Tuple[np.array, np.array]] :
"""
:param blackbox:
:param test_task:
:param metric_cols:
:param min_max_features: whether to apply min-max scaling on input features
:return: list of features/evaluations on train task and features/evaluations of the test task.
"""
logging.info(f"retrieving metrics {metric_cols} of blackbox {blackbox} for test-task {test_task}")
df = evaluations_df(blackbox=blackbox)
assert test_task in df.task.unique()
for c in metric_cols:
assert c in df.columns
Xy_dict = {}
for task in sorted(df.task.unique()):
mask = df.loc[:, 'task'] == task
hp_cols = [c for c in sorted(df.columns) if c.startswith("hp_")]
X = df.loc[mask, hp_cols].values
y = df.loc[mask, metric_cols].values
Xy_dict[task] = X, y
# todo it would be better done as a post-processing step
if blackbox in [fcnet, nas102]:
# applies onehot encoding to *all* hp columns as all hps are categories for those two blackboxes
# it would be nice to detect column types or pass it as an argument
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
hp_cols = [c for c in sorted(df.columns) if c.startswith("hp_")]
enc.fit(df.loc[:, hp_cols])
for task, (X, y) in Xy_dict.items():
X_features = enc.transform(X)
Xy_dict[task] = X_features, y
if min_max_features:
# min-max scaling of input features
from sklearn.preprocessing import MinMaxScaler
X = np.vstack([X for (X, y) in Xy_dict.values()])
scaler = MinMaxScaler().fit(X)
Xy_dict = {t: (scaler.transform(X), y) for (t, (X, y)) in Xy_dict.items()}
Xys_train = [Xy_dict[t] for t in df.task.unique() if t != test_task]
Xy_test = Xy_dict[test_task]
return Xys_train, Xy_test
def blackbox_from_task(task: str) -> str:
for bb, tasks in blackbox_tasks.items():
if task in tasks:
return bb
assert f"unknown task {task}"
def evaluation_split_from_task(test_task: str, min_max_features: bool = True) -> Tuple[List[Tuple[np.array, np.array]], Tuple[np.array, np.array]]:
"""
:param test_task:
    :param min_max_features: whether inputs are mapped to [0, 1] with min-max scaling
:return: list of features/evaluations on train task and features/evaluations of the test task.
"""
blackbox = blackbox_from_task(test_task)
Xys_train, Xy_test = evaluations_np(
blackbox=blackbox,
test_task=test_task,
metric_cols=[error_metric[blackbox]],
min_max_features=min_max_features
)
return Xys_train, Xy_test
if __name__ == '__main__':
Xys_train, (X_test, y_test) = evaluation_split_from_task("a6a")
for task in [
'electricity',
'cifar10',
'australian',
'parkinsons',
]:
Xys_train, (X_test, y_test) = evaluation_split_from_task(task)
        print(len(Xys_train), X_test.shape)
# A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/tst/test_prior.py
import numpy as np
from prior.mlp_pytorch import ParametricPrior
num_train_examples = 10000
num_test_examples = num_train_examples
dim = 2
num_gradient_updates = 200
lr = 1e-2
def make_random_X_y(num_examples: int, dim: int, noise_std: float):
X = np.random.rand(num_examples, dim)
noise = np.random.normal(scale=noise_std, size=(num_examples, 1))
y = X.sum(axis=-1, keepdims=True) + noise
return X, y
def test_mu_fit():
# test that parametric prior can recover a simple linear function for the mean
noise_std = 0.01
X_train, y_train = make_random_X_y(num_examples=num_train_examples, dim=dim, noise_std=noise_std)
prior = ParametricPrior(
X_train=X_train,
y_train=y_train,
num_gradient_updates=num_gradient_updates,
num_decays=1,
# smaller network for UT speed
num_layers=2,
num_hidden=20,
dropout=0.0,
lr=lr
)
X_test, y_test = make_random_X_y(num_examples=num_test_examples, dim=dim, noise_std=noise_std)
mu_pred, sigma_pred = prior.predict(X_test)
mu_l1_error = np.abs(mu_pred - y_test).mean()
print(mu_l1_error)
assert mu_l1_error < 0.3
def test_sigma_fit():
# test that parametric prior can recover a simple constant function for the variance
noise_std = 0.5
X_train, y_train = make_random_X_y(num_examples=num_train_examples, dim=dim, noise_std=noise_std)
prior = ParametricPrior(
X_train=X_train,
y_train=y_train,
num_gradient_updates=num_gradient_updates,
num_decays=1,
num_layers=2,
num_hidden=20,
dropout=0.0,
lr=lr
)
X_test, y_test = make_random_X_y(num_examples=num_test_examples, dim=dim, noise_std=noise_std)
mu_pred, sigma_pred = prior.predict(X_test)
    sigma_l1_error = np.abs(sigma_pred.mean() - noise_std)
assert sigma_l1_error < 0.05
# A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/tst/test_optimization.py
import logging
import random
from functools import partial
import numpy as np
import pytest
import torch
from blackbox import Blackbox, BlackboxOffline
from misc import set_seed
from misc.artificial_data import artificial_task1
from optimizer.gaussian_process import GP
from optimizer.gaussian_process_functional_prior import G3P
from optimizer.normalization_transforms import StandardTransform, GaussianTransform
from optimizer.thompson_sampling_functional_prior import TS
from optimizer.random_search import RS
Xy_train, X_test, y_test = artificial_task1()
@pytest.mark.parametrize("blackbox", [
Blackbox(
input_dim=2,
output_dim=1,
eval_fun=lambda x: x.sum(axis=-1, keepdims=True),
),
BlackboxOffline(
X=X_test,
y=y_test,
)
])
def test_blackbox_works_with_optimization(blackbox: Blackbox):
logging.basicConfig(level=logging.INFO)
seed = 3
num_evaluations = 5
optimizer_cls = RS
set_seed(seed)
optimizer = optimizer_cls(
input_dim=blackbox.input_dim,
output_dim=blackbox.output_dim,
evaluations_other_tasks=Xy_train,
)
candidates = X_test
for i in range(num_evaluations):
x = optimizer.sample(candidates)
y = blackbox(x)
logging.info(f"criterion {y} for arguments {x}")
optimizer.observe(x=x, y=y)
@pytest.mark.parametrize("optimizer_cls", [
RS,
# 5 gradient updates to makes it faster as we are only smoke-checking
partial(TS, num_gradient_updates=5, normalization="standard"),
partial(TS, num_gradient_updates=5, normalization="gaussian"),
partial(GP, normalization="standard"),
partial(GP, normalization="gaussian"),
partial(G3P, num_gradient_updates=5, normalization="standard"),
])
@pytest.mark.parametrize("constrained_search", [False, True])
def test_smoke_optimizers(optimizer_cls, constrained_search: bool):
logging.basicConfig(level=logging.INFO)
num_evaluations = 10
blackbox = Blackbox(
input_dim=2,
output_dim=1,
eval_fun=lambda x: x.sum(axis=-1, keepdims=True),
)
optimizer = optimizer_cls(
input_dim=blackbox.input_dim,
output_dim=blackbox.output_dim,
evaluations_other_tasks=Xy_train,
)
candidates = X_test
for i in range(num_evaluations):
if constrained_search:
x = optimizer.sample(candidates)
else:
x = optimizer.sample()
y = blackbox(x)
logging.info(f"criterion {y} for arguments {x}")
optimizer.observe(x=x, y=y)
# PC-JeDi-main/src/physics.py
# import jetnet
import numpy as np
import pytorch_lightning as pl
import torch as T
# FIX RANDOM SEED FOR REPRODUCIBILITY
pl.seed_everything(0, workers=True)
def locals_to_mass_and_pt(csts: T.Tensor, mask: T.BoolTensor) -> T.Tensor:
"""Calculate the overall jet pt and mass from the constituents. The
constituents are expected to be expressed as:
- del_eta
- del_phi
- log_pt
"""
# Calculate the constituent pt, eta and phi
eta = csts[..., 0]
phi = csts[..., 1]
pt = csts[..., 2].exp()
    # Calculate the total jet values in cartesian coordinates, include mask for sum
jet_px = (pt * T.cos(phi) * mask).sum(axis=-1)
jet_py = (pt * T.sin(phi) * mask).sum(axis=-1)
jet_pz = (pt * T.sinh(eta) * mask).sum(axis=-1)
jet_e = (pt * T.cosh(eta) * mask).sum(axis=-1)
# Get the derived jet values, the clamps ensure NaNs dont occur
jet_pt = T.clamp_min(jet_px**2 + jet_py**2, 0).sqrt()
jet_m = T.clamp_min(jet_e**2 - jet_px**2 - jet_py**2 - jet_pz**2, 0).sqrt()
return T.vstack([jet_pt, jet_m]).T
def numpy_locals_to_mass_and_pt(
csts: np.ndarray,
mask: np.ndarray,
pt_logged=False,
) -> np.ndarray:
"""Calculate the overall jet pt and mass from the constituents. The
constituents are expected to be expressed as:
- del_eta
- del_phi
- log_pt or just pt depending on pt_logged
"""
# Calculate the constituent pt, eta and phi
eta = csts[..., 0]
phi = csts[..., 1]
pt = np.exp(csts[..., 2]) * mask if pt_logged else csts[..., 2]
    # Calculate the total jet values in cartesian coordinates, include mask for sum
jet_px = (pt * np.cos(phi) * mask).sum(axis=-1)
jet_py = (pt * np.sin(phi) * mask).sum(axis=-1)
jet_pz = (pt * np.sinh(eta) * mask).sum(axis=-1)
jet_e = (pt * np.cosh(eta) * mask).sum(axis=-1)
# Get the derived jet values, the clamps ensure NaNs dont occur
jet_pt = np.sqrt(np.clip(jet_px**2 + jet_py**2, 0, None))
jet_m = np.sqrt(
np.clip(jet_e**2 - jet_px**2 - jet_py**2 - jet_pz**2, 0, None)
)
return np.vstack([jet_pt, jet_m]).T
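# Illustrative sketch (hypothetical values): two constituents of a single jet given as
# (del_eta, del_phi, pt), with a boolean mask marking both as real.
def _example_jet_pt_and_mass() -> np.ndarray:
    csts = np.array([[[0.1, 0.2, 50.0], [-0.1, -0.2, 30.0]]])  # (1 jet, 2 csts, 3)
    mask = np.array([[True, True]])
    # Returns an array of shape (n_jets, 2) holding [jet_pt, jet_mass]
    return numpy_locals_to_mass_and_pt(csts, mask, pt_logged=False)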
# PC-JeDi-main/src/torch_utils.py
from typing import Union
import numpy as np
import torch as T
import torch.nn as nn
def get_loss_fn(name: str, **kwargs) -> nn.Module:
"""Return a pytorch loss function given a name."""
if name == "none":
return None
# Regression losses
if name == "huber":
return nn.HuberLoss(reduction="none")
if name == "mse":
return nn.MSELoss(reduction="none")
if name == "mae":
return nn.L1Loss(reduction="none")
def to_np(inpt: Union[T.Tensor, tuple]) -> np.ndarray:
"""More consicse way of doing all the necc steps to convert a pytorch
tensor to numpy array.
- Includes gradient deletion, and device migration
"""
if isinstance(inpt, (tuple, list)):
return type(inpt)(to_np(x) for x in inpt)
if inpt.dtype == T.bfloat16: # Numpy conversions don't support bfloat16s
inpt = inpt.half()
return inpt.detach().cpu().numpy()
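# Illustrative sketch: to_np also handles (nested) tuples or lists of tensors and
# preserves the container type, returning numpy arrays inside.
def _example_to_np() -> tuple:
    batch = (T.randn(2, 3), T.arange(4))
    return to_np(batch)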
# PC-JeDi-main/src/hydra_utils.py
"""A collection of miscellaneous functions useful for the lightning/hydra
template."""
import logging
import os
from pathlib import Path
from typing import Any, List, Sequence
import hydra
import rich
import rich.syntax
import rich.tree
import wandb
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.utilities.rank_zero import rank_zero_only
log = logging.getLogger(__name__)
@rank_zero_only
def reload_original_config(cfg: OmegaConf, get_best: bool = False) -> OmegaConf:
"""Replaces the cfg with the one stored at the checkpoint location.
Will also set the chkpt_dir to the latest version of the last or
best checkpoint
"""
# Load the original config found in the the file directory
orig_cfg = OmegaConf.load(Path("full_config.yaml"))
# Get the latest updated checkpoint with the prefix last or best
flag = "best" if get_best else "last"
orig_cfg.ckpt_path = str(
sorted(Path.cwd().glob(f"checkpoints/{flag}*.ckpt"), key=os.path.getmtime)[-1]
)
# Set the wandb logger to attempt to resume the job
if hasattr(orig_cfg, "loggers"):
if hasattr(orig_cfg.loggers, "wandb"):
orig_cfg.loggers.wandb.resume = True
return orig_cfg
@rank_zero_only
def print_config(
cfg: DictConfig,
print_order: Sequence[str] = (
"datamodule",
"model",
"callbacks",
"loggers",
"trainer",
"paths",
),
resolve: bool = True,
) -> None:
"""Prints content of DictConfig using Rich library and its tree structure.
Args:
cfg: Configuration composed by Hydra.
print_order: Determines in what order config components are printed.
resolve: Whether to resolve reference fields of DictConfig.
"""
style = "dim"
tree = rich.tree.Tree("CONFIG", style=style, guide_style=style)
queue = []
# add fields from `print_order` to queue
for field in print_order:
queue.append(field) if field in cfg else log.warning(
f"Field '{field}' not found in config. Skipping '{field}' printing..."
)
# add all the other fields to queue (not specified in `print_order`)
for field in cfg:
if field not in queue:
queue.insert(0, field)
# generate config tree from queue
for field in queue:
branch = tree.add(field, style=style, guide_style=style)
config_group = cfg[field]
if isinstance(config_group, DictConfig):
branch_content = OmegaConf.to_yaml(config_group, resolve=resolve)
else:
branch_content = str(config_group)
branch.add(rich.syntax.Syntax(branch_content, "yaml"))
# print config tree
rich.print(tree)
def save_config(cfg: OmegaConf) -> None:
"""Saves the config to the output directory.
    This is necessary on top of hydra's default conf.yaml as it will resolve the
    entries, allowing one to resume jobs identically with elements such as
    ${now:%H-%M-%S}. Furthermore, hydra does not allow resuming a previous job from
    the same dir. The workaround is reload_original_config, but that will fail as
    hydra overwrites the default config.yaml file on startup, so this backup is
    needed for resuming.
"""
# In order to be able to resume the wandb logger session, save the run id
if hasattr(cfg, "loggers"):
if hasattr(cfg.loggers, "wandb"):
if wandb.run is not None:
cfg.loggers.wandb.id = wandb.run.id
# save config tree to file
OmegaConf.save(cfg, Path(cfg.paths.full_path, "full_config.yaml"), resolve=True)
@rank_zero_only
def log_hyperparameters(
cfg: DictConfig, model: LightningModule, trainer: Trainer
) -> None:
"""Passes the config dict to the trainer's logger, also calculates #
params."""
# Convert the config object to a hyperparameter dict
hparams = OmegaConf.to_container(cfg, resolve=True)
# calculate the number of trainable parameters in the model and add it
hparams["model/params/total"] = sum(p.numel() for p in model.parameters())
hparams["model/params/trainable"] = sum(
p.numel() for p in model.parameters() if p.requires_grad
)
hparams["model/params/non_trainable"] = sum(
p.numel() for p in model.parameters() if not p.requires_grad
)
trainer.logger.log_hyperparams(hparams)
def instantiate_collection(cfg_coll: DictConfig) -> List[Any]:
"""Uses hydra to instantiate a collection of classes and return a list."""
objs = []
if not cfg_coll:
log.warning("List of configs is empty")
return objs
if not isinstance(cfg_coll, DictConfig):
raise TypeError("List of configs must be a DictConfig!")
for _, cb_conf in cfg_coll.items():
if isinstance(cb_conf, DictConfig) and "_target_" in cb_conf:
log.info(f"Instantiating <{cb_conf._target_}>")
objs.append(hydra.utils.instantiate(cb_conf))
return objs
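# Illustrative sketch (hypothetical config): a single-callback collection, as it would
# appear under e.g. `callbacks:` in the hydra yaml, instantiated into a list of objects.
def _example_instantiate_callbacks() -> List[Any]:
    cfg = OmegaConf.create(
        {
            "model_checkpoint": {
                "_target_": "pytorch_lightning.callbacks.ModelCheckpoint",
                "save_last": True,
            }
        }
    )
    return instantiate_collection(cfg)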
# PC-JeDi-main/src/datamodules/jetnet.py
from copy import deepcopy
from typing import Mapping
import numpy as np
from jetnet.datasets import JetNet
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, Dataset
from src.numpy_utils import log_squash
from src.physics import numpy_locals_to_mass_and_pt
class JetNetData(Dataset):
"""Wrapper for the JetNet dataset so it works with our models with
different inputs."""
def __init__(self, **kwargs) -> None:
# Extra arguments used here
self.log_squash_pt = kwargs.pop("log_squash_pt", False)
self.high_as_context = kwargs.pop("high_as_context", True)
self.recalc_high = kwargs.pop("recalculate_jet_from_pc", True)
self.n_jets = kwargs.pop("n_jets", None)
# All other arguments passed to the jetnet dataset constructor
self.csts, self.high = JetNet.getData(**kwargs)
self.csts = self.csts.astype(np.float32)
self.high = self.high.astype(np.float32)
# Trim the data based on the requested number of jets (None does nothing)
self.csts = self.csts[: self.n_jets].astype(np.float32)
self.high = self.high[: self.n_jets].astype(np.float32)
# Manually calculate the mask by looking for zero padding
self.mask = ~np.all(self.csts == 0, axis=-1)
# Change the constituent information from pt-fraction to pure pt
csts = self.csts.copy()
csts[..., -1] = csts[..., -1] * self.high[..., 0:1]
# Recalculate the jet mass and pt using the point cloud
if self.recalc_high:
self.high = numpy_locals_to_mass_and_pt(csts, self.mask)
# Change the pt fraction to log_squash(pt)
if self.log_squash_pt:
self.csts[..., -1] = log_squash(csts[..., -1]) * self.mask
def __getitem__(self, idx) -> tuple:
csts = self.csts[idx]
high = self.high[idx] if self.high_as_context else np.empty(0, dtype="f")
mask = self.mask[idx]
return csts, mask, high
def __len__(self) -> int:
return len(self.high)
class JetNetDataModule(LightningDataModule):
def __init__(
self,
*,
data_conf: Mapping,
loader_kwargs: Mapping,
) -> None:
super().__init__()
self.save_hyperparameters(logger=False)
# Get the dimensions of the data from the config file
self.dim = len(data_conf["particle_features"])
self.n_nodes = data_conf["num_particles"]
if data_conf["high_as_context"]:
self.ctxt_dim = len(data_conf["jet_features"])
else:
self.ctxt_dim = 0
def setup(self, stage: str) -> None:
"""Sets up the relevant datasets."""
if stage == "fit":
self.train_set = JetNetData(**self.hparams.data_conf, split="train")
self.valid_set = JetNetData(**self.hparams.data_conf, split="test")
if stage == "test":
self.test_set = JetNetData(**self.hparams.data_conf, split="test")
def train_dataloader(self) -> DataLoader:
return DataLoader(self.train_set, **self.hparams.loader_kwargs, shuffle=True)
def val_dataloader(self) -> DataLoader:
return DataLoader(self.valid_set, **self.hparams.loader_kwargs, shuffle=False)
def test_dataloader(self) -> DataLoader:
test_kwargs = deepcopy(self.hparams.loader_kwargs)
test_kwargs["drop_last"] = False
return DataLoader(self.test_set, **test_kwargs, shuffle=False)
# PC-JeDi-main/src/models/diffusion.py
import math
from typing import Optional, Tuple
import torch as T
from tqdm import tqdm
class VPDiffusionSchedule:
def __init__(self, max_sr: float = 1, min_sr: float = 1e-2) -> None:
self.max_sr = max_sr
self.min_sr = min_sr
def __call__(self, time: T.Tensor) -> T.Tensor:
return cosine_diffusion_shedule(time, self.max_sr, self.min_sr)
def get_betas(self, time: T.Tensor) -> T.Tensor:
return cosine_beta_shedule(time, self.max_sr, self.min_sr)
def cosine_diffusion_shedule(
diff_time: T.Tensor, max_sr: float = 1, min_sr: float = 1e-2
) -> Tuple[T.Tensor, T.Tensor]:
"""Calculates the signal and noise rate for any point in the diffusion
processes.
Using continuous diffusion times between 0 and 1 which make switching between
different numbers of diffusion steps between training and testing much easier.
Returns only the values needed for the jump forward diffusion step and the reverse
DDIM step.
These are sqrt(alpha_bar) and sqrt(1-alphabar) which are called the signal_rate
and noise_rate respectively.
The jump forward diffusion process is simply a weighted sum of:
input * signal_rate + eps * noise_rate
    Uses a cosine annealing schedule, as proposed in https://arxiv.org/abs/2102.09672
Args:
diff_time: The time used to sample the diffusion scheduler
Output will match the shape
Must be between 0 and 1
max_sr: The initial rate at the first step
min_sr: How much signal is preserved at end of diffusion
(can't be zero due to log)
"""
# Use cosine annealing, which requires switching from times -> angles
start_angle = math.acos(max_sr)
end_angle = math.acos(min_sr)
diffusion_angles = start_angle + diff_time * (end_angle - start_angle)
signal_rates = T.cos(diffusion_angles)
noise_rates = T.sin(diffusion_angles)
return signal_rates, noise_rates
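# Illustrative sketch: the schedule maps diffusion times in [0, 1] onto the unit
# circle, so the returned rates always satisfy signal_rate**2 + noise_rate**2 == 1.
def _example_cosine_schedule_rates() -> T.Tensor:
    t = T.linspace(0, 1, 5)
    signal_rates, noise_rates = cosine_diffusion_shedule(t, max_sr=1, min_sr=1e-2)
    return signal_rates**2 + noise_rates**2  # all ones (up to float precision)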
def cosine_beta_shedule(
diff_time: T.Tensor, max_sr: float = 1, min_sr: float = 1e-2
) -> T.Tensor:
"""Returns the beta values for the continuous flows using the above cosine
scheduler."""
start_angle = math.acos(max_sr)
end_angle = math.acos(min_sr)
diffusion_angles = start_angle + diff_time * (end_angle - start_angle)
return 2 * (end_angle - start_angle) * T.tan(diffusion_angles)
def ddim_predict(
noisy_data: T.Tensor,
pred_noises: T.Tensor,
signal_rates: T.Tensor,
noise_rates: T.Tensor,
) -> T.Tensor:
"""Use a single ddim step to predict the final image from anywhere in the
diffusion process."""
return (noisy_data - noise_rates * pred_noises) / signal_rates
@T.no_grad()
def ddim_sampler(
model,
diff_sched: VPDiffusionSchedule,
initial_noise: T.Tensor,
n_steps: int = 50,
keep_all: bool = False,
mask: Optional[T.Tensor] = None,
ctxt: Optional[T.BoolTensor] = None,
clip_predictions: Optional[tuple] = None,
) -> Tuple[T.Tensor, list]:
"""Apply the DDIM sampling process to generate a batch of samples from
noise.
Args:
model: A denoising diffusion model
Requires: inpt_dim, device, forward() method that outputs pred noise
        diff_sched: A diffusion schedule object to calculate signal and noise rates
initial_noise: The initial noise to pass through the process
If none it will be generated here
n_steps: The number of iterations to generate the samples
keep_all: Return all stages of diffusion process
Can be memory heavy for large batches
num_samples: How many samples to generate
Ignored if initial_noise is provided
mask: The mask for the output point clouds
ctxt: The context tensor for the output point clouds
        clip_predictions: Can stabilise generation by clipping the outputs
"""
    # Get the initial noise for generation and the number of samples
num_samples = initial_noise.shape[0]
# The shape needed for expanding the time encodings
expanded_shape = [-1] + [1] * (initial_noise.dim() - 1)
# Check the input argument for the n_steps, must be less than what was trained
all_stages = []
step_size = 1 / n_steps
# The initial variables needed for the loop
noisy_data = initial_noise
diff_times = T.ones(num_samples, device=model.device)
next_signal_rates, next_noise_rates = diff_sched(diff_times.view(expanded_shape))
for step in tqdm(range(n_steps), "DDIM-sampling", leave=False):
# Update with the previous 'next' step
signal_rates = next_signal_rates
noise_rates = next_noise_rates
# Keep track of the diffusion evolution
if keep_all:
all_stages.append(noisy_data)
# Apply the denoise step to get X_0 and expected noise
pred_noises = model(noisy_data, diff_times, mask, ctxt)
pred_data = ddim_predict(noisy_data, pred_noises, signal_rates, noise_rates)
# Get the next predicted components using the next signal and noise rates
diff_times = diff_times - step_size
next_signal_rates, next_noise_rates = diff_sched(
diff_times.view(expanded_shape)
)
# Clamp the predicted X_0 for stability
if clip_predictions is not None:
pred_data.clamp_(*clip_predictions)
# Remix the predicted components to go from estimated X_0 -> X_{t-1}
noisy_data = next_signal_rates * pred_data + next_noise_rates * pred_noises
return pred_data, all_stages
@T.no_grad()
def euler_maruyama_sampler(
model,
diff_sched: VPDiffusionSchedule,
initial_noise: T.Tensor,
n_steps: int = 50,
keep_all: bool = False,
mask: Optional[T.Tensor] = None,
ctxt: Optional[T.BoolTensor] = None,
clip_predictions: Optional[tuple] = None,
) -> Tuple[T.Tensor, list]:
"""Apply the full reverse process to noise to generate a batch of
samples."""
    # Get the initial noise for generation and the number of samples
num_samples = initial_noise.shape[0]
# The shape needed for expanding the time encodings
expanded_shape = [-1] + [1] * (initial_noise.dim() - 1)
# Check the input argument for the n_steps, must be less than what was trained
all_stages = []
delta_t = 1 / n_steps
# The initial variables needed for the loop
x_t = initial_noise
t = T.ones(num_samples, device=model.device)
for step in tqdm(range(n_steps), "Euler-Maruyama-sampling", leave=False):
# Use the model to get the expected noise
pred_noises = model(x_t, t, mask, ctxt)
# Use to get s_theta
_, noise_rates = diff_sched(t.view(expanded_shape))
s = -pred_noises / noise_rates
# Take one step using the em method
betas = diff_sched.get_betas(t.view(expanded_shape))
x_t += 0.5 * betas * (x_t + 2 * s) * delta_t
x_t += (betas * delta_t).sqrt() * T.randn_like(x_t)
t -= delta_t
# Keep track of the diffusion evolution
if keep_all:
all_stages.append(x_t)
# Clamp the denoised data for stability
if clip_predictions is not None:
x_t.clamp_(*clip_predictions)
return x_t, all_stages
@T.no_grad()
def euler_sampler(
model,
diff_sched: VPDiffusionSchedule,
initial_noise: T.Tensor,
n_steps: int = 50,
keep_all: bool = False,
mask: Optional[T.Tensor] = None,
ctxt: Optional[T.BoolTensor] = None,
clip_predictions: Optional[tuple] = None,
) -> Tuple[T.Tensor, list]:
"""Apply the full reverse process to noise to generate a batch of
samples."""
    # Get the initial noise for generation and the number of samples
num_samples = initial_noise.shape[0]
# The shape needed for expanding the time encodings
expanded_shape = [-1] + [1] * (initial_noise.dim() - 1)
# Check the input argument for the n_steps, must be less than what was trained
all_stages = []
delta_t = 1 / n_steps
# The initial variables needed for the loop
t = T.ones(num_samples, device=model.device)
signal_rates, noise_rates = diff_sched(t.view(expanded_shape))
x_t = initial_noise * (signal_rates + noise_rates)
for step in tqdm(range(n_steps), "Euler-sampling", leave=False):
# Take a step using the euler method and the gradient calculated by the ode
x_t += get_ode_gradient(model, diff_sched, x_t, t, mask, ctxt) * delta_t
t -= delta_t
# Keep track of the diffusion evolution
if keep_all:
all_stages.append(x_t)
# Clamp the denoised data for stability
if clip_predictions is not None:
x_t.clamp_(*clip_predictions)
return x_t, all_stages
@T.no_grad()
def runge_kutta_sampler(
model,
diff_sched: VPDiffusionSchedule,
initial_noise: T.Tensor,
n_steps: int = 50,
keep_all: bool = False,
mask: Optional[T.Tensor] = None,
ctxt: Optional[T.BoolTensor] = None,
clip_predictions: Optional[tuple] = None,
) -> Tuple[T.Tensor, list]:
"""Apply the full reverse process to noise to generate a batch of
samples."""
    # Get the initial noise for generation and the number of samples
num_samples = initial_noise.shape[0]
# Check the input argument for the n_steps, must be less than what was trained
all_stages = []
delta_t = 1 / n_steps
# Wrap the ode gradient in a lambda function depending only on xt and t
ode_grad = lambda t, x_t: get_ode_gradient(model, diff_sched, x_t, t, mask, ctxt)
# The initial variables needed for the loop
x_t = initial_noise
t = T.ones(num_samples, device=model.device)
for step in tqdm(range(n_steps), "Runge-Kutta-sampling", leave=False):
k1 = delta_t * (ode_grad(t, x_t))
k2 = delta_t * (ode_grad((t - delta_t / 2), (x_t + k1 / 2)))
k3 = delta_t * (ode_grad((t - delta_t / 2), (x_t + k2 / 2)))
k4 = delta_t * (ode_grad((T.clamp_min(t - delta_t, 0)), (x_t + k3)))
k = (k1 + 2 * k2 + 2 * k3 + k4) / 6
x_t += k
t -= delta_t
# Keep track of the diffusion evolution
if keep_all:
all_stages.append(x_t)
# Clamp the denoised data for stability
if clip_predictions is not None:
x_t.clamp_(*clip_predictions)
return x_t, all_stages
def get_ode_gradient(
model,
diff_sched: VPDiffusionSchedule,
x_t: T.Tensor,
t: T.Tensor,
mask: Optional[T.BoolTensor] = None,
ctxt: Optional[T.Tensor] = None,
) -> T.Tensor:
expanded_shape = [-1] + [1] * (x_t.dim() - 1)
_, noise_rates = diff_sched(t.view(expanded_shape))
betas = diff_sched.get_betas(t.view(expanded_shape))
return 0.5 * betas * (x_t - model(x_t, t, mask, ctxt) / noise_rates)
def run_sampler(sampler: str, *args, **kwargs) -> Tuple[T.Tensor, list]:
if sampler == "em":
return euler_maruyama_sampler(*args, **kwargs)
if sampler == "euler":
return euler_sampler(*args, **kwargs)
if sampler == "rk":
return runge_kutta_sampler(*args, **kwargs)
if sampler == "ddim":
return ddim_sampler(*args, **kwargs)
raise RuntimeError(f"Unknown sampler: {sampler}")
# PC-JeDi-main/src/models/transformers.py
"""Some classes to describe transformer architectures."""
import math
from typing import Mapping, Optional, Union
import torch as T
import torch.nn as nn
from torch.nn.functional import dropout, softmax
from .modules import DenseNetwork
def merge_masks(
q_mask: Union[T.BoolTensor, None],
kv_mask: Union[T.BoolTensor, None],
attn_mask: Union[T.BoolTensor, None],
q_shape: T.Size,
k_shape: T.Size,
device: T.device,
) -> Union[None, T.BoolTensor]:
"""Create a full attention mask which incoporates the padding
information."""
# Create the full mask which combines the attention and padding masks
merged_mask = None
# If either pad mask exists, create
if q_mask is not None or kv_mask is not None:
if q_mask is None:
q_mask = T.full(q_shape[:-1], True, device=device)
if kv_mask is None:
kv_mask = T.full(k_shape[:-1], True, device=device)
merged_mask = q_mask.unsqueeze(-1) & kv_mask.unsqueeze(-2)
# If attention mask exists, create
if attn_mask is not None:
merged_mask = attn_mask if merged_mask is None else attn_mask & merged_mask
return merged_mask
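# Illustrative sketch (hypothetical shapes): merging the padding masks of a batch of 2
# with query length 3 and key/value length 4 gives a (2, 3, 4) boolean attention mask.
def _example_merge_masks() -> T.Size:
    q, k = T.zeros(2, 3, 8), T.zeros(2, 4, 8)
    q_mask = T.ones(2, 3, dtype=T.bool)
    kv_mask = T.ones(2, 4, dtype=T.bool)
    return merge_masks(q_mask, kv_mask, None, q.shape, k.shape, q.device).shape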
def attention(
query: T.Tensor,
key: T.Tensor,
value: T.Tensor,
dim_key: int,
attn_mask: Optional[T.BoolTensor] = None,
attn_bias: Optional[T.Tensor] = None,
drp: float = 0.0,
training: bool = True,
) -> T.Tensor:
"""Apply the attention using the scaled dot product between the key query
and key tensors, then matrix multiplied by the value.
Note that the attention scores are ordered in recv x send, which is the opposite
to how I usually do it for the graph network, which is send x recv
We use masked fill -T.inf as this kills the padded key/values elements but
introduces nans for padded query elements. We could used a very small number like
-1e9 but this would need to scale with if we are using half precision.
Args:
query: Batched query sequence of tensors (b, h, s, f)
key: Batched key sequence of tensors (b, h, s, f)
value: Batched value sequence of tensors (b, h, s, f)
dim_key: The dimension of the key features, used to scale the dot product
attn_mask: The attention mask, used to blind certain combinations of k,q pairs
attn_bias: Extra weights to combine with attention weights
drp: Dropout probability
training: If the model is in training mode, effects the dropout applied
"""
# Perform the matrix multiplication
scores = T.matmul(query, key.transpose(-2, -1)) / math.sqrt(dim_key)
# Add the bias terms if present
if attn_bias is not None: # Move the head dimension to the first
scores = scores + attn_bias.permute(0, 3, 1, 2)
# Mask away the scores between invalid elements in sequence
if attn_mask is not None:
scores = scores.masked_fill(~attn_mask.unsqueeze(-3), -T.inf)
# Apply the softmax function per head feature
scores = softmax(scores, dim=-1)
# Kill the nans introduced by the padded query elements
scores = T.nan_to_num(scores, 0)
# Apply dropout to the attention scores
scores = dropout(scores, p=drp, training=training)
# Finally multiply these scores by the output
scores = T.matmul(scores, value)
return scores
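# Illustrative sketch (not part of the original module): a minimal call of the
# attention operation above with hypothetical shapes
# (batch=2, heads=4, seq_len=10, head_dim=8).
def _example_attention() -> T.Tensor:
    query = T.randn(2, 4, 10, 8)
    key = T.randn(2, 4, 10, 8)
    value = T.randn(2, 4, 10, 8)
    # Returns a tensor with the same shape as the value sequence: (2, 4, 10, 8)
    return attention(query, key, value, dim_key=8)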
class MultiHeadedAttentionBlock(nn.Module):
"""Generic Multiheaded Attention.
    Takes in three sequences with dim: (batch, sequence, features)
- q: The primary sequence queries (determines output sequence length)
- k: The attending sequence keys (determines incoming information)
- v: The attending sequence values
In a message passing sense you can think of q as your receiver nodes, v and k
are the information coming from the sender nodes.
When q == k(and v) this is a SELF attention operation
When q != k(and v) this is a CROSS attention operation
===
Block operations:
1) Uses three linear layers to project the sequences.
- q = q_linear * q
- k = k_linear * k
- v = v_linear * v
2) Outputs are reshaped to add a head dimension, and transposed for matmul.
- features = model_dim = head_dim * num_heads
- dim becomes: batch, num_heads, sequence, head_dim
3) Passes these through to the attention module (message passing)
- In standard transformers this is the scaled dot product attention
- Also takes additional dropout layer to mask the attention
4) Flatten out the head dimension and pass through final linear layer
        - results are same as if attention was done separately for each head and concat
- dim: batch, q_seq, head_dim * num_heads
"""
def __init__(
self,
model_dim: int,
num_heads: int = 1,
drp: float = 0,
) -> None:
"""
Args:
model_dim: The dimension of the model
num_heads: The number of different attention heads to process in parallel
                - Must allow integer division into model_dim
drp: The dropout probability used in the MHA operation
"""
super().__init__()
# Define model base attributes
self.model_dim = model_dim
self.num_heads = num_heads
self.head_dim = model_dim // num_heads
# Check that the dimension of each head makes internal sense
if self.head_dim * num_heads != model_dim:
raise ValueError("Model dimension must be divisible by number of heads!")
# Initialise the weight matrices
self.q_linear = nn.Linear(model_dim, model_dim)
self.k_linear = nn.Linear(model_dim, model_dim)
self.v_linear = nn.Linear(model_dim, model_dim)
self.out_linear = nn.Linear(model_dim, model_dim)
self.drp = drp
def forward(
self,
q: T.Tensor,
k: Optional[T.Tensor] = None,
v: Optional[T.Tensor] = None,
q_mask: Optional[T.BoolTensor] = None,
kv_mask: Optional[T.BoolTensor] = None,
attn_mask: Optional[T.BoolTensor] = None,
attn_bias: Optional[T.Tensor] = None,
) -> T.Tensor:
"""
Args:
q: The main sequence queries (determines the output length)
k: The incoming information keys
v: The incoming information values
q_mask: Shows which elements of the main sequence are real
kv_mask: Shows which elements of the attn sequence are real
attn_mask: Extra mask for the attention matrix (eg: look ahead)
attn_bias: Extra bias term for the attention matrix (eg: edge features)
"""
# If only q and q_mask are provided then we automatically apply self attention
if k is None:
k = q
if kv_mask is None:
kv_mask = q_mask
v = v if v is not None else k
# Store the batch size, useful for reshaping
b_size, seq, feat = q.shape
# Work out the masking situation, with padding, no peaking etc
attn_mask = merge_masks(q_mask, kv_mask, attn_mask, q.shape, k.shape, q.device)
# Generate the q, k, v projections, break final head dimension in 2
shape = (b_size, -1, self.num_heads, self.head_dim)
q = self.q_linear(q).view(shape)
k = self.k_linear(k).view(shape)
v = self.v_linear(v).view(shape)
# Transpose to get dimensions: B,H,Seq,HD (required for matmul)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
# Calculate the new sequence values, for memory reasons overwrite q
q = attention(
q,
k,
v,
self.head_dim,
attn_mask=attn_mask,
attn_bias=attn_bias,
drp=self.drp,
training=self.training,
) # Returned shape is B,H,Q_seq,HD
# Concatenate the all of the heads together to get shape: B,Seq,F
q = q.transpose(1, 2).contiguous().view(b_size, -1, self.model_dim)
# Pass through final linear layer
q = self.out_linear(q)
return q
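# Illustrative sketch (not part of the original module): self-attention over a
# padded batch, model_dim=64 split across 4 heads (hypothetical sizes).
def _example_mha_block() -> T.Tensor:
    mha = MultiHeadedAttentionBlock(model_dim=64, num_heads=4)
    x = T.randn(2, 10, 64)
    mask = T.ones(2, 10, dtype=T.bool)
    return mha(x, q_mask=mask)  # output shape: (2, 10, 64)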
class TransformerEncoderLayer(nn.Module):
"""A transformer encoder layer based on the GPT-2+Normformer style
    architecture.
We choose Normformer as it has often proved to be the most stable to train
https://arxiv.org/abs/2210.06423
https://arxiv.org/abs/2110.09456
It contains:
- Multihead(self)Attention block
- A dense network
Layernorm is applied before each operation
Residual connections are used to bypass each operation
"""
def __init__(
self,
model_dim: int,
mha_config: Optional[Mapping] = None,
dense_config: Optional[Mapping] = None,
ctxt_dim: int = 0,
) -> None:
"""
Args:
            model_dim: The embedding dimension of the transformer block
mha_config: Keyword arguments for multiheaded-attention block
dense_config: Keyword arguments for feed forward network
ctxt_dim: Context dimension,
"""
super().__init__()
mha_config = mha_config or {}
dense_config = dense_config or {}
self.model_dim = model_dim
self.ctxt_dim = ctxt_dim
# The basic blocks
self.self_attn = MultiHeadedAttentionBlock(model_dim, **mha_config)
self.dense = DenseNetwork(
model_dim, outp_dim=model_dim, ctxt_dim=ctxt_dim, **dense_config
)
# The normalisation layers (lots from NormFormer)
self.norm1 = nn.LayerNorm(model_dim)
self.norm2 = nn.LayerNorm(model_dim)
self.norm3 = nn.LayerNorm(model_dim)
def forward(
self,
x: T.Tensor,
mask: Optional[T.BoolTensor] = None,
ctxt: Optional[T.Tensor] = None,
attn_bias: Optional[T.Tensor] = None,
attn_mask: Optional[T.BoolTensor] = None,
) -> T.Tensor:
"Pass through the layer using residual connections and layer normalisation"
x = x + self.norm2(
self.self_attn(
self.norm1(x), q_mask=mask, attn_mask=attn_mask, attn_bias=attn_bias
)
)
x = x + self.dense(self.norm3(x), ctxt)
return x
class TransformerEncoder(nn.Module):
"""A stack of N transformer encoder layers followed by a final
normalisation step.
Sequence -> Sequence
"""
def __init__(
self,
model_dim: int = 64,
num_layers: int = 3,
mha_config: Optional[Mapping] = None,
dense_config: Optional[Mapping] = None,
ctxt_dim: int = 0,
) -> None:
"""
Args:
            model_dim: Feature size for input, output, and all intermediate layers
num_layers: Number of encoder layers used
mha_config: Keyword arguments for the mha block
dense_config: Keyword arguments for the dense network in each layer
ctxt_dim: Dimension of the context inputs
"""
super().__init__()
self.model_dim = model_dim
self.num_layers = num_layers
self.layers = nn.ModuleList(
[
TransformerEncoderLayer(model_dim, mha_config, dense_config, ctxt_dim)
for _ in range(num_layers)
]
)
self.final_norm = nn.LayerNorm(model_dim)
def forward(self, x: T.Tensor, **kwargs) -> T.Tensor:
"""Pass the input through all layers sequentially."""
for layer in self.layers:
x = layer(x, **kwargs)
return self.final_norm(x)
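# Illustrative sketch (not part of the original module): a small stack of
# self-attention layers applied to an already-embedded sequence.
def _example_transformer_encoder() -> T.Tensor:
    te = TransformerEncoder(model_dim=64, num_layers=2)
    x = T.randn(2, 10, 64)
    return te(x)  # output shape: (2, 10, 64)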
class FullTransformerEncoder(nn.Module):
"""A transformer encoder with added input and output embedding networks.
Sequence -> Sequence
"""
def __init__(
self,
inpt_dim: int,
outp_dim: int,
edge_dim: int = 0,
ctxt_dim: int = 0,
te_config: Optional[Mapping] = None,
node_embd_config: Optional[Mapping] = None,
outp_embd_config: Optional[Mapping] = None,
edge_embd_config: Optional[Mapping] = None,
ctxt_embd_config: Optional[Mapping] = None,
) -> None:
"""
Args:
inpt_dim: Dim. of each element of the sequence
outp_dim: Dim. of of the final output vector
edge_dim: Dim. of the input edge features
ctxt_dim: Dim. of the context vector to pass to the embedding nets
            te_config: Keyword arguments to pass to the TransformerEncoder constructor
node_embd_config: Keyword arguments for node dense embedder
outp_embd_config: Keyword arguments for output dense embedder
edge_embd_config: Keyword arguments for edge dense embedder
ctxt_embd_config: Keyword arguments for context dense embedder
"""
super().__init__()
self.inpt_dim = inpt_dim
self.outp_dim = outp_dim
self.ctxt_dim = ctxt_dim
self.edge_dim = edge_dim
te_config = te_config or {}
node_embd_config = node_embd_config or {}
outp_embd_config = outp_embd_config or {}
        edge_embd_config = edge_embd_config or {}
        ctxt_embd_config = ctxt_embd_config or {}
# Initialise the context embedding network (optional)
if self.ctxt_dim:
self.ctxt_emdb = DenseNetwork(
inpt_dim=self.ctxt_dim,
**ctxt_embd_config,
)
self.ctxt_out = self.ctxt_emdb.outp_dim
else:
self.ctxt_out = 0
        # Initialise the TransformerEncoder, the main part of this network
self.te = TransformerEncoder(**te_config, ctxt_dim=self.ctxt_out)
self.model_dim = self.te.model_dim
# Initialise all embedding networks
self.node_embd = DenseNetwork(
inpt_dim=self.inpt_dim,
outp_dim=self.model_dim,
ctxt_dim=self.ctxt_out,
**node_embd_config,
)
self.outp_embd = DenseNetwork(
inpt_dim=self.model_dim,
outp_dim=self.outp_dim,
ctxt_dim=self.ctxt_out,
**outp_embd_config,
)
# Initialise the edge embedding network (optional)
if self.edge_dim:
self.edge_embd = DenseNetwork(
inpt_dim=self.edge_dim,
outp_dim=self.te.layers[0].self_attn.num_heads,
ctxt_dim=self.ctxt_out,
**edge_embd_config,
)
def forward(
self,
x: T.Tensor,
mask: Optional[T.BoolTensor] = None,
ctxt: Optional[T.Tensor] = None,
attn_bias: Optional[T.Tensor] = None,
attn_mask: Optional[T.BoolTensor] = None,
) -> T.Tensor:
"""Pass the input through all layers sequentially."""
if self.ctxt_dim:
ctxt = self.ctxt_emdb(ctxt)
if self.edge_dim:
attn_bias = self.edge_embd(attn_bias, ctxt)
x = self.node_embd(x, ctxt)
x = self.te(x, mask=mask, ctxt=ctxt, attn_bias=attn_bias, attn_mask=attn_mask)
x = self.outp_embd(x, ctxt)
return x
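# Illustrative sketch (not part of the original module): a point-cloud style
# input with 3 features per node and 2 context features. All sizes are
# hypothetical; real values come from the experiment configs.
def _example_full_transformer_encoder() -> T.Tensor:
    fte = FullTransformerEncoder(
        inpt_dim=3,
        outp_dim=3,
        ctxt_dim=2,
        te_config={"model_dim": 64, "num_layers": 2},
        ctxt_embd_config={"outp_dim": 16},
    )
    x = T.randn(2, 10, 3)
    mask = T.ones(2, 10, dtype=T.bool)
    ctxt = T.randn(2, 2)
    return fte(x, mask=mask, ctxt=ctxt)  # output shape: (2, 10, 3)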
| 15,049 | 33.837963 | 87 | py |
PC-JeDi | PC-JeDi-main/src/models/schedulers.py | from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
class WarmupToConstant(_LRScheduler):
"""Gradually warm-up learning rate in optimizer to a constant value."""
def __init__(self, optimizer: Optimizer, num_steps: int = 100) -> None:
"""
args:
optimizer (Optimizer): Wrapped optimizer.
num_steps: target learning rate is reached at num_steps.
"""
self.num_steps = num_steps
self.finished = False
super().__init__(optimizer)
def get_lr(self) -> list[float]:
if self.last_epoch > self.num_steps:
return [base_lr for base_lr in self.base_lrs]
return [
(base_lr / self.num_steps) * self.last_epoch for base_lr in self.base_lrs
]
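# Illustrative sketch (not part of the original module): the learning rate ramps
# linearly from zero to the optimiser's base value over `num_steps` steps.
def _example_warmup_schedule() -> list:
    import torch
    params = [torch.nn.Parameter(torch.zeros(1))]
    opt = torch.optim.Adam(params, lr=1e-3)
    sched = WarmupToConstant(opt, num_steps=10)
    for _ in range(3):
        opt.step()
        sched.step()
    return sched.get_last_lr()  # [3e-4] after 3 of the 10 warmup steps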
| 793 | 32.083333 | 85 | py |
PC-JeDi | PC-JeDi-main/src/models/modules.py | """Collection of pytorch modules that make up the networks."""
import math
from typing import Optional, Union
import torch as T
import torch.nn as nn
def get_act(name: str) -> nn.Module:
"""Return a pytorch activation function given a name."""
if name == "relu":
return nn.ReLU()
if name == "lrlu":
return nn.LeakyReLU(0.1)
if name == "silu" or name == "swish":
return nn.SiLU()
if name == "selu":
return nn.SELU()
if name == "softmax":
return nn.Softmax()
if name == "gelu":
return nn.GELU()
if name == "tanh":
return nn.Tanh()
if name == "softmax":
return nn.Softmax()
if name == "sigmoid":
return nn.Sigmoid()
raise ValueError("No activation function with name: ", name)
def get_nrm(name: str, outp_dim: int) -> nn.Module:
"""Return a 1D pytorch normalisation layer given a name and a output size
Returns None object if name is none."""
if name == "batch":
return nn.BatchNorm1d(outp_dim)
if name == "layer":
return nn.LayerNorm(outp_dim)
if name == "none":
return None
else:
raise ValueError("No normalistation with name: ", name)
class MLPBlock(nn.Module):
"""A simple MLP block that makes up a dense network.
Made up of several layers containing:
- linear map
- activation function [Optional]
- layer normalisation [Optional]
- dropout [Optional]
    Only the input of the block is concatenated with context information.
For residual blocks, the input is added to the output of the final layer.
"""
def __init__(
self,
inpt_dim: int,
outp_dim: int,
ctxt_dim: int = 0,
n_layers: int = 1,
act: str = "lrlu",
nrm: str = "none",
drp: float = 0,
do_res: bool = False,
) -> None:
"""Init method for MLPBlock.
Parameters
----------
inpt_dim : int
The number of features for the input layer
outp_dim : int
The number of output features
ctxt_dim : int, optional
The number of contextual features to concat to the inputs, by default 0
        n_layers : int, optional
            The number of transform layers in this block, by default 1
        act : str, optional
            A string indicating the name of the activation function, by default "lrlu"
        nrm : str, optional
            A string indicating the name of the normalisation, by default "none"
        drp : float, optional
            The dropout probability, 0 implies no dropout, by default 0
        do_res : bool, optional
            Add to previous output, only if dim does not change, by default False
"""
super().__init__()
# Save the input and output dimensions of the module
self.inpt_dim = inpt_dim
self.outp_dim = outp_dim
self.ctxt_dim = ctxt_dim
# If this layer includes an additive residual connection
self.do_res = do_res and (inpt_dim == outp_dim)
# Initialise the block layers as a module list
self.block = nn.ModuleList()
for n in range(n_layers):
# Increase the input dimension of the first layer to include context
lyr_in = inpt_dim + ctxt_dim if n == 0 else outp_dim
# Linear transform, activation, normalisation, dropout
self.block.append(nn.Linear(lyr_in, outp_dim))
if act != "none":
self.block.append(get_act(act))
if nrm != "none":
self.block.append(get_nrm(nrm, outp_dim))
if drp > 0:
self.block.append(nn.Dropout(drp))
def forward(self, inpt: T.Tensor, ctxt: Optional[T.Tensor] = None) -> T.Tensor:
"""
args:
tensor: Pytorch tensor to pass through the network
ctxt: The conditioning tensor, can be ignored
"""
# Concatenate the context information to the input of the block
if self.ctxt_dim and ctxt is None:
raise ValueError(
"Was expecting contextual information but none has been provided!"
)
temp = T.cat([inpt, ctxt], dim=-1) if self.ctxt_dim else inpt
# Pass through each transform in the block
for layer in self.block:
temp = layer(temp)
# Add the original inputs again for the residual connection
if self.do_res:
temp = temp + inpt
return temp
def __repr__(self) -> str:
"""Generate a one line string summing up the components of the
block."""
string = str(self.inpt_dim)
if self.ctxt_dim:
string += f"({self.ctxt_dim})"
string += "->"
string += "->".join([str(b).split("(", 1)[0] for b in self.block])
string += "->" + str(self.outp_dim)
if self.do_res:
string += "(add)"
return string
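# Illustrative sketch (not part of the original module): a single residual MLP
# block conditioned on a 4-dimensional context vector (hypothetical sizes).
def _example_mlp_block() -> T.Tensor:
    block = MLPBlock(inpt_dim=8, outp_dim=8, ctxt_dim=4, do_res=True)
    x = T.randn(5, 8)
    ctxt = T.randn(5, 4)
    return block(x, ctxt)  # output shape: (5, 8)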
class DenseNetwork(nn.Module):
"""A dense neural network made from a series of consecutive MLP blocks and
context injection layers."""
def __init__(
self,
inpt_dim: int,
outp_dim: int = 0,
ctxt_dim: int = 0,
hddn_dim: Union[int, list] = 32,
num_blocks: int = 1,
n_lyr_pbk: int = 1,
act_h: str = "lrlu",
act_o: str = "none",
do_out: bool = True,
nrm: str = "none",
drp: float = 0,
do_res: bool = False,
ctxt_in_inpt: bool = True,
ctxt_in_hddn: bool = False,
) -> None:
"""Initialise the DenseNetwork.
Parameters
----------
inpt_dim : int
The number of input neurons
outp_dim : int, optional
The number of output neurons. If none it will take from inpt or hddn,
by default 0
ctxt_dim : int, optional
The number of context features. The context feature use is determined by
ctxt_type, by default 0
hddn_dim : Union[int, list], optional
            The width of each hidden block. If a list it overrides depth, by default 32
num_blocks : int, optional
The number of hidden blocks, can be overwritten by hddn_dim, by default 1
n_lyr_pbk : int, optional
The number of transform layers per hidden block, by default 1
act_h : str, optional
The name of the activation function to apply in the hidden blocks,
by default "lrlu"
act_o : str, optional
The name of the activation function to apply to the outputs,
by default "none"
do_out : bool, optional
If the network has a dedicated output block, by default True
nrm : str, optional
Type of normalisation (layer or batch) in each hidden block, by default "none"
drp : float, optional
Dropout probability for hidden layers (0 means no dropout), by default 0
do_res : bool, optional
            Use residual connections between hidden blocks (only if same size),
by default False
ctxt_in_inpt : bool, optional
Include the ctxt tensor in the input block, by default True
ctxt_in_hddn : bool, optional
Include the ctxt tensor in the hidden blocks, by default False
Raises
------
ValueError
If the network was given a context input but both ctxt_in_inpt and
ctxt_in_hddn were False
"""
super().__init__()
# Check that the context is used somewhere
if ctxt_dim:
if not ctxt_in_hddn and not ctxt_in_inpt:
raise ValueError("Network has context inputs but nowhere to use them!")
# We store the input, hddn (list), output, and ctxt dims to query them later
self.inpt_dim = inpt_dim
if not isinstance(hddn_dim, int):
self.hddn_dim = hddn_dim
else:
self.hddn_dim = num_blocks * [hddn_dim]
self.outp_dim = outp_dim or inpt_dim if do_out else self.hddn_dim[-1]
self.num_blocks = len(self.hddn_dim)
self.ctxt_dim = ctxt_dim
self.do_out = do_out
# Necc for this module to work with the nflows package
self.hidden_features = self.hddn_dim[-1]
# Input MLP block
self.input_block = MLPBlock(
inpt_dim=self.inpt_dim,
outp_dim=self.hddn_dim[0],
ctxt_dim=self.ctxt_dim if ctxt_in_inpt else 0,
act=act_h,
nrm=nrm,
drp=drp,
)
# All hidden blocks as a single module list
self.hidden_blocks = []
if self.num_blocks > 1:
self.hidden_blocks = nn.ModuleList()
for h_1, h_2 in zip(self.hddn_dim[:-1], self.hddn_dim[1:]):
self.hidden_blocks.append(
MLPBlock(
inpt_dim=h_1,
outp_dim=h_2,
ctxt_dim=self.ctxt_dim if ctxt_in_hddn else 0,
n_layers=n_lyr_pbk,
act=act_h,
nrm=nrm,
drp=drp,
do_res=do_res,
)
)
# Output block (optional and there is no normalisation, dropout or context)
if do_out:
self.output_block = MLPBlock(
inpt_dim=self.hddn_dim[-1],
outp_dim=self.outp_dim,
act=act_o,
)
def forward(self, inputs: T.Tensor, ctxt: Optional[T.Tensor] = None) -> T.Tensor:
"""Pass through all layers of the dense network."""
# Reshape the context if it is available
if ctxt is not None:
dim_diff = inputs.dim() - ctxt.dim()
if dim_diff > 0:
ctxt = ctxt.view(ctxt.shape[0], *dim_diff * (1,), *ctxt.shape[1:])
ctxt = ctxt.expand(*inputs.shape[:-1], -1)
# Pass through the input block
inputs = self.input_block(inputs, ctxt)
# Pass through each hidden block
for h_block in self.hidden_blocks: # Context tensor will only be used if
inputs = h_block(inputs, ctxt) # block was initialised with a ctxt dim
# Pass through the output block
if self.do_out:
inputs = self.output_block(inputs)
return inputs
def __repr__(self):
string = ""
string += "\n (inp): " + repr(self.input_block) + "\n"
for i, h_block in enumerate(self.hidden_blocks):
string += f" (h-{i+1}): " + repr(h_block) + "\n"
if self.do_out:
string += " (out): " + repr(self.output_block)
return string
def one_line_string(self):
"""Return a one line string that sums up the network structure."""
string = str(self.inpt_dim)
if self.ctxt_dim:
string += f"({self.ctxt_dim})"
string += ">"
string += str(self.input_block.outp_dim) + ">"
if self.num_blocks > 1:
string += ">".join(
[
str(layer.out_features)
for hidden in self.hidden_blocks
for layer in hidden.block
if isinstance(layer, nn.Linear)
]
)
string += ">"
if self.do_out:
string += str(self.outp_dim)
return string
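# Illustrative sketch (not part of the original module): a two-block dense
# network with the context injected only in the input block (the default).
def _example_dense_network() -> T.Tensor:
    net = DenseNetwork(inpt_dim=16, outp_dim=4, ctxt_dim=2, hddn_dim=32, num_blocks=2)
    x = T.randn(7, 16)
    ctxt = T.randn(7, 2)
    return net(x, ctxt)  # output shape: (7, 4)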
class IterativeNormLayer(nn.Module):
"""A basic normalisation layer so it can be part of the model.
    Note! If a mask is provided in the forward pass, then extra_dims must be
    given with respect to the masked (flattened) inputs. For example: graph
    nodes are usually batch x n_nodes x features, so to normalise over
    the features one would typically give extra_dims as (0,). But nodes
    are always passed with the mask, which flattens them to batch x
    features. The batch dimension is handled automatically, so we don't
    pass any extra_dims!
"""
def __init__(
self,
inpt_dim: Union[T.Tensor, tuple, int],
means: Optional[T.Tensor] = None,
vars: Optional[T.Tensor] = None,
n: int = 0,
max_n: int = 5_00_000,
extra_dims: Union[tuple, int] = (),
) -> None:
"""Init method for Normalisatiion module.
Args:
inpt_dim: Shape of the input tensor, required for reloading
means: Calculated means for the mapping. Defaults to None.
vars: Calculated variances for the mapping. Defaults to None.
n: Number of samples used to make the mapping. Defaults to None.
max_n: Maximum number of iterations before the means and vars are frozen
extra_dims: The extra dimension(s) over which to calculate the stats
Will always calculate over the batch dimension
"""
super().__init__()
# Fail if only one of means or vars is provided
if (means is None) ^ (vars is None): # XOR
raise ValueError(
"""Only one of 'means' and 'vars' is defined. Either both or
neither must be defined"""
)
        # Allow integer inpt_dim and n arguments
if isinstance(inpt_dim, int):
inpt_dim = (inpt_dim,)
if isinstance(n, int):
n = T.tensor(n)
# The dimensions over which to apply the normalisation, make positive!
if isinstance(extra_dims, int): # Ensure it is a list
extra_dims = [extra_dims]
else:
extra_dims = list(extra_dims)
if any([abs(e) > len(inpt_dim) for e in extra_dims]): # Check size
raise ValueError("extra_dims argument lists dimensions outside input range")
for d in range(len(extra_dims)):
if extra_dims[d] < 0: # make positive
extra_dims[d] = len(inpt_dim) + extra_dims[d]
extra_dims[d] += 1 # Add one because we are inserting a batch dimension
self.extra_dims = extra_dims
# Calculate the input and output shapes
self.max_n = max_n
self.inpt_dim = list(inpt_dim)
self.stat_dim = [1] + list(inpt_dim) # Add batch dimension
for d in range(len(self.stat_dim)):
if d in self.extra_dims:
self.stat_dim[d] = 1
        # Buffers are needed for saving/loading the layer
self.register_buffer(
"means", T.zeros(self.stat_dim) if means is None else means
)
self.register_buffer("vars", T.ones(self.stat_dim) if vars is None else vars)
self.register_buffer("n", n)
# For the welford algorithm it is useful to have another variable m2
self.register_buffer("m2", T.ones(self.stat_dim) if vars is None else vars)
# If the means are set here then the model is "frozen" and not updated
self.frozen = means is not None
def _mask(self, inpt: T.Tensor, mask: Optional[T.BoolTensor] = None) -> T.Tensor:
if mask is None:
return inpt
return inpt[mask]
def _check_attributes(self) -> None:
if self.means is None or self.vars is None:
raise ValueError(
"Stats for have not been initialised or fit() has not been run!"
)
def fit(
self, inpt: T.Tensor, mask: Optional[T.BoolTensor] = None, freeze: bool = True
) -> None:
"""Set the stats given a population of data."""
inpt = self._mask(inpt, mask)
self.vars, self.means = T.var_mean(
inpt, dim=(0, *self.extra_dims), keepdim=True
)
self.n = T.tensor(len(inpt), device=self.means.device)
self.m2 = self.vars * self.n
self.frozen = freeze
def forward(self, inpt: T.Tensor, mask: Optional[T.BoolTensor] = None) -> T.Tensor:
"""Applies the standardisation to a batch of inputs, also uses the
inputs to update the running stats if in training mode."""
with T.no_grad():
sel_inpt = self._mask(inpt, mask)
if not self.frozen and self.training:
self.update(sel_inpt)
# Apply the mapping
normed_inpt = (sel_inpt - self.means) / (self.vars.sqrt() + 1e-8)
# Undo the masking
if mask is not None:
inpt = inpt.clone() # prevents inplace operation, bad for autograd
inpt[mask] = normed_inpt
return inpt
return normed_inpt
def reverse(self, inpt: T.Tensor, mask: Optional[T.BoolTensor] = None) -> T.Tensor:
"""Unnormalises the inputs given the recorded stats."""
sel_inpt = self._mask(inpt, mask)
unnormed_inpt = sel_inpt * self.vars.sqrt() + self.means
# Undo the masking
if mask is not None:
inpt = inpt.clone() # prevents inplace operation, bad for autograd
inpt[mask] = unnormed_inpt
return inpt
return unnormed_inpt
def update(self, inpt: T.Tensor, mask: Optional[T.BoolTensor] = None) -> None:
"""Update the running stats using a batch of data."""
inpt = self._mask(inpt, mask)
# For first iteration
if self.n == 0:
self.fit(inpt, freeze=False)
return
# later iterations based on batched welford algorithm
with T.no_grad():
self.n += len(inpt)
delta = inpt - self.means
self.means += (delta / self.n).mean(
dim=(0, *self.extra_dims), keepdim=True
) * len(inpt)
delta2 = inpt - self.means
self.m2 += (delta * delta2).mean(
dim=(0, *self.extra_dims), keepdim=True
) * len(inpt)
self.vars = self.m2 / self.n
# Freeze the model if we exceed the requested stats
self.frozen = self.n >= self.max_n
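# Illustrative sketch (not part of the original module): fit the running stats
# on a reference population, then standardise and invert the mapping.
def _example_iterative_norm() -> tuple:
    norm = IterativeNormLayer(inpt_dim=3)
    x = T.randn(100, 3) * 5 + 2
    norm.fit(x)               # freezes the means/vars on this population
    z = norm(x)               # approximately zero mean, unit variance
    x_back = norm.reverse(z)  # recovers the original inputs (up to epsilon)
    return z, x_back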
class CosineEncoding:
def __init__(
self,
outp_dim: int = 32,
min_value: float = 0.0,
max_value: float = 1.0,
frequency_scaling: str = "exponential",
) -> None:
self.outp_dim = outp_dim
self.min_value = min_value
self.max_value = max_value
self.frequency_scaling = frequency_scaling
def __call__(self, inpt: T.Tensor) -> T.Tensor:
return cosine_encoding(
inpt, self.outp_dim, self.min_value, self.max_value, self.frequency_scaling
)
def cosine_encoding(
x: T.Tensor,
outp_dim: int = 32,
min_value: float = 0.0,
max_value: float = 1.0,
frequency_scaling: str = "exponential",
) -> T.Tensor:
"""Computes a positional cosine encodings with an increasing series of
frequencies.
The frequencies either increase linearly or exponentially (default).
The latter is good for when max_value is large and extremely high sensitivity to the
input is required.
If inputs greater than the max value are provided, the outputs become degenerate.
    If inputs smaller than the min value are provided, the inputs to the cosine will
    be both positive and negative, which may lead to degenerate outputs.
Always make sure that the min and max bounds are not exceeded!
Args:
        x: The input, the final dimension is encoded. If 1D then it will be unsqueezed
        outp_dim: The dimension of the output encoding
min_value: Added to x (and max) as cosine embedding works with positive inputs
max_value: The maximum expected value, sets the scale of the lowest frequency
frequency_scaling: Either 'linear' or 'exponential'
Returns:
The cosine embeddings of the input using (out_dim) many frequencies
"""
# Unsqueeze if final dimension is flat
if x.shape[-1] != 1 or x.dim() == 1:
x = x.unsqueeze(-1)
    # Check that the bounds are obeyed
if T.any(x > max_value):
print("Warning! Passing values to cosine_encoding encoding that exceed max!")
if T.any(x < min_value):
print("Warning! Passing values to cosine_encoding encoding below min!")
# Calculate the various frequencies
if frequency_scaling == "exponential":
freqs = T.arange(outp_dim, device=x.device).exp()
elif frequency_scaling == "linear":
freqs = T.arange(1, outp_dim + 1, device=x.device)
else:
raise RuntimeError(f"Unrecognised frequency scaling: {frequency_scaling}")
return T.cos((x + min_value) * freqs * math.pi / (max_value + min_value))
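# Illustrative sketch (not part of the original module): encoding diffusion
# times in [0, 1] into 32-dimensional cosine features.
def _example_cosine_encoding() -> T.Tensor:
    t = T.rand(8, 1)
    return cosine_encoding(t, outp_dim=32, min_value=0.0, max_value=1.0)  # (8, 32)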
| 20,518 | 35.575758 | 90 | py |
PC-JeDi | PC-JeDi-main/src/models/pc_jedi.py | import copy
from functools import partial
from typing import Mapping, Optional, Tuple
import numpy as np
import pytorch_lightning as pl
import torch as T
import wandb
from jetnet.evaluation import w1efp, w1m, w1p
from src.models.diffusion import VPDiffusionSchedule, run_sampler
from src.models.modules import CosineEncoding, IterativeNormLayer
from src.models.schedulers import WarmupToConstant
from src.models.transformers import FullTransformerEncoder
from src.numpy_utils import undo_log_squash
from src.plotting import plot_mpgan_marginals
from src.torch_utils import get_loss_fn, to_np
class TransformerDiffusionGenerator(pl.LightningModule):
"""A generative model which uses the diffusion process on a point cloud."""
def __init__(
self,
*,
pc_dim: list,
ctxt_dim: int,
n_nodes: int,
cosine_config: Mapping,
diff_config: Mapping,
normaliser_config: Mapping,
trans_enc_config: Mapping,
optimizer: partial,
loss_name: str = "mse",
mle_loss_weight: float = 0.0,
ema_sync: float = 0.999,
sampler_name: str = "em",
sampler_steps: int = 100,
) -> None:
"""
Args:
pc_dim: The dimension of the point cloud
ctxt_dim: The size of the context vector for the point cloud
n_nodes: Max number of nodes used to train this model
cosine_config: For defining the cosine embedding arguments
            diff_config: Config for the diffusion scheduler, defines the signal and noise rates
            normaliser_config: For defining the iterative normalisation layer
            trans_enc_config: Keyword arguments for the TransformerEncoder network
            optimizer: Partially initialised optimizer
            loss_name: Name of the loss function to use for noise estimation
            mle_loss_weight: Relative weight of the Maximum-Likelihood loss term
            ema_sync: How fast the ema network syncs with the given one
            sampler_name: Name of O/SDE solver, does not affect training.
            sampler_steps: Steps used in generation, does not affect training.
"""
super().__init__()
self.save_hyperparameters(logger=False)
# Class attributes
self.pc_dim = pc_dim
self.ctxt_dim = ctxt_dim
self.n_nodes = n_nodes
self.loss_fn = get_loss_fn(loss_name)
self.mle_loss_weight = mle_loss_weight
self.ema_sync = ema_sync
# The encoder and scheduler needed for diffusion
self.diff_sched = VPDiffusionSchedule(**diff_config)
self.time_encoder = CosineEncoding(**cosine_config)
# The layer which normalises the input point cloud data
self.normaliser = IterativeNormLayer((pc_dim,), **normaliser_config)
if self.ctxt_dim:
self.ctxt_normaliser = IterativeNormLayer((ctxt_dim,), **normaliser_config)
# The denoising transformer
self.net = FullTransformerEncoder(
inpt_dim=pc_dim,
outp_dim=pc_dim,
ctxt_dim=ctxt_dim + self.time_encoder.outp_dim,
**trans_enc_config,
)
# A copy of the network which will sync with an exponential moving average
self.ema_net = copy.deepcopy(self.net)
# Sampler to run in the validation/testing loop
self.sampler_name = sampler_name
self.sampler_steps = sampler_steps
# Record of the outputs of the validation step
self.val_outs = []
def forward(
self,
noisy_data: T.Tensor,
diffusion_times: T.Tensor,
mask: T.BoolTensor,
ctxt: Optional[T.Tensor] = None,
) -> T.Tensor:
"""Pass through the model and get an estimate of the noise added to the
input."""
# Use the appropriate network for training or validation
if self.training:
network = self.net
else:
network = self.ema_net
# Encode the times and combine with existing context info
context = self.time_encoder(diffusion_times)
if self.ctxt_dim:
context = T.cat([context, ctxt], dim=-1)
        # Use the selected network to estimate the noise present in the data
return network(noisy_data, mask=mask, ctxt=context)
def _shared_step(self, sample: tuple) -> Tuple[T.Tensor, T.Tensor]:
"""Shared step used in both training and validaiton."""
# Unpack the sample tuple
nodes, mask, ctxt = sample
# Pass through the normalisers
nodes = self.normaliser(nodes, mask)
if self.ctxt_dim:
ctxt = self.ctxt_normaliser(ctxt)
# Sample from the gaussian latent space to perturb the point clouds
noises = T.randn_like(nodes) * mask.unsqueeze(-1)
# Sample uniform random diffusion times and get the rates
diffusion_times = T.rand(size=(len(nodes), 1), device=self.device)
signal_rates, noise_rates = self.diff_sched(diffusion_times.view(-1, 1, 1))
# Mix the signal and noise according to the diffusion equation
noisy_nodes = signal_rates * nodes + noise_rates * noises
# Predict the noise using the network
pred_noises = self.forward(noisy_nodes, diffusion_times, mask, ctxt)
# Simple noise loss is for "perceptual quality"
simple_loss = self.loss_fn(noises[mask], pred_noises[mask])
# MLE loss is for maximum liklihood training
if self.mle_loss_weight:
betas = self.diff_sched.get_betas(diffusion_times.view(-1, 1, 1))
mle_weights = betas / noise_rates
mle_loss = mle_weights * simple_loss
else:
mle_loss = T.zeros_like(simple_loss)
return simple_loss.mean(), mle_loss.mean()
def training_step(self, sample: tuple, _batch_idx: int) -> T.Tensor:
simple_loss, mle_loss = self._shared_step(sample)
total_loss = simple_loss + self.mle_loss_weight * mle_loss
self.log("train/simple_loss", simple_loss)
self.log("train/mle_loss", mle_loss)
self.log("train/total_loss", total_loss)
self._sync_ema_network()
return total_loss
def validation_step(self, sample: tuple, batch_idx: int) -> None:
simple_loss, mle_loss = self._shared_step(sample)
total_loss = simple_loss + self.mle_loss_weight * mle_loss
self.log("valid/simple_loss", simple_loss)
self.log("valid/mle_loss", mle_loss)
self.log("valid/total_loss", total_loss)
# Run the full generation of the sample during a validation step
outputs = self.full_generation(
self.sampler_name,
self.sampler_steps,
mask=sample[1],
ctxt=sample[2],
)
        # Add to the collection of the validation outputs
self.val_outs.append((to_np(outputs), to_np(sample)))
def on_validation_epoch_end(self) -> None:
"""At the end of the validation epoch, calculate and log the metrics
and plot the histograms.
This function right now only works with MPGAN configs
"""
# Combine all outputs
gen_nodes = np.vstack([v[0] for v in self.val_outs])
real_nodes = np.vstack([v[1][0] for v in self.val_outs])
mask = np.vstack([v[1][1] for v in self.val_outs])
high = np.vstack([v[1][2] for v in self.val_outs])
# Change the data from log(pt+1) into pt fraction (needed for metrics)
if self.trainer.datamodule.hparams.data_conf.log_squash_pt:
gen_nodes[..., -1] = undo_log_squash(gen_nodes[..., -1]) / high[..., 0:1]
real_nodes[..., -1] = undo_log_squash(real_nodes[..., -1]) / high[..., 0:1]
# Apply clipping
gen_nodes = np.nan_to_num(gen_nodes)
gen_nodes[..., 0] = np.clip(gen_nodes[..., 0], -0.5, 0.5)
gen_nodes[..., 1] = np.clip(gen_nodes[..., 1], -0.5, 0.5)
gen_nodes[..., 2] = np.clip(gen_nodes[..., 2], 0, 1)
real_nodes = np.nan_to_num(real_nodes)
real_nodes[..., 0] = np.clip(real_nodes[..., 0], -0.5, 0.5)
real_nodes[..., 1] = np.clip(real_nodes[..., 1], -0.5, 0.5)
real_nodes[..., 2] = np.clip(real_nodes[..., 2], 0, 1)
# Calculate and log the Wasserstein discriminants
bootstrap = {
"num_eval_samples": 10000,
"num_batches": 10,
}
w1m_val, w1m_err = w1m(real_nodes, gen_nodes, **bootstrap)
w1p_val, w1p_err = w1p(real_nodes, gen_nodes, **bootstrap)
w1efp_val, w1efp_err = w1efp(real_nodes, gen_nodes, efp_jobs=1, **bootstrap)
self.log("valid/w1m", w1m_val)
self.log("valid/w1m_err", w1m_err)
self.log("valid/w1p", w1p_val.mean())
self.log("valid/w1p_err", w1p_err.mean())
self.log("valid/w1efp", w1efp_val.mean())
self.log("valid/w1efp_err", w1efp_err.mean())
# Plot the MPGAN-like marginals
plot_mpgan_marginals(gen_nodes, real_nodes, mask, self.trainer.current_epoch)
self.val_outs.clear()
def _sync_ema_network(self) -> None:
"""Updates the Exponential Moving Average Network."""
with T.no_grad():
for params, ema_params in zip(
self.net.parameters(), self.ema_net.parameters()
):
ema_params.data.copy_(
self.ema_sync * ema_params.data
+ (1.0 - self.ema_sync) * params.data
)
def on_fit_start(self, *_args) -> None:
"""Function to run at the start of training."""
# Define the metrics for wandb (otherwise the min wont be stored!)
if wandb.run is not None:
wandb.define_metric("train/simple_loss", summary="min")
wandb.define_metric("train/mle_loss", summary="min")
wandb.define_metric("train/total_loss", summary="min")
wandb.define_metric("valid/simple_loss", summary="min")
wandb.define_metric("valid/mle_loss", summary="min")
wandb.define_metric("valid/total_loss", summary="min")
wandb.define_metric("valid/w1m", summary="min")
wandb.define_metric("valid/w1p", summary="min")
wandb.define_metric("valid/w1efp", summary="min")
def set_sampler(
self, sampler_name: Optional[str] = None, sampler_steps: Optional[int] = None
) -> None:
"""Replaces the sampler list with a new one."""
if sampler_name is not None:
self.sampler_name = sampler_name
if sampler_steps is not None:
self.sampler_steps = sampler_steps
def full_generation(
self,
sampler: str,
steps: int,
mask: Optional[T.BoolTensor] = None,
ctxt: Optional[T.Tensor] = None,
initial_noise: Optional[T.Tensor] = None,
) -> T.Tensor:
"""Fully generate a batch of data from noise, given context information
and a mask."""
        # Either a mask or initial noise must be defined or we don't know how
# many samples to generate and with what cardinality
if mask is None and initial_noise is None:
raise ValueError("Please provide either a mask or noise to generate from")
if mask is None:
mask = T.full(initial_noise.shape[:-1], True, device=self.device)
if initial_noise is None:
initial_noise = T.randn((*mask.shape, self.pc_dim), device=self.device)
# Normalise the context
if self.ctxt_dim:
ctxt = self.ctxt_normaliser(ctxt)
assert len(ctxt) == len(initial_noise)
# Run the sampling method
outputs, _ = run_sampler(
sampler,
self,
self.diff_sched,
initial_noise=initial_noise * mask.unsqueeze(-1),
n_steps=steps,
mask=mask,
ctxt=ctxt,
clip_predictions=(-25, 25),
)
# Ensure that the output adheres to the mask
outputs[~mask] = 0
        # Undo the normalisation of the generated point cloud before returning
return self.normaliser.reverse(outputs, mask=mask)
def configure_optimizers(self) -> dict:
"""Configure the optimisers and learning rate sheduler for this
model."""
# Finish initialising the optimiser and create the scheduler
opt = self.hparams.optimizer(params=self.parameters())
sched = WarmupToConstant(opt, num_steps=10_000)
# Return the dict for the lightning trainer
return {
"optimizer": opt,
"lr_scheduler": {
"scheduler": sched,
"interval": "step",
"frequency": 1,
},
}
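# Illustrative sketch (not part of the original module): generation with an
# already-instantiated (e.g. checkpoint-loaded) TransformerDiffusionGenerator.
# Batch/node counts are hypothetical; tensors are created on the model's device.
def _example_generation(model: "TransformerDiffusionGenerator") -> T.Tensor:
    mask = T.full((4, 30), True, device=model.device)
    ctxt = T.randn(4, model.ctxt_dim, device=model.device) if model.ctxt_dim else None
    with T.no_grad():
        return model.full_generation("em", steps=100, mask=mask, ctxt=ctxt)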
| 12,805 | 38.403077 | 87 | py |
PC-JeDi | PC-JeDi-main/scripts/train.py | import pyrootutils
root = pyrootutils.setup_root(search_from=__file__, pythonpath=True)
import logging
import hydra
import pytorch_lightning as pl
from omegaconf import DictConfig
from src.hydra_utils import (
instantiate_collection,
log_hyperparameters,
print_config,
reload_original_config,
save_config,
)
log = logging.getLogger(__name__)
@hydra.main(
version_base=None, config_path=str(root / "configs"), config_name="train.yaml"
)
def main(cfg: DictConfig) -> None:
log.info("Setting up full job config")
if cfg.full_resume:
cfg = reload_original_config(cfg)
print_config(cfg)
if cfg.seed:
log.info(f"Setting seed to: {cfg.seed}")
pl.seed_everything(cfg.seed, workers=True)
log.info("Instantiating the data module")
datamodule = hydra.utils.instantiate(cfg.datamodule)
log.info("Instantiating the model")
model = hydra.utils.instantiate(
cfg.model,
pc_dim=datamodule.dim,
n_nodes=datamodule.n_nodes,
ctxt_dim=datamodule.ctxt_dim,
)
log.info(model)
log.info("Instantiating all callbacks")
callbacks = instantiate_collection(cfg.callbacks)
log.info("Instantiating the loggers")
loggers = instantiate_collection(cfg.loggers)
log.info("Instantiating the trainer")
trainer = hydra.utils.instantiate(cfg.trainer, callbacks=callbacks, logger=loggers)
if loggers:
log.info("Logging all hyperparameters")
log_hyperparameters(cfg, model, trainer)
log.info("Saving config so job can be resumed")
save_config(cfg)
log.info("Starting training!")
trainer.fit(model, datamodule, ckpt_path=cfg.ckpt_path)
if __name__ == "__main__":
main()
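# Illustrative invocation note (not part of the original script): the job is
# configured with Hydra, so settings are overridden on the command line, e.g.
#   python scripts/train.py seed=42
# (exact override keys depend on the config files under `configs/`).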
| 1,731 | 23.742857 | 87 | py |
trees_from_transformers | trees_from_transformers-master/run.py | import argparse
import datetime
import logging
import os
import pickle
from tqdm import tqdm
import torch
from transformers import *
from data.dataset import Dataset
from utils.measure import Measure
from utils.parser import not_coo_parser, parser
from utils.tools import set_seed, select_indices, group_indices
from utils.yk import get_actions, get_nonbinary_spans
MODELS = [(BertModel, BertTokenizer, BertConfig, 'bert-base-cased'),
(BertModel, BertTokenizer, BertConfig, 'bert-large-cased'),
(GPT2Model, GPT2Tokenizer, GPT2Config, 'gpt2'),
(GPT2Model, GPT2Tokenizer, GPT2Config, 'gpt2-medium'),
(RobertaModel, RobertaTokenizer, RobertaConfig, 'roberta-base'),
(RobertaModel, RobertaTokenizer, RobertaConfig, 'roberta-large'),
(XLNetModel, XLNetTokenizer, XLNetConfig, 'xlnet-base-cased'),
(XLNetModel, XLNetTokenizer, XLNetConfig, 'xlnet-large-cased')]
def evaluate(args):
scores = dict()
for model_class, tokenizer_class, model_config, pretrained_weights in MODELS:
tokenizer = tokenizer_class.from_pretrained(
pretrained_weights, cache_dir=args.lm_cache_path)
if args.from_scratch:
config = model_config.from_pretrained(pretrained_weights)
config.output_hidden_states = True
config.output_attentions = True
model = model_class(config).to(args.device)
else:
model = model_class.from_pretrained(
pretrained_weights,
cache_dir=args.lm_cache_path,
output_hidden_states=True,
output_attentions=True).to(args.device)
with torch.no_grad():
test_sent = tokenizer.encode('test', add_special_tokens=False)
token_ids = torch.tensor([test_sent]).to(args.device)
all_hidden, all_att = model(token_ids)[-2:]
n_layers = len(all_att)
n_att = all_att[0].size(1)
n_hidden = all_hidden[0].size(-1)
measure = Measure(n_layers, n_att)
data = Dataset(path=args.data_path, tokenizer=tokenizer)
for idx, s in tqdm(enumerate(data.sents), total=len(data.sents),
desc=pretrained_weights, ncols=70):
raw_tokens = data.raw_tokens[idx]
tokens = data.tokens[idx]
if len(raw_tokens) < 2:
data.cnt -= 1
continue
token_ids = tokenizer.encode(s, add_special_tokens=False)
token_ids_tensor = torch.tensor([token_ids]).to(args.device)
with torch.no_grad():
all_hidden, all_att = model(token_ids_tensor)[-2:]
all_hidden, all_att = list(all_hidden[1:]), list(all_att)
# (n_layers, seq_len, hidden_dim)
all_hidden = torch.cat([all_hidden[n] for n in range(n_layers)], dim=0)
# (n_layers, n_att, seq_len, seq_len)
all_att = torch.cat([all_att[n] for n in range(n_layers)], dim=0)
if len(tokens) > len(raw_tokens):
th = args.token_heuristic
if th == 'first' or th == 'last':
mask = select_indices(tokens, raw_tokens, pretrained_weights, th)
assert len(mask) == len(raw_tokens)
all_hidden = all_hidden[:, mask]
all_att = all_att[:, :, mask, :]
all_att = all_att[:, :, :, mask]
else:
# mask = torch.tensor(data.masks[idx])
mask = group_indices(tokens, raw_tokens, pretrained_weights)
raw_seq_len = len(raw_tokens)
all_hidden = torch.stack(
[all_hidden[:, mask == i].mean(dim=1)
for i in range(raw_seq_len)], dim=1)
all_att = torch.stack(
[all_att[:, :, :, mask == i].sum(dim=3)
for i in range(raw_seq_len)], dim=3)
all_att = torch.stack(
[all_att[:, :, mask == i].mean(dim=2)
for i in range(raw_seq_len)], dim=2)
l_hidden, r_hidden = all_hidden[:, :-1], all_hidden[:, 1:]
l_att, r_att = all_att[:, :, :-1], all_att[:, :, 1:]
syn_dists = measure.derive_dists(l_hidden, r_hidden, l_att, r_att)
gold_spans = data.gold_spans[idx]
gold_tags = data.gold_tags[idx]
assert len(gold_spans) == len(gold_tags)
for m, d in syn_dists.items():
pred_spans = []
for i in range(measure.scores[m].n):
dist = syn_dists[m][i].tolist()
if len(dist) > 1:
bias_base = (sum(dist) / len(dist)) * args.bias
bias = [bias_base * (1 - (1 / (len(dist) - 1)) * x)
for x in range(len(dist))]
dist = [dist[i] + bias[i] for i in range(len(dist))]
if args.use_not_coo_parser:
pred_tree = not_coo_parser(dist, raw_tokens)
else:
pred_tree = parser(dist, raw_tokens)
ps = get_nonbinary_spans(get_actions(pred_tree))[0]
pred_spans.append(ps)
measure.scores[m].update(pred_spans, gold_spans, gold_tags)
measure.derive_final_score()
scores[pretrained_weights] = measure.scores
if not os.path.exists(args.result_path):
os.makedirs(args.result_path)
with open(f'{args.result_path}/{pretrained_weights}.txt', 'w') as f:
print('Model name:', pretrained_weights, file=f)
print('Experiment time:', args.time, file=f)
print('# of layers:', n_layers, file=f)
print('# of attentions:', n_att, file=f)
print('# of hidden dimensions:', n_hidden, file=f)
print('# of processed sents:', data.cnt, file=f)
max_corpus_f1, max_sent_f1 = 0, 0
for n in range(n_layers):
print(f'[Layer {n + 1}]', file=f)
print('-' * (119 + measure.max_m_len), file=f)
for m, s in measure.scores.items():
if m in measure.h_measures + measure.a_avg_measures:
print(
f'| {m.upper()} {" " * (measure.max_m_len - len(m))} '
f'| Corpus F1: {s.corpus_f1[n] * 100:.2f} '
f'| Sent F1: {s.sent_f1[n] * 100:.2f} ',
end='', file=f)
for z in range(len(s.label_recalls[0])):
print(
f'| {s.labels[z]}: '
f'{s.label_recalls[n][z] * 100:.2f} ',
end='', file=f)
print('|', file=f)
if s.sent_f1[n] > max_sent_f1:
max_corpus_f1 = s.corpus_f1[n]
max_sent_f1 = s.sent_f1[n]
max_measure = m
max_layer = n + 1
else:
for i in range(n_att):
m_att = str(i) if i > 9 else '0' + str(i)
m_att = m + m_att + " " * (
measure.max_m_len - len(m))
i_att = n_att * n + i
print(
f'| {m_att.upper()}'
f'| Corpus F1: {s.corpus_f1[i_att] * 100:.2f} '
f'| Sent F1: {s.sent_f1[i_att] * 100:.2f} ',
end='', file=f)
for z in range(len(s.label_recalls[0])):
print(f'| {s.labels[z]}: '
f'{s.label_recalls[i_att][z] * 100:.2f} ',
end='', file=f)
print('|', file=f)
if s.sent_f1[i_att] > max_sent_f1:
max_corpus_f1 = s.corpus_f1[i_att]
max_sent_f1 = s.sent_f1[i_att]
max_measure = m_att
max_layer = n + 1
print('-' * (119 + measure.max_m_len), file=f)
print(f'[MAX]: | Layer: {max_layer} '
f'| {max_measure.upper()} '
f'| Corpus F1: {max_corpus_f1 * 100:.2f} '
f'| Sent F1: {max_sent_f1 * 100:.2f} |')
print(f'[MAX]: | Layer: {max_layer} '
f'| {max_measure.upper()} '
f'| Corpus F1: {max_corpus_f1 * 100:.2f} '
f'| Sent F1: {max_sent_f1 * 100:.2f} |', file=f)
return scores
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data-path',
default='.data/PTB/ptb-test.txt', type=str)
parser.add_argument('--result-path', default='outputs', type=str)
parser.add_argument('--lm-cache-path',
default='/data/transformers', type=str)
parser.add_argument('--from-scratch', default=False, action='store_true')
parser.add_argument('--gpu', default=0, type=int)
parser.add_argument('--bias', default=0.0, type=float,
help='the right-branching bias hyperparameter lambda')
parser.add_argument('--seed', default=1234, type=int)
parser.add_argument('--token-heuristic', default='mean', type=str,
help='Available options: mean, first, last')
parser.add_argument('--use-not-coo-parser', default=False,
action='store_true',
help='Turning on this option will allow you to exploit '
'the NOT-COO parser (named by Dyer et al. 2019), '
'which has been broadly adopted by recent methods '
'for unsupervised parsing. As this parser utilizes'
' the right-branching bias in its inner workings, '
'it may give rise to some unexpected gains or '
'latent issues for the resulting trees. For more '
'details, see https://arxiv.org/abs/1909.09428.')
args = parser.parse_args()
setattr(args, 'device', f'cuda:{args.gpu}'
if torch.cuda.is_available() and args.gpu >= 0 else 'cpu')
setattr(args, 'time', datetime.datetime.now().strftime('%Y%m%d-%H:%M:%S'))
dataset_name = args.data_path.split('/')[-1].split('.')[0]
parser = '-w-not-coo-parser' if args.use_not_coo_parser else ''
pretrained = 'scratch' if args.from_scratch else 'pretrained'
result_path = f'{args.result_path}/{dataset_name}-{args.token_heuristic}'
result_path += f'-{pretrained}-{args.bias}{parser}'
setattr(args, 'result_path', result_path)
set_seed(args.seed)
logging.disable(logging.WARNING)
print('[List of arguments]')
for a in args.__dict__:
print(f'{a}: {args.__dict__[a]}')
scores = evaluate(args)
with open(f'{args.result_path}/scores.pickle', 'wb') as f:
pickle.dump(scores, f)
if __name__ == '__main__':
main()
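# Illustrative invocation note (not part of the original script): flag names
# follow the argparse definitions above; paths and values are examples only.
#   python run.py --data-path .data/PTB/ptb-test.txt --token-heuristic mean --bias 0.0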
| 11,441 | 45.893443 | 85 | py |
trees_from_transformers | trees_from_transformers-master/utils/score.py | import numpy as np
import torch
from utils.yk import get_stats
class Score(object):
def __init__(self, n):
self.corpus_f1 = torch.zeros(n, 3, dtype=torch.float)
self.sent_f1 = torch.zeros(n, dtype=torch.float)
self.n = n
self.cnt = 0
self.labels = ['SBAR', 'NP', 'VP', 'PP', 'ADJP', 'ADVP']
self.label_recalls = np.zeros((n, 6), dtype=float)
self.label_cnts = np.zeros(6, dtype=float)
def update(self, pred_spans, gold_spans, gold_tags):
pred_sets = [set(ps[:-1]) for ps in pred_spans]
gold_set = set(gold_spans[:-1])
self.update_corpus_f1(pred_sets, gold_set)
self.update_sentence_f1(pred_sets, gold_set)
self.update_label_recalls(pred_spans, gold_spans, gold_tags)
self.cnt += 1
def update_label_recalls(self, pred, gold, tags):
for i, tag in enumerate(tags):
if tag not in self.labels:
continue
tag_idx = self.labels.index(tag)
self.label_cnts[tag_idx] += 1
for z in range(len(pred)):
if gold[i] in pred[z]:
self.label_recalls[z][tag_idx] += 1
def update_corpus_f1(self, pred, gold):
stats = torch.tensor([get_stats(pred[i], gold) for i in range(self.n)],
dtype=torch.float)
self.corpus_f1 += stats
def update_sentence_f1(self, pred, gold):
# sent-level F1 is based on L83-89 from
# https://github.com/yikangshen/PRPN/test_phrase_grammar.py
for i in range(self.n):
model_out, std_out = pred[i], gold
overlap = model_out.intersection(std_out)
prec = float(len(overlap)) / (len(model_out) + 1e-8)
reca = float(len(overlap)) / (len(std_out) + 1e-8)
if len(std_out) == 0:
reca = 1.
if len(model_out) == 0:
prec = 1.
f1 = 2 * prec * reca / (prec + reca + 1e-8)
self.sent_f1[i] += f1
def derive_final_score(self):
tp = self.corpus_f1[:, 0]
fp = self.corpus_f1[:, 1]
fn = self.corpus_f1[:, 2]
prec = tp / (tp + fp)
recall = tp / (tp + fn)
epsilon = 1e-8
self.corpus_f1 = 2 * prec * recall / (prec + recall + epsilon)
self.sent_f1 /= self.cnt
for i in range(len(self.label_recalls)):
for j in range(len(self.label_recalls[0])):
self.label_recalls[i][j] /= self.label_cnts[j]
| 2,521 | 35.550725 | 79 | py |
trees_from_transformers | trees_from_transformers-master/utils/tools.py | import logging
import random
import torch
specials = {'bert': '#', 'gpt2': 'Ġ', 'xlnet': '▁', 'roberta': 'Ġ'}
def set_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
def select_indices(tokens, raw_tokens, model, mode):
mask = []
raw_i = 0
collapsed = ''
model = model.split('-')[0]
special = specials[model]
for i in range(len(tokens)):
token = tokens[i]
while len(token) > 0 and token[0] == special:
token = token[1:]
if collapsed == '' and len(token) > 0:
start_idx = i
collapsed += token
if collapsed == raw_tokens[raw_i]:
if mode == 'first':
mask.append(start_idx)
elif mode == 'last':
mask.append(i)
else:
raise NotImplementedError
raw_i += 1
collapsed = ''
if raw_i != len(raw_tokens):
raise Exception(f'Token mismatch: \n{tokens}\n{raw_tokens}')
return mask
def group_indices(tokens, raw_tokens, model):
mask = []
raw_i = 0
collapsed = ''
model = model.split('-')[0]
special = specials[model]
for i in range(len(tokens)):
token = tokens[i]
while len(token) > 0 and token[0] == special:
token = token[1:]
collapsed += token
mask.append(raw_i)
if collapsed == raw_tokens[raw_i]:
raw_i += 1
collapsed = ''
if raw_i != len(raw_tokens):
raise Exception(f'Token mismatch: \n{tokens}\n{raw_tokens}')
return torch.tensor(mask)
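# Illustrative sketch (not part of the original module): BERT-style word pieces
# ("playing" -> "play", "##ing") mapped back onto the raw tokens. `select_indices`
# keeps one sub-token per word, `group_indices` labels sub-tokens for pooling.
def _example_token_heuristics():
    raw_tokens = ["the", "playing", "field"]
    tokens = ["the", "play", "##ing", "field"]
    first = select_indices(tokens, raw_tokens, "bert-base-cased", "first")  # [0, 1, 3]
    last = select_indices(tokens, raw_tokens, "bert-base-cased", "last")    # [0, 2, 3]
    groups = group_indices(tokens, raw_tokens, "bert-base-cased")           # tensor([0, 1, 1, 2])
    return first, last, groups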
| 1,612 | 24.603175 | 68 | py |
trees_from_transformers | trees_from_transformers-master/utils/extractor.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Extractor(nn.Module):
def __init__(self, n_hidden):
super(Extractor, self).__init__()
self.linear = nn.Linear(n_hidden * 2, 1)
nn.init.uniform_(self.linear.weight, -0.01, 0.01)
nn.init.uniform_(self.linear.bias, 0)
def forward(self, l, r):
h = torch.cat([l, r], dim=-1)
o = self.linear(h)
# (seq_len-1)
return o.squeeze(-1)
def loss(self, d, gold):
assert len(d) == len(gold)
gold = d.new_tensor(gold)
l = 0
for i in range(len(d)):
for j in range(i+1, len(d)):
l += F.relu(1 - torch.sign(gold[i]- gold[j]) * (d[i] - d[j]))
return l
| 752 | 27.961538 | 77 | py |
trees_from_transformers | trees_from_transformers-master/utils/measure.py | import math
import torch
import torch.nn.functional as F
from utils.score import Score
class Measure(object):
def __init__(self, n_layers, n_att):
self.h_measures = ['cos', 'l1', 'l2']
self.a_measures = ['hellinger', 'jsd']
self.a_avg_measures = ['avg_hellinger', 'avg_jsd']
self.measures = self.h_measures + self.a_measures + self.a_avg_measures
self.max_m_len = max([len(m) for m in self.measures]) + 2
self.scores = {m: Score(n_layers) for m in self.h_measures}
for m in self.a_measures:
self.scores[m] = Score(n_layers * n_att)
for m in self.a_avg_measures:
self.scores[m] = Score(n_layers)
def derive_dists(self, l_hidden, r_hidden, l_att, r_att):
syn_dists = {}
for m in self.h_measures:
syn_dists[m] = getattr(self, m)(l_hidden, r_hidden)
for m in self.a_measures:
syn_dists[m] = getattr(self, m)(l_att, r_att)
syn_dists[m] = syn_dists[m].view(-1, syn_dists[m].size(-1))
for m in self.a_avg_measures:
syn_dists[m] = getattr(self, m)(l_att, r_att)
return syn_dists
def derive_final_score(self):
for m in self.scores.keys():
self.scores[m].derive_final_score()
@staticmethod
def cos(l_hidden, r_hidden):
# (n_layers, seq_len-1, hidden_dim) * 2 -> (n_layers, seq_len-1)
return (F.cosine_similarity(l_hidden, r_hidden, dim=-1) + 1) / 2
@staticmethod
def l1(l_hidden, r_hidden):
# (n_layers, seq_len-1, hidden_dim) * 2 -> (n_layers, seq_len-1)
return torch.norm(l_hidden - r_hidden, p=1, dim=-1)
@staticmethod
def l2(l_hidden, r_hidden):
# (n_layers, seq_len-1, hidden_dim) * 2 -> (n_layers, seq_len-1)
return torch.norm(l_hidden - r_hidden, p=2, dim=-1)
@staticmethod
def kl(p, q):
eps = 1e-30
p, q = p + eps, q + eps
p, q = p / p.sum(dim=-1, keepdim=True), q / q.sum(dim=-1, keepdim=True)
kl = F.kl_div(torch.log(q), p, reduction='none').sum(dim=-1)
# kl = (p * (torch.log(p) - torch.log(q))).sum(dim=-1)
# To deal with the numerical instability of the KL-div function in PyTorch
if (kl < 0).sum() > 0:
kl = kl * (1 - (kl < 0).float())
assert torch.isinf(kl).sum() == 0
assert torch.isnan(kl).sum() == 0
return kl
@staticmethod
def jsd(l_att, r_att):
m = (l_att + r_att) / 2
l_kl = Measure.kl(l_att, m)
r_kl = Measure.kl(r_att, m)
d = torch.sqrt((l_kl + r_kl) / 2)
assert (d < 0).sum() == 0
assert torch.isnan(d).sum() == 0
return d
@staticmethod
def hellinger(l_att, r_att):
d = (((l_att.sqrt() - r_att.sqrt()) ** 2).sum(dim=-1)).sqrt()
d /= math.sqrt(2)
return d
@staticmethod
def avg_hellinger(l_att, r_att):
d = Measure.hellinger(l_att, r_att)
return d.mean(dim=1)
@staticmethod
def avg_jsd(l_att, r_att):
d = Measure.jsd(l_att, r_att)
return d.mean(dim=1) | 3,102 | 33.477778 | 82 | py |
pi-peps | pi-peps-master/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'pi-peps'
copyright = '2019, Juraj Hasik, Alberto Sartori'
author = 'Juraj Hasik, Alberto Sartori'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pi-pepsdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pi-peps.tex', 'pi-peps Documentation',
'Juraj Hasik, Alberto Sartori', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pi-peps', 'pi-peps Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pi-peps', 'pi-peps Documentation',
author, 'pi-peps', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 5,615 | 28.557895 | 79 | py |
SSTAP | SSTAP-main/main.py | import sys
from dataset import VideoDataSet, VideoDataSet_unlabel
from loss_function import bmn_loss_func, get_mask
import os
import json
import torch
import torch.nn.parallel
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import numpy as np
import opts
from ipdb import set_trace
from models import BMN, TemporalShift, TemporalShift_random
import pandas as pd
import random
from post_processing import BMN_post_processing
from eval import evaluation_proposal
from ipdb import set_trace
seed = 400
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed) # Numpy module.
random.seed(seed) # Python random module.
torch.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
os.environ["CUDA_VISIBLE_DEVICES"] = '0,1,2,3'
blue = lambda x: '\033[94m' + x + '\033[0m'
sys.dont_write_bytecode = True
global_step = 0
eval_loss = []
consistency_rampup = 5
consistency = 6 # 30 # 3 # None
def update_ema_variables(model, ema_model, alpha, global_step):
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)  # teacher <- alpha * teacher + (1 - alpha) * student
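# Illustrative sketch (added for clarity, not part of the original pipeline): the effective
# decay is capped at min(1 - 1/(step + 1), alpha), so the teacher starts as a copy of the
# student and the update then settles at teacher <- alpha * teacher + (1 - alpha) * student.
def _demo_ema_decay(alpha=0.999, steps=(0, 1, 10, 1000)):
    """Return the effective EMA decay at a few global steps (inspection only)."""
    return [min(1 - 1 / (s + 1), alpha) for s in steps]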
def softmax_mse_loss(input_logits, target_logits):
"""Takes softmax on both sides and returns MSE loss
Note:
- Returns the sum over all examples. Divide by the batch size afterwards
if you want the mean.
- Sends gradients to inputs but not the targets.
"""
assert input_logits.size() == target_logits.size()
# input_softmax = F.softmax(input_logits, dim=1)
# target_softmax = F.softmax(target_logits, dim=1)
# num_classes = input_logits.size()[1]
# return F.mse_loss(input_softmax, target_softmax, reduction='sum') / num_classes # size_average=False
return F.mse_loss(input_logits, target_logits, reduction='mean')
def softmax_kl_loss(input_logits, target_logits):
"""Takes softmax on both sides and returns KL divergence
Note:
- Returns the sum over all examples. Divide by the batch size afterwards
if you want the mean.
- Sends gradients to inputs but not the targets.
"""
assert input_logits.size() == target_logits.size()
# input_log_softmax = F.log_softmax(input_logits, dim=1)
# target_softmax = F.softmax(target_logits, dim=1)
# return F.kl_div(input_log_softmax, target_softmax, reduction='sum')
return F.kl_div(input_logits, target_logits, reduction='mean')
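# Minimal usage sketch for the consistency criteria above (illustrative only; the tensor
# shapes mirror the BMN outputs produced later in this file, and this helper is not called
# by the training code):
def _demo_consistency_loss(batch_size=2, tscale=100):
    student_map, teacher_map = torch.rand(batch_size, 2, tscale, tscale), torch.rand(batch_size, 2, tscale, tscale)
    student_start, teacher_start = torch.rand(batch_size, tscale), torch.rand(batch_size, tscale)
    # the teacher tensors would normally come from model_ema under torch.no_grad()
    return softmax_mse_loss(student_map, teacher_map) + softmax_mse_loss(student_start, teacher_start)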
def Motion_MSEloss(output,clip_label,motion_mask=torch.ones(100).cuda()):
z = torch.pow((output-clip_label),2)
loss = torch.mean(motion_mask*z)
return loss
def sigmoid_rampup(current, rampup_length):
"""Exponential rampup from https://arxiv.org/abs/1610.02242"""
if rampup_length == 0:
return 1.0
else:
current = np.clip(current, 0.0, rampup_length)
phase = 1.0 - current / rampup_length
return float(np.exp(-5.0 * phase * phase))
def linear_rampup(current, rampup_length):
"""Linear rampup"""
assert current >= 0 and rampup_length >= 0
if current >= rampup_length:
return 1.0
else:
return current / rampup_length
def cosine_rampdown(current, rampdown_length):
"""Cosine rampdown from https://arxiv.org/abs/1608.03983"""
assert 0 <= current <= rampdown_length
return float(.5 * (np.cos(np.pi * current / rampdown_length) + 1))
def get_current_consistency_weight(epoch):
# Consistency ramp-up from https://arxiv.org/abs/1610.02242
return consistency * sigmoid_rampup(epoch, consistency_rampup)
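# Illustrative sketch (not used by the training loops): with the module-level defaults
# consistency = 6 and consistency_rampup = 5, the weight follows
# 6 * exp(-5 * (1 - epoch / 5) ** 2), i.e. roughly 0.04 at epoch 0, 1.0 at epoch 2,
# 4.9 at epoch 4, and it saturates at 6.0 from epoch 5 onwards.
def _demo_consistency_schedule(num_epochs=10):
    """Return the consistency weight used at each epoch (inspection only)."""
    return [get_current_consistency_weight(epoch) for epoch in range(num_epochs)]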
def train_BMN(data_loader, model, optimizer, epoch, bm_mask):
model.train()
epoch_pemreg_loss = 0
epoch_pemclr_loss = 0
epoch_tem_loss = 0
epoch_loss = 0
for n_iter, (input_data, label_confidence, label_start, label_end) in enumerate(data_loader):
input_data = input_data.cuda()
label_start = label_start.cuda()
label_end = label_end.cuda()
label_confidence = label_confidence.cuda()
confidence_map, start, end = model(input_data) # [B, 2, 100, 100], [B,100],[B,100]
loss = bmn_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask.cuda()) # loss = tem_loss + 10 * pem_reg_loss + pem_cls_loss
# return loss, tem_loss, pem_reg_loss, pem_cls_loss
optimizer.zero_grad()
loss[0].backward()
optimizer.step()
epoch_pemreg_loss += loss[2].cpu().detach().numpy()
epoch_pemclr_loss += loss[3].cpu().detach().numpy()
epoch_tem_loss += loss[1].cpu().detach().numpy()
epoch_loss += loss[0].cpu().detach().numpy()
print(
"BMN training loss(epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, total_loss: %.03f" % (
epoch, epoch_tem_loss / (n_iter + 1),
epoch_pemclr_loss / (n_iter + 1),
epoch_pemreg_loss / (n_iter + 1),
epoch_loss / (n_iter + 1)))
def train_BMN_Semi(data_loader, train_loader_unlabel, model, model_ema, optimizer, epoch, bm_mask):
global global_step
model.train()
epoch_pemreg_loss = 0
epoch_pemclr_loss = 0
epoch_tem_loss = 0
epoch_loss = 0
consistency_loss_all = 0
consistency_loss_ema_all = 0
consistency_criterion = softmax_mse_loss # softmax_kl_loss
temporal_perb = TemporalShift_random(400, 64)
order_clip_criterion = nn.CrossEntropyLoss()
consistency = True
clip_order = True
dropout2d = True
temporal_re = True
unlabeled_train_iter = iter(train_loader_unlabel)
for n_iter, (input_data, label_confidence, label_start, label_end) in enumerate(data_loader):
input_data = input_data.cuda()
label_start = label_start.cuda()
label_end = label_end.cuda()
label_confidence = label_confidence.cuda()
input_data_student = temporal_perb(input_data)
if dropout2d:
input_data_student = F.dropout2d(input_data_student, 0.2)
else:
input_data_student = F.dropout(input_data_student, 0.2)
confidence_map, start, end = model(input_data_student) # [B, 2, 100, 100], [B,100],[B,100]
loss = bmn_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask.cuda())
confidence_map = confidence_map * bm_mask.cuda()
if temporal_re:
input_recons = F.dropout2d(input_data.permute(0,2,1), 0.2).permute(0,2,1)
else:
input_recons = F.dropout2d(input_data, 0.2)
recons_feature = model(input_recons, recons=True)
        try:
            input_data_unlabel = next(unlabeled_train_iter)
            input_data_unlabel = input_data_unlabel.cuda()
        except StopIteration:
            # the unlabeled loader is smaller; restart it once exhausted
            unlabeled_train_iter = iter(train_loader_unlabel)
            input_data_unlabel = next(unlabeled_train_iter)
            input_data_unlabel = input_data_unlabel.cuda()
input_data_unlabel_student = temporal_perb(input_data_unlabel)
if dropout2d:
input_data_unlabel_student = F.dropout2d(input_data_unlabel_student, 0.2)
else:
input_data_unlabel_student = F.dropout(input_data_unlabel_student, 0.2)
confidence_map_unlabel_student, start_unlabel_student, end_unlabel_student = model(input_data_unlabel_student)
confidence_map_unlabel_student = confidence_map_unlabel_student * bm_mask.cuda()
# label
input_data_label_student_flip = F.dropout2d(input_data.flip(2).contiguous(), 0.1)
confidence_map_label_student_flip, start_label_student_flip, end_label_student_flip = model(
input_data_label_student_flip)
confidence_map_label_student_flip = confidence_map_label_student_flip * bm_mask.cuda()
# unlabel
input_data_unlabel_student_flip = F.dropout2d(input_data_unlabel.flip(2).contiguous(), 0.1)
confidence_map_unlabel_student_flip, start_unlabel_student_flip, end_unlabel_student_flip = model(
input_data_unlabel_student_flip)
confidence_map_unlabel_student_flip = confidence_map_unlabel_student_flip * bm_mask.cuda()
if temporal_re:
recons_input_student = F.dropout2d(input_data_unlabel.permute(0,2,1), 0.2).permute(0,2,1)
else:
recons_input_student = F.dropout2d(input_data_unlabel, 0.2)
recons_feature_unlabel_student = model(recons_input_student, recons=True)
loss_recons = 0.0005 * (
Motion_MSEloss(recons_feature, input_data) + Motion_MSEloss(recons_feature_unlabel_student,
input_data_unlabel)) # 0.0001
with torch.no_grad():
# input_data_unlabel = input_data_unlabel.cuda()
input_data_ema = F.dropout(input_data, 0.05) # 0.3
confidence_map_teacher, start_teacher, end_teacher = model_ema(input_data_ema)
confidence_map_teacher = confidence_map_teacher * bm_mask.cuda()
input_data_unlabel_teacher = F.dropout(input_data_unlabel, 0.05) # 0.3
confidence_map_unlabel_teacher, start_unlabel_teacher, end_unlabel_teacher = model_ema(
input_data_unlabel_teacher)
confidence_map_unlabel_teacher = confidence_map_unlabel_teacher * bm_mask.cuda()
            # flip (unlabeled teacher confidence map)
out = torch.zeros_like(confidence_map_unlabel_teacher)
out_m = confidence_map_unlabel_teacher.flip(3).contiguous()
for i in range(100):
out[:, :, i, :100 - i] = out_m[:, :, i, i:]
confidence_map_unlabel_teacher_flip = out
            # flip (labeled teacher confidence map)
out = torch.zeros_like(confidence_map_teacher)
out_m = confidence_map_teacher.flip(3).contiguous()
for i in range(100):
out[:, :, i, :100 - i] = out_m[:, :, i, i:]
confidence_map_label_teacher_flip = out
# start_unlabel_teacher_flip = start_unlabel_teacher.flip(1).contiguous()
# end_unlabel_teacher_flip = end_unlabel_teacher.flip(1).contiguous()
# add mask
start_unlabel_teacher[start_unlabel_teacher >= 0.9] = 1.0
start_unlabel_teacher[start_unlabel_teacher <= 0.1] = 0.0 # 2_add
end_unlabel_teacher[end_unlabel_teacher >= 0.9] = 1.0
end_unlabel_teacher[end_unlabel_teacher <= 0.1] = 0.0
# flip (label)
start_label_teacher_flip = start_teacher.flip(1).contiguous()
end_label_teacher_flip = end_teacher.flip(1).contiguous()
# flip (unlabel)
start_unlabel_teacher_flip = start_unlabel_teacher.flip(1).contiguous()
end_unlabel_teacher_flip = end_unlabel_teacher.flip(1).contiguous()
mask = torch.eq(
(start_unlabel_teacher.max(1)[0] > 0.6).float() + (end_unlabel_teacher.max(1)[0] > 0.6).float(), 2.)
confidence_map_unlabel_teacher = confidence_map_unlabel_teacher[mask]
start_unlabel_teacher = start_unlabel_teacher[mask]
end_unlabel_teacher = end_unlabel_teacher[mask]
# flip
confidence_map_unlabel_teacher_flip = confidence_map_unlabel_teacher_flip[mask]
start_unlabel_teacher_flip = start_unlabel_teacher_flip[mask]
end_unlabel_teacher_flip = end_unlabel_teacher_flip[mask]
# add mask
confidence_map_unlabel_student = confidence_map_unlabel_student[mask]
start_unlabel_student = start_unlabel_student[mask]
end_unlabel_student = end_unlabel_student[mask]
# flip add mask
confidence_map_unlabel_student_flip = confidence_map_unlabel_student_flip[mask]
start_unlabel_student_flip = start_unlabel_student_flip[mask]
end_unlabel_student_flip = end_unlabel_student_flip[mask]
if consistency:
consistency_weight = get_current_consistency_weight(epoch)
# meters.update('cons_weight', consistency_weight)
# set_trace()
consistency_loss = consistency_weight * (consistency_criterion(confidence_map, confidence_map_teacher) +
consistency_criterion(start, start_teacher) +
consistency_criterion(end, end_teacher))
consistency_loss_ema = consistency_weight * (
consistency_criterion(confidence_map_unlabel_teacher, confidence_map_unlabel_student) +
consistency_criterion(start_unlabel_teacher, start_unlabel_student) +
consistency_criterion(end_unlabel_teacher, end_unlabel_student))
# set_trace()
if torch.isnan(consistency_loss_ema):
consistency_loss_ema = torch.tensor(0.).cuda()
consistency_loss_ema_flip = 0.1 * consistency_weight * (
consistency_criterion(confidence_map_unlabel_teacher_flip, confidence_map_unlabel_student_flip) +
consistency_criterion(start_unlabel_teacher_flip, start_unlabel_student_flip) +
consistency_criterion(end_unlabel_teacher_flip, end_unlabel_student_flip)) + 0.1 * consistency_weight * (
consistency_criterion(confidence_map_label_teacher_flip, confidence_map_label_student_flip) +
consistency_criterion(start_label_teacher_flip, start_label_student_flip) +
consistency_criterion(end_label_teacher_flip, end_label_student_flip))
# meters.update('cons_loss', consistency_loss.item())
else:
consistency_loss = torch.tensor(0).cuda()
consistency_loss_ema = torch.tensor(0).cuda()
consistency_loss_ema_flip = torch.tensor(0).cuda()
# meters.update('cons_loss', 0)
if clip_order:
input_data_all = torch.cat([input_data, input_data_unlabel], 0)
batch_size, C, T = input_data_all.size()
idx = torch.randperm(batch_size)
input_data_all_new = input_data_all[idx]
forw_input = torch.cat(
[input_data_all_new[:batch_size // 2, :, T // 2:], input_data_all_new[:batch_size // 2, :, :T // 2]], 2)
back_input = input_data_all_new[batch_size // 2:, :, :]
input_all = torch.cat([forw_input, back_input], 0)
label_order = [0] * (batch_size // 2) + [1] * (batch_size - batch_size // 2)
label_order = torch.tensor(label_order).long().cuda()
out = model(input_all, clip_order=True)
loss_clip_order = order_clip_criterion(out, label_order)
loss_all = loss[0] + consistency_loss + consistency_loss_ema + loss_recons + 0.01 * loss_clip_order + consistency_loss_ema_flip
optimizer.zero_grad()
loss_all.backward()
optimizer.step()
global_step += 1
update_ema_variables(model, model_ema, 0.999, float(global_step/20)) # //5 //25
epoch_pemreg_loss += loss[2].cpu().detach().numpy()
epoch_pemclr_loss += loss[3].cpu().detach().numpy()
epoch_tem_loss += loss[1].cpu().detach().numpy()
epoch_loss += loss[0].cpu().detach().numpy()
consistency_loss_all += consistency_loss.cpu().detach().numpy()
consistency_loss_ema_all += consistency_loss_ema.cpu().detach().numpy()
if n_iter % 10 == 0:
print(
"training %d (epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, consistency_loss: %.05f, consistency_loss_ema: %.05f, total_loss: %.03f" % (global_step,
epoch, epoch_tem_loss / (n_iter + 1),
epoch_pemclr_loss / (n_iter + 1),
epoch_pemreg_loss / (n_iter + 1),
consistency_loss_all / (n_iter + 1),
consistency_loss_ema_all / (n_iter + 1),
epoch_loss / (n_iter + 1)))
print(
blue("BMN training loss(epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, total_loss: %.03f" % (
epoch, epoch_tem_loss / (n_iter + 1),
epoch_pemclr_loss / (n_iter + 1),
epoch_pemreg_loss / (n_iter + 1),
epoch_loss / (n_iter + 1))))
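# Sketch of the boundary-map flip used in the teacher branch above (illustrative; the
# training loop keeps the inlined version). For a confidence map indexed as
# [batch, channel, duration, start], temporally flipping the video maps a proposal with
# (start, duration) to (tscale - 1 - start - duration, duration), which is exactly what the
# per-row slice assignment implements.
def _flip_bm_map(conf_map, tscale=100):
    out_m = conf_map.flip(3).contiguous()
    out = torch.zeros_like(conf_map)
    for i in range(tscale):
        out[:, :, i, :tscale - i] = out_m[:, :, i, i:]
    return out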
def train_BMN_Semi_Full(data_loader, model, model_ema, optimizer, epoch, bm_mask):
global global_step
model.train()
epoch_pemreg_loss = 0
epoch_pemclr_loss = 0
epoch_tem_loss = 0
epoch_loss = 0
consistency_loss_all = 0
consistency_loss_ema_all = 0
consistency_criterion = softmax_mse_loss # softmax_kl_loss
# perturbance = nn.dropout(0.3)
temporal_perb = TemporalShift_random(400, 64) # TemporalShift(400, 8) 16
order_clip_criterion = nn.CrossEntropyLoss()
consistency = True
clip_order = True
dropout2d = True
temporal_re = True
# unlabeled_train_iter = iter(train_loader_unlabel)
for n_iter, (input_data, label_confidence, label_start, label_end) in enumerate(data_loader):
input_data = input_data.cuda()
label_start = label_start.cuda()
label_end = label_end.cuda()
label_confidence = label_confidence.cuda()
input_data_student = temporal_perb(input_data)
if dropout2d:
input_data_student = F.dropout2d(input_data_student, 0.2)
else:
input_data_student = F.dropout(input_data_student, 0.2)
confidence_map, start, end = model(input_data_student) # [B, 2, 100, 100], [B,100],[B,100]
loss = bmn_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask.cuda())
confidence_map = confidence_map * bm_mask.cuda()
if temporal_re:
input_recons = F.dropout2d(input_data.permute(0, 2, 1), 0.2).permute(0, 2, 1)
else:
input_recons = F.dropout2d(input_data, 0.2)
recons_feature = model(input_recons, recons=True)
# try:
# input_data_unlabel= unlabeled_train_iter.next()
# input_data_unlabel = input_data_unlabel.cuda()
# except:
# unlabeled_train_iter = iter(train_loader_unlabel)
# input_data_unlabel = unlabeled_train_iter.next()
# input_data_unlabel = input_data_unlabel.cuda()
# input_data_unlabel = F.dropout2d(input_data_unlabel.cuda(), 0.2)
# input_data_unlabel_student = temporal_perb(input_data_unlabel)
# if dropout2d:
# input_data_unlabel_student = F.dropout2d(input_data_unlabel_student, 0.2)
# else:
# input_data_unlabel_student = F.dropout(input_data_unlabel_student, 0.2)
# confidence_map_unlabel_student, start_unlabel_student, end_unlabel_student = model(input_data_unlabel_student)
# confidence_map_unlabel_student = confidence_map_unlabel_student * bm_mask.cuda()
input_data_label_student_flip = F.dropout2d(input_data.flip(2).contiguous(), 0.1)
confidence_map_label_student_flip, start_label_student_flip, end_label_student_flip = model(
input_data_label_student_flip)
confidence_map_label_student_flip = confidence_map_label_student_flip * bm_mask.cuda()
# recons_input_student = F.dropout2d(input_data_unlabel.cuda(), 0.2)
# recons_feature_unlabel_student = model(recons_input_student, recons=True)
# set_trace()
loss_recons = 0.0005 * (
Motion_MSEloss(recons_feature, input_data)) # 0.0001
with torch.no_grad():
# input_data_unlabel = input_data_unlabel.cuda()
input_data_ema = F.dropout(input_data, 0.05) # 0.3
confidence_map_teacher, start_teacher, end_teacher = model_ema(input_data_ema)
confidence_map_teacher = confidence_map_teacher * bm_mask.cuda()
# input_data_unlabel_teacher = F.dropout(input_data_unlabel, 0.05) # 0.3
# confidence_map_unlabel_teacher, start_unlabel_teacher, end_unlabel_teacher = model_ema(
# input_data_unlabel_teacher)
# confidence_map_unlabel_teacher = confidence_map_unlabel_teacher * bm_mask.cuda()
# flip
out = torch.zeros_like(confidence_map_teacher)
out_m = confidence_map_teacher.flip(3).contiguous()
for i in range(100):
out[:, :, i, :100 - i] = out_m[:, :, i, i:]
confidence_map_label_teacher = out
# start_unlabel_teacher_flip = start_unlabel_teacher.flip(1).contiguous()
# end_unlabel_teacher_flip = end_unlabel_teacher.flip(1).contiguous()
# add mask
# start_label_teacher[start_label_teacher >= 0.9] = 1.0
# start_label_teacher[start_label_teacher <= 0.1] = 0.0 # 2_add
# end_unlabel_teacher[end_unlabel_teacher >= 0.9] = 1.0
# end_unlabel_teacher[end_unlabel_teacher <= 0.1] = 0.0
# flip
start_label_teacher_flip = label_start.flip(1).contiguous()
end_label_teacher_flip = label_end.flip(1).contiguous()
# mask = torch.eq(
# (start_unlabel_teacher.max(1)[0] > 0.6).float() + (end_unlabel_teacher.max(1)[0] > 0.6).float(), 2.)
# confidence_map_unlabel_teacher = confidence_map_unlabel_teacher[mask]
# start_unlabel_teacher = start_unlabel_teacher[mask]
# end_unlabel_teacher = end_unlabel_teacher[mask]
# flip
# confidence_map_unlabel_teacher_flip = confidence_map_unlabel_teacher_flip[mask]
# start_unlabel_teacher_flip = start_unlabel_teacher_flip[mask]
# end_unlabel_teacher_flip = end_unlabel_teacher_flip[mask]
# add mask
# confidence_map_unlabel_student = confidence_map_unlabel_student[mask]
# start_unlabel_student = start_unlabel_student[mask]
# end_unlabel_student = end_unlabel_student[mask]
# flip add mask
# confidence_map_unlabel_student_flip = confidence_map_label_student_flip[mask]
# start_unlabel_student_flip = start_label_student_flip[mask]
# end_unlabel_student_flip = end_label_student_flip[mask]
if consistency:
consistency_weight = get_current_consistency_weight(epoch)
# meters.update('cons_weight', consistency_weight)
# set_trace()
consistency_loss = consistency_weight * (consistency_criterion(confidence_map, confidence_map_teacher) +
consistency_criterion(start, start_teacher) +
consistency_criterion(end, end_teacher))
consistency_loss_ema_flip = 0.1 * consistency_weight * (
consistency_criterion(confidence_map_label_student_flip, confidence_map_label_teacher) +
consistency_criterion(start_label_student_flip, start_label_teacher_flip) +
consistency_criterion(end_label_student_flip, end_label_teacher_flip))
# consistency_loss_ema_flip = 0.1 * consistency_weight * (
# consistency_criterion(confidence_map_label_teacher, confidence_map_label_student_flip) +
# consistency_criterion(start_label_teacher_flip, start_label_student_flip) +
# consistency_criterion(end_label_teacher_flip, end_label_student_flip))
# meters.update('cons_loss', consistency_loss.item())
else:
consistency_loss = torch.tensor(0).cuda()
consistency_loss_ema = torch.tensor(0).cuda()
consistency_loss_ema_flip = torch.tensor(0).cuda()
# meters.update('cons_loss', 0)
if clip_order:
input_data_all = input_data # torch.cat([input_data, input_data_unlabel], 0)
batch_size, C, T = input_data_all.size()
idx = torch.randperm(batch_size)
input_data_all_new = input_data_all[idx]
forw_input = torch.cat(
[input_data_all_new[:batch_size // 2, :, T // 2:], input_data_all_new[:batch_size // 2, :, :T // 2]], 2)
back_input = input_data_all_new[batch_size // 2:, :, :]
input_all = torch.cat([forw_input, back_input], 0)
label_order = [0] * (batch_size // 2) + [1] * (batch_size - batch_size // 2)
label_order = torch.tensor(label_order).long().cuda()
out = model(input_all, clip_order=True)
loss_clip_order = order_clip_criterion(out, label_order)
loss_all = loss[0] + consistency_loss + loss_recons + 0.01 * loss_clip_order + consistency_loss_ema_flip
optimizer.zero_grad()
loss_all.backward()
optimizer.step()
global_step += 1
update_ema_variables(model, model_ema, 0.999, float(global_step/20)) # //5 //25
epoch_pemreg_loss += loss[2].cpu().detach().numpy()
epoch_pemclr_loss += loss[3].cpu().detach().numpy()
epoch_tem_loss += loss[1].cpu().detach().numpy()
epoch_loss += loss[0].cpu().detach().numpy()
consistency_loss_all += consistency_loss.cpu().detach().numpy()
# consistency_loss_ema_all += consistency_loss_ema.cpu().detach().numpy()
if n_iter % 10 == 0:
print(
"training %d (epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, consistency_loss: %.05f, total_loss: %.03f" % (global_step,
epoch, epoch_tem_loss / (n_iter + 1),
epoch_pemclr_loss / (n_iter + 1),
epoch_pemreg_loss / (n_iter + 1),
consistency_loss_all / (n_iter + 1),
# consistency_loss_ema_all / (n_iter + 1),
epoch_loss / (n_iter + 1)))
print(
blue("BMN training loss(epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, total_loss: %.03f" % (
epoch, epoch_tem_loss / (n_iter + 1),
epoch_pemclr_loss / (n_iter + 1),
epoch_pemreg_loss / (n_iter + 1),
epoch_loss / (n_iter + 1))))
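# Sketch of the clip-order pretext task used in both training loops above (illustrative,
# standalone): half of the shuffled batch has its two temporal halves swapped (label 0),
# the other half keeps its original order (label 1), and the model classifies which is which.
def _demo_clip_order_batch(x):
    """x: [B, C, T] feature tensor; returns the mixed batch and its 0/1 order labels."""
    b, _, t = x.size()
    perm = x[torch.randperm(b)]
    scrambled = torch.cat([perm[:b // 2, :, t // 2:], perm[:b // 2, :, :t // 2]], 2)
    ordered = perm[b // 2:, :, :]
    labels = torch.tensor([0] * (b // 2) + [1] * (b - b // 2)).long()
    return torch.cat([scrambled, ordered], 0), labels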
def test_BMN(data_loader, model, epoch, bm_mask):
global eval_loss
model.eval()
best_loss = 1e10
epoch_pemreg_loss = 0
epoch_pemclr_loss = 0
epoch_tem_loss = 0
epoch_loss = 0
for n_iter, (input_data, label_confidence, label_start, label_end) in enumerate(data_loader):
input_data = input_data.cuda()
label_start = label_start.cuda()
label_end = label_end.cuda()
label_confidence = label_confidence.cuda()
confidence_map, start, end = model(input_data)
loss = bmn_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask.cuda())
epoch_pemreg_loss += loss[2].cpu().detach().numpy()
epoch_pemclr_loss += loss[3].cpu().detach().numpy()
epoch_tem_loss += loss[1].cpu().detach().numpy()
epoch_loss += loss[0].cpu().detach().numpy()
print(
blue("BMN val loss(epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, total_loss: %.03f" % (
epoch, epoch_tem_loss / (n_iter + 1),
epoch_pemclr_loss / (n_iter + 1),
epoch_pemreg_loss / (n_iter + 1),
epoch_loss / (n_iter + 1))))
eval_loss.append(epoch_loss / (n_iter + 1))
state = {'epoch': epoch + 1,
'state_dict': model.state_dict()}
torch.save(state, opt["checkpoint_path"] + "/BMN_checkpoint.pth.tar") # ./checkpoint
if epoch_loss < model.module.tem_best_loss:
model.module.tem_best_loss = epoch_loss
torch.save(state, opt["checkpoint_path"] + "/BMN_best.pth.tar")
# eval_loss.append(epoch_loss / (n_iter + 1))
opt_file = open(opt["checkpoint_path"] + "/output_eval_loss.json", "w")
json.dump(eval_loss, opt_file)
opt_file.close()
def test_BMN_ema(data_loader, model, epoch, bm_mask):
model.eval()
best_loss = 1e10
epoch_pemreg_loss = 0
epoch_pemclr_loss = 0
epoch_tem_loss = 0
epoch_loss = 0
for n_iter, (input_data, label_confidence, label_start, label_end) in enumerate(data_loader):
input_data = input_data.cuda()
label_start = label_start.cuda()
label_end = label_end.cuda()
label_confidence = label_confidence.cuda()
confidence_map, start, end = model(input_data)
loss = bmn_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask.cuda())
epoch_pemreg_loss += loss[2].cpu().detach().numpy()
epoch_pemclr_loss += loss[3].cpu().detach().numpy()
epoch_tem_loss += loss[1].cpu().detach().numpy()
epoch_loss += loss[0].cpu().detach().numpy()
print(
blue("BMN val_ema loss(epoch %d): tem_loss: %.03f, pem class_loss: %.03f, pem reg_loss: %.03f, total_loss: %.03f" % (
epoch, epoch_tem_loss / (n_iter + 1),
epoch_pemclr_loss / (n_iter + 1),
epoch_pemreg_loss / (n_iter + 1),
epoch_loss / (n_iter + 1))))
state = {'epoch': epoch + 1,
'state_dict': model.state_dict()}
torch.save(state, opt["checkpoint_path"] + "/BMN_checkpoint_ema.pth.tar") # ./checkpoint
if epoch_loss < model.module.tem_best_loss:
model.module.tem_best_loss = epoch_loss
torch.save(state, opt["checkpoint_path"] + "/BMN_best_ema.pth.tar")
def BMN_Train(opt):
model = BMN(opt)
model = torch.nn.DataParallel(model, device_ids=[0, 1, 2, 3]).cuda()
model_ema = BMN(opt)
model_ema = torch.nn.DataParallel(model_ema, device_ids=[0, 1, 2, 3]).cuda()
for param in model_ema.parameters():
param.detach_()
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=opt["training_lr"],
weight_decay=opt["weight_decay"]) # 1e-4
train_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset="train"), # [16,400,100]
batch_size=opt["batch_size"], shuffle=True, drop_last=True,
num_workers=8, pin_memory=True)
    # batch size for the unlabeled loader: a multiple of 4 proportional to the unlabeled
    # fraction, clipped to [4, 24]
    unlabel_batch_size = min(max(round(opt["batch_size"] * opt['unlabel_percent'] / (4 * (1. - opt['unlabel_percent']))) * 4, 4), 24)
    if opt['use_semi'] and opt['unlabel_percent'] > 0.:
        train_loader_unlabel = torch.utils.data.DataLoader(VideoDataSet_unlabel(opt, subset="unlabel"),  # [16,400,100]
                                                           batch_size=unlabel_batch_size, shuffle=True, drop_last=True,
                                                           num_workers=8, pin_memory=True)
test_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset="validation"),
batch_size=opt["batch_size"], shuffle=False,
num_workers=8, pin_memory=True)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opt["step_size"], gamma=opt["step_gamma"]) # 7 0.1
bm_mask = get_mask(opt["temporal_scale"])
use_semi = opt['use_semi']
    print('using {:.0%} of the labels for training !!!'.format(1 - opt['unlabel_percent']))
    print('training batchsize : {}'.format(opt["batch_size"]))
    print('unlabel_training batchsize : {}'.format(unlabel_batch_size))
for epoch in range(opt["train_epochs"]): # 9
# scheduler.step()
if use_semi:
if opt['unlabel_percent'] == 0.:
print('use Semi !!! use all label !!!')
train_BMN_Semi_Full(train_loader, model, model_ema, optimizer, epoch, bm_mask)
test_BMN(test_loader, model, epoch, bm_mask)
test_BMN_ema(test_loader, model_ema, epoch, bm_mask)
else:
print('use Semi !!!')
train_BMN_Semi(train_loader, train_loader_unlabel, model, model_ema, optimizer, epoch, bm_mask)
test_BMN(test_loader, model, epoch, bm_mask)
test_BMN_ema(test_loader, model_ema, epoch, bm_mask)
else:
print('use Fewer label !!!')
train_BMN(train_loader, model, optimizer, epoch, bm_mask)
test_BMN(test_loader, model, epoch, bm_mask)
scheduler.step()
def BMN_inference(opt, eval_name):
model = BMN(opt)
model = torch.nn.DataParallel(model, device_ids=[0, 1, 2, 3]).cuda()
model_checkpoint_dir = opt["checkpoint_path"] + eval_name # BMN_checkpoint.pth.tar BMN_best.pth.tar
checkpoint = torch.load(model_checkpoint_dir) # BMN_best.pth.tar
print('load :', model_checkpoint_dir, ' OK !')
model.load_state_dict(checkpoint['state_dict'])
model.eval()
test_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset="validation"),
batch_size=8, shuffle=False,
num_workers=8, pin_memory=True, drop_last=False)
tscale = opt["temporal_scale"]
with torch.no_grad():
for idx, input_data in test_loader:
# set_trace()
length = idx.shape[0]
# for ii in range(length):
video_name = []
for ii in range(length):
video_name_video = test_loader.dataset.video_list[idx[ii]]
video_name.append(video_name_video)
input_data = input_data.cuda()
confidence_map, start, end = model(input_data)
# set_trace()
for ii in range(length):
start_scores = start[ii].detach().cpu().numpy()
end_scores = end[ii].detach().cpu().numpy()
clr_confidence = (confidence_map[ii][1]).detach().cpu().numpy()
reg_confidence = (confidence_map[ii][0]).detach().cpu().numpy()
max_start = max(start_scores)
max_end = max(end_scores)
####################################################################################################
# generate the set of start points and end points
start_bins = np.zeros(len(start_scores))
start_bins[0] = 1 # [1,0,0...,0,1]
for idx in range(1, tscale - 1):
if start_scores[idx] > start_scores[idx + 1] and start_scores[idx] > start_scores[idx - 1]:
start_bins[idx] = 1
elif start_scores[idx] > (0.5 * max_start):
start_bins[idx] = 1
end_bins = np.zeros(len(end_scores))
end_bins[-1] = 1
for idx in range(1, tscale - 1):
if end_scores[idx] > end_scores[idx + 1] and end_scores[idx] > end_scores[idx - 1]:
end_bins[idx] = 1
elif end_scores[idx] > (0.5 * max_end):
end_bins[idx] = 1
########################################################################################################
#########################################################################
#
new_props = []
for idx in range(tscale):
for jdx in range(tscale):
start_index = jdx
end_index = start_index + idx+1
if end_index < tscale and start_bins[start_index] == 1 and end_bins[end_index] == 1:
xmin = start_index/tscale
xmax = end_index/tscale
xmin_score = start_scores[start_index]
xmax_score = end_scores[end_index]
clr_score = clr_confidence[idx, jdx]
reg_score = reg_confidence[idx, jdx]
score = xmin_score * xmax_score * clr_score*reg_score
new_props.append([xmin, xmax, xmin_score, xmax_score, clr_score, reg_score, score])
new_props = np.stack(new_props)
#########################################################################
col_name = ["xmin", "xmax", "xmin_score", "xmax_score", "clr_score", "reg_socre", "score"]
new_df = pd.DataFrame(new_props, columns=col_name)
new_df.to_csv("./output/BMN_results/" + video_name[ii] + ".csv", index=False)
def BMN_inference_ema(opt, eval_name):
model = BMN(opt)
model = torch.nn.DataParallel(model, device_ids=[0, 1, 2, 3]).cuda()
model_checkpoint_dir = opt["checkpoint_path"] + eval_name # BMN_checkpoint.pth.tar BMN_best.pth.tar
checkpoint = torch.load(model_checkpoint_dir) # BMN_best.pth.tar
print('load :', model_checkpoint_dir, ' OK !')
model.load_state_dict(checkpoint['state_dict'])
model.eval()
test_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset="validation"),
batch_size=8, shuffle=False,
num_workers=8, pin_memory=True, drop_last=False)
tscale = opt["temporal_scale"]
with torch.no_grad():
for idx, input_data in test_loader:
# set_trace()
length = idx.shape[0]
# for ii in range(length):
video_name = []
for ii in range(length):
video_name_video = test_loader.dataset.video_list[idx[ii]]
video_name.append(video_name_video)
input_data = input_data.cuda()
confidence_map, start, end = model(input_data)
# set_trace()
for ii in range(length):
start_scores = start[ii].detach().cpu().numpy()
end_scores = end[ii].detach().cpu().numpy()
clr_confidence = (confidence_map[ii][1]).detach().cpu().numpy()
reg_confidence = (confidence_map[ii][0]).detach().cpu().numpy()
max_start = max(start_scores)
max_end = max(end_scores)
####################################################################################################
# generate the set of start points and end points
start_bins = np.zeros(len(start_scores))
start_bins[0] = 1 # [1,0,0...,0,1]
for idx in range(1, tscale - 1):
if start_scores[idx] > start_scores[idx + 1] and start_scores[idx] > start_scores[idx - 1]:
start_bins[idx] = 1
elif start_scores[idx] > (0.5 * max_start):
start_bins[idx] = 1
end_bins = np.zeros(len(end_scores))
end_bins[-1] = 1
for idx in range(1, tscale - 1):
if end_scores[idx] > end_scores[idx + 1] and end_scores[idx] > end_scores[idx - 1]:
end_bins[idx] = 1
elif end_scores[idx] > (0.5 * max_end):
end_bins[idx] = 1
########################################################################################################
#########################################################################
new_props = []
for idx in range(tscale):
for jdx in range(tscale):
start_index = jdx
end_index = start_index + idx+1
if end_index < tscale and start_bins[start_index] == 1 and end_bins[end_index] == 1:
xmin = start_index/tscale
xmax = end_index/tscale
xmin_score = start_scores[start_index]
xmax_score = end_scores[end_index]
clr_score = clr_confidence[idx, jdx]
reg_score = reg_confidence[idx, jdx]
score = xmin_score * xmax_score * clr_score*reg_score
new_props.append([xmin, xmax, xmin_score, xmax_score, clr_score, reg_score, score])
new_props = np.stack(new_props)
#########################################################################
col_name = ["xmin", "xmax", "xmin_score", "xmax_score", "clr_score", "reg_socre", "score"]
new_df = pd.DataFrame(new_props, columns=col_name)
new_df.to_csv("./output/BMN_results/" + video_name[ii] + ".csv", index=False)
def main(opt):
if opt["mode"] == "train":
BMN_Train(opt)
elif opt["mode"] == "inference":
if not os.path.exists("output/BMN_results"):
os.makedirs("output/BMN_results")
print('unlabel percent: ', opt['unlabel_percent'])
print('eval student model !!')
for eval_name in ['/BMN_checkpoint.pth.tar', '/BMN_best.pth.tar']:
BMN_inference(opt, eval_name)
print("Post processing start")
BMN_post_processing(opt)
print("Post processing finished")
evaluation_proposal(opt)
print('eval teacher model !!')
for eval_name in ['/BMN_checkpoint_ema.pth.tar', '/BMN_best_ema.pth.tar']:
BMN_inference_ema(opt, eval_name)
print("Post processing start")
BMN_post_processing(opt)
print("Post processing finished")
evaluation_proposal(opt)
if __name__ == '__main__':
opt = opts.parse_opt()
opt = vars(opt)
if not os.path.exists(opt["checkpoint_path"]):
os.makedirs(opt["checkpoint_path"])
if not os.path.exists('./output'):
os.makedirs('./output')
opt_file = open(opt["checkpoint_path"] + "/opts.json", "w")
json.dump(opt, opt_file)
opt_file.close()
main(opt)
| 42,436 | 48.173812 | 190 | py |
SSTAP | SSTAP-main/dataset.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import json
import torch.utils.data as data
import torch
from utils import ioa_with_anchors, iou_with_anchors
from ipdb import set_trace
def load_json(file):
with open(file) as json_file:
json_data = json.load(json_file)
return json_data
class VideoDataSet(data.Dataset):
def __init__(self, opt, subset="train"):
self.temporal_scale = opt["temporal_scale"] # 100
self.temporal_gap = 1. / self.temporal_scale
self.subset = subset
self.mode = opt["mode"]
self.feature_path = opt["feature_path"]
self.video_info_path = "./data/activitynet_annotations/video_info_new_{}.csv".format(opt['unlabel_percent'])
self.video_anno_path = opt["video_anno"]
self._getDatasetDict()
self._get_match_map()
# set_trace()
def _getDatasetDict(self):
anno_df = pd.read_csv(self.video_info_path)
anno_database = load_json(self.video_anno_path)
self.video_dict = {}
for i in range(len(anno_df)):
video_name = anno_df.video.values[i]
video_info = anno_database[video_name]
video_subset = anno_df.subset.values[i]
if self.subset in video_subset:
if 'unlabel' not in video_subset:
self.video_dict[video_name] = video_info
self.video_list = list(self.video_dict.keys())
print("%s subset video numbers: %d" % (self.subset, len(self.video_list)))
def __getitem__(self, index):
video_data = self._load_file(index)
if self.mode == "train":
match_score_start, match_score_end, confidence_score = self._get_train_label(index, self.anchor_xmin,
self.anchor_xmax)
return video_data,confidence_score, match_score_start, match_score_end # [400,100],[100,100],[100]
else:
return index, video_data
def _get_match_map(self):
match_map = []
for idx in range(self.temporal_scale):
tmp_match_window = []
xmin = self.temporal_gap * idx
for jdx in range(1, self.temporal_scale + 1):
xmax = xmin + self.temporal_gap * jdx
tmp_match_window.append([xmin, xmax])
match_map.append(tmp_match_window)
match_map = np.array(match_map) # 100x100x2
match_map = np.transpose(match_map, [1, 0, 2]) # [0,1] [1,2] [2,3].....[99,100]
match_map = np.reshape(match_map, [-1, 2]) # [0,2] [1,3] [2,4].....[99,101] # duration x start
self.match_map = match_map # duration is same in row, start is same in col [10000,2]
self.anchor_xmin = [self.temporal_gap * (i-0.5) for i in range(self.temporal_scale)] # [-0.5/100,0.5/100,...98.5/100]
self.anchor_xmax = [self.temporal_gap * (i+0.5) for i in range(1, self.temporal_scale + 1)] # [1.5/100,...,100.5/100]
def _load_file(self, index):
video_name = self.video_list[index]
video_df = pd.read_csv(self.feature_path + "csv_mean_" + str(self.temporal_scale) + "/" + video_name + ".csv")
video_data = video_df.values[:, :]
video_data = torch.Tensor(video_data)
video_data = torch.transpose(video_data, 0, 1)
        video_data = video_data.float()
return video_data
def _get_train_label(self, index, anchor_xmin, anchor_xmax):
video_name = self.video_list[index] # video_name
video_info = self.video_dict[video_name]
video_frame = video_info['duration_frame']
video_second = video_info['duration_second']
feature_frame = video_info['feature_frame']
corrected_second = float(feature_frame) / video_frame * video_second # there are some frames not used
video_labels = video_info['annotations'] # the measurement is second, not frame
##############################################################################################
# change the measurement from second to percentage
gt_bbox = []
gt_iou_map = []
for j in range(len(video_labels)):
tmp_info = video_labels[j]
tmp_start = max(min(1, tmp_info['segment'][0] / corrected_second), 0)
tmp_end = max(min(1, tmp_info['segment'][1] / corrected_second), 0)
gt_bbox.append([tmp_start, tmp_end]) # gt_bbox [0~1]
tmp_gt_iou_map = iou_with_anchors(
self.match_map[:, 0], self.match_map[:, 1], tmp_start, tmp_end) # [100*100]
tmp_gt_iou_map = np.reshape(tmp_gt_iou_map,
[self.temporal_scale, self.temporal_scale])
gt_iou_map.append(tmp_gt_iou_map)
gt_iou_map = np.array(gt_iou_map) # gt [100*100]
gt_iou_map = np.max(gt_iou_map, axis=0)
gt_iou_map = torch.Tensor(gt_iou_map) # [100,100]
##############################################################################################
####################################################################################################
# generate R_s and R_e
gt_bbox = np.array(gt_bbox) # gt [start,end]
gt_xmins = gt_bbox[:, 0]
gt_xmaxs = gt_bbox[:, 1]
gt_lens = gt_xmaxs - gt_xmins
gt_len_small = 3 * self.temporal_gap # np.maximum(self.temporal_gap, self.boundary_ratio * gt_lens)
gt_start_bboxs = np.stack((gt_xmins - gt_len_small / 2, gt_xmins + gt_len_small / 2), axis=1)
gt_end_bboxs = np.stack((gt_xmaxs - gt_len_small / 2, gt_xmaxs + gt_len_small / 2), axis=1)
#####################################################################################################
##########################################################################################################
# calculate the ioa for all timestamp
match_score_start = []
for jdx in range(len(anchor_xmin)):
match_score_start.append(np.max(
ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[jdx], gt_start_bboxs[:, 0], gt_start_bboxs[:, 1])))
match_score_end = []
for jdx in range(len(anchor_xmin)):
match_score_end.append(np.max(
ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[jdx], gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
match_score_start = torch.Tensor(match_score_start)
match_score_end = torch.Tensor(match_score_end)
############################################################################################################
return match_score_start, match_score_end, gt_iou_map
def __len__(self):
return len(self.video_list)
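# Illustrative sketch (not used by the loaders): the boundary-matching grid built in
# _get_match_map is ordered duration-major after the transpose/reshape, i.e. row
# d * temporal_scale + s holds the proposal [s / temporal_scale, (s + d + 1) / temporal_scale];
# anchor_xmin / anchor_xmax are +-0.5-bin windows around each of the temporal_scale positions.
def _demo_match_map(temporal_scale=5):
    gap = 1. / temporal_scale
    match_map = []
    for idx in range(temporal_scale):
        xmin = gap * idx
        match_map.append([[xmin, xmin + gap * jdx] for jdx in range(1, temporal_scale + 1)])
    match_map = np.transpose(np.array(match_map), [1, 0, 2]).reshape(-1, 2)
    return match_map  # e.g. row 0 -> [0.0, 0.2], row temporal_scale -> [0.0, 0.4]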
class VideoDataSet_unlabel(data.Dataset):
def __init__(self, opt, subset="unlabel"):
self.temporal_scale = opt["temporal_scale"] # 100
self.temporal_gap = 1. / self.temporal_scale
self.subset = subset
self.mode = opt["mode"]
self.feature_path = opt["feature_path"]
self.video_info_path = "./data/activitynet_annotations/video_info_new_{}.csv".format(opt['unlabel_percent'])
self.video_anno_path = opt["video_anno"]
self._getDatasetDict()
self.unlabel_percent = opt['unlabel_percent']
self._get_match_map()
def _getDatasetDict(self):
anno_df = pd.read_csv(self.video_info_path)
anno_database = load_json(self.video_anno_path)
self.video_dict = {}
for i in range(len(anno_df)):
video_name = anno_df.video.values[i]
video_info = anno_database[video_name]
video_subset = anno_df.subset.values[i]
if self.subset in video_subset:
self.video_dict[video_name] = 'unseen'
self.video_list = list(self.video_dict.keys())
print("%s unlabeled subset video numbers: %d" % (self.subset, len(self.video_list)))
def __getitem__(self, index):
video_data = self._load_file(index)
if self.mode == "train":
# match_score_start, match_score_end, confidence_score = self._get_train_label(index, self.anchor_xmin,
# self.anchor_xmax)
return video_data # ,confidence_score, match_score_start, match_score_end # [400,100],[100,100],[100]
else:
return index, video_data
def _get_match_map(self):
match_map = []
for idx in range(self.temporal_scale):
tmp_match_window = []
xmin = self.temporal_gap * idx
for jdx in range(1, self.temporal_scale + 1):
xmax = xmin + self.temporal_gap * jdx
tmp_match_window.append([xmin, xmax])
match_map.append(tmp_match_window)
match_map = np.array(match_map) # 100x100x2
match_map = np.transpose(match_map, [1, 0, 2]) # [0,1] [1,2] [2,3].....[99,100]
match_map = np.reshape(match_map, [-1, 2]) # [0,2] [1,3] [2,4].....[99,101] # duration x start
self.match_map = match_map # duration is same in row, start is same in col [10000,2]
self.anchor_xmin = [self.temporal_gap * (i-0.5) for i in range(self.temporal_scale)] # [-0.5/100,0.5/100,...98.5/100]
self.anchor_xmax = [self.temporal_gap * (i+0.5) for i in range(1, self.temporal_scale + 1)] # [1.5/100,...,100.5/100]
def _load_file(self, index):
video_name = self.video_list[index]
video_df = pd.read_csv(self.feature_path + "csv_mean_" + str(self.temporal_scale) + "/" + video_name + ".csv")
video_data = video_df.values[:, :]
video_data = torch.Tensor(video_data)
video_data = torch.transpose(video_data, 0, 1)
        video_data = video_data.float()
return video_data
def _get_train_label(self, index, anchor_xmin, anchor_xmax):
video_name = self.video_list[index] # video_name
video_info = self.video_dict[video_name]
video_frame = video_info['duration_frame']
video_second = video_info['duration_second']
feature_frame = video_info['feature_frame']
corrected_second = float(feature_frame) / video_frame * video_second # there are some frames not used
video_labels = video_info['annotations'] # the measurement is second, not frame
##############################################################################################
# change the measurement from second to percentage
gt_bbox = []
gt_iou_map = []
for j in range(len(video_labels)):
tmp_info = video_labels[j]
tmp_start = max(min(1, tmp_info['segment'][0] / corrected_second), 0)
tmp_end = max(min(1, tmp_info['segment'][1] / corrected_second), 0)
gt_bbox.append([tmp_start, tmp_end]) # gt_bbox [0~1]
tmp_gt_iou_map = iou_with_anchors(
self.match_map[:, 0], self.match_map[:, 1], tmp_start, tmp_end) # [100*100]
tmp_gt_iou_map = np.reshape(tmp_gt_iou_map,
[self.temporal_scale, self.temporal_scale])
gt_iou_map.append(tmp_gt_iou_map)
        gt_iou_map = np.array(gt_iou_map)  # one [100, 100] IoU map per ground-truth segment
gt_iou_map = np.max(gt_iou_map, axis=0)
gt_iou_map = torch.Tensor(gt_iou_map) # [100,100]
##############################################################################################
####################################################################################################
# generate R_s and R_e
        gt_bbox = np.array(gt_bbox)  # one [start, end] pair per ground-truth segment
gt_xmins = gt_bbox[:, 0]
gt_xmaxs = gt_bbox[:, 1]
gt_lens = gt_xmaxs - gt_xmins
gt_len_small = 3 * self.temporal_gap # np.maximum(self.temporal_gap, self.boundary_ratio * gt_lens)
gt_start_bboxs = np.stack((gt_xmins - gt_len_small / 2, gt_xmins + gt_len_small / 2), axis=1)
gt_end_bboxs = np.stack((gt_xmaxs - gt_len_small / 2, gt_xmaxs + gt_len_small / 2), axis=1)
#####################################################################################################
##########################################################################################################
# calculate the ioa for all timestamp
match_score_start = []
for jdx in range(len(anchor_xmin)):
match_score_start.append(np.max(
ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[jdx], gt_start_bboxs[:, 0], gt_start_bboxs[:, 1])))
match_score_end = []
for jdx in range(len(anchor_xmin)):
match_score_end.append(np.max(
ioa_with_anchors(anchor_xmin[jdx], anchor_xmax[jdx], gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
match_score_start = torch.Tensor(match_score_start)
match_score_end = torch.Tensor(match_score_end)
############################################################################################################
return match_score_start, match_score_end, gt_iou_map
def __len__(self):
return len(self.video_list)
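# Illustrative note (standalone, not used above): the start/end regions R_s and R_e built in
# _get_train_label are fixed windows of 3 temporal bins centred on each ground-truth
# boundary; e.g. a segment [0.30, 0.55] with temporal_scale = 100 yields
# R_s = [0.285, 0.315] and R_e = [0.535, 0.565], and match_score_start/end take the maximum
# IoA between every anchor bin and these windows.
def _demo_boundary_regions(gt_xmin, gt_xmax, temporal_scale=100):
    half = 0.5 * 3. / temporal_scale
    return (gt_xmin - half, gt_xmin + half), (gt_xmax - half, gt_xmax + half)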
if __name__ == '__main__':
import opts
opt = opts.parse_opt()
opt = vars(opt)
train_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset="train"),
batch_size=opt["batch_size"], shuffle=True,
num_workers=8, pin_memory=True)
for aaa,bbb,ccc,ddd in train_loader: # len(train_loader)=604
set_trace()
print(aaa.shape,bbb.shape,ccc.shape,ddd.shape) # torch.Size([16, 400, 100]) torch.Size([16, 100, 100]) torch.Size([16, 100]) torch.Size([16, 100])
# set_trace()
break
| 14,230 | 51.707407 | 155 | py |
SSTAP | SSTAP-main/loss_function.py | # -*- coding: utf-8 -*-
import torch
import numpy as np
import torch.nn.functional as F
def get_mask(tscale):
bm_mask = []
for idx in range(tscale):
mask_vector = [1 for i in range(tscale - idx)
] + [0 for i in range(idx)]
bm_mask.append(mask_vector)
bm_mask = np.array(bm_mask, dtype=np.float32)
return torch.Tensor(bm_mask)
''' [1, 1, 1, 1, 1]
[1, 1, 1, 1, 0]
[1, 1, 1, 0, 0]
[1, 1, 0, 0, 0]
[1, 0, 0, 0, 0]'''
def bmn_loss_func(pred_bm, pred_start, pred_end, gt_iou_map, gt_start, gt_end, bm_mask):
pred_bm_reg = pred_bm[:, 0].contiguous()
pred_bm_cls = pred_bm[:, 1].contiguous()
gt_iou_map = gt_iou_map * bm_mask # [b,100,100]*[100,100] ->[B,100,100]
pem_reg_loss = pem_reg_loss_func(pred_bm_reg, gt_iou_map, bm_mask)
pem_cls_loss = pem_cls_loss_func(pred_bm_cls, gt_iou_map, bm_mask)
tem_loss = tem_loss_func(pred_start, pred_end, gt_start, gt_end)
loss = tem_loss + 10 * pem_reg_loss + pem_cls_loss
return loss, tem_loss, pem_reg_loss, pem_cls_loss
def tem_loss_func(pred_start, pred_end, gt_start, gt_end):
def bi_loss(pred_score, gt_label):
pred_score = pred_score.view(-1)
gt_label = gt_label.view(-1)
pmask = (gt_label > 0.5).float()
num_entries = len(pmask)
num_positive = torch.sum(pmask)
ratio = num_entries / num_positive
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
epsilon = 0.000001
loss_pos = coef_1 * torch.log(pred_score + epsilon) * pmask
loss_neg = coef_0 * torch.log(1.0 - pred_score + epsilon)*(1.0 - pmask)
loss = -1 * torch.mean(loss_pos + loss_neg)
return loss
loss_start = bi_loss(pred_start, gt_start)
loss_end = bi_loss(pred_end, gt_end)
loss = loss_start + loss_end
return loss
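# Worked note on the class re-balancing above (illustrative): with N entries and P positives,
# ratio = N / P, so positives are weighted by 0.5 * N / P and negatives by 0.5 * N / (N - P);
# each class then contributes roughly half of the loss however sparse the boundary labels are.
# A minimal standalone check with assumed shapes (not called elsewhere in this file):
def _demo_tem_loss_balance():
    pred = torch.full((2, 100), 0.5)   # uninformative predictions
    gt = torch.zeros(2, 100)
    gt[:, 10] = 1.0                    # a single positive per sequence
    return tem_loss_func(pred, pred, gt, gt)  # ~= 2 * -log(0.5) ~= 1.386 (start + end terms)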
def pem_reg_loss_func(pred_score, gt_iou_map, mask):
u_hmask = (gt_iou_map > 0.7).float()
u_mmask = ((gt_iou_map <= 0.7) & (gt_iou_map > 0.3)).float()
u_lmask = ((gt_iou_map <= 0.3) & (gt_iou_map > 0.)).float()
u_lmask = u_lmask * mask
num_h = torch.sum(u_hmask)
num_m = torch.sum(u_mmask)
num_l = torch.sum(u_lmask)
r_m = num_h / num_m
u_smmask = torch.Tensor(np.random.rand(*gt_iou_map.shape)).cuda()
u_smmask = u_mmask * u_smmask
u_smmask = (u_smmask > (1. - r_m)).float()
r_l = num_h / num_l
u_slmask = torch.Tensor(np.random.rand(*gt_iou_map.shape)).cuda()
u_slmask = u_lmask * u_slmask
u_slmask = (u_slmask > (1. - r_l)).float()
weights = u_hmask + u_smmask + u_slmask
    # F.mse_loss returns the mean over all elements; multiplying by a ones tensor and summing
    # rescales it back to the summed squared error, so the final value is
    # 0.5 * sum((pred - gt)^2 over the sampled positions) / (number of sampled positions)
    loss = F.mse_loss(pred_score * weights, gt_iou_map * weights)
    loss = 0.5 * torch.sum(loss * torch.ones(*weights.shape).cuda()) / torch.sum(weights)
return loss
def pem_cls_loss_func(pred_score, gt_iou_map, mask):
pmask = (gt_iou_map > 0.9).float()
nmask = (gt_iou_map <= 0.9).float()
nmask = nmask * mask
num_positive = torch.sum(pmask)
num_entries = num_positive + torch.sum(nmask)
ratio = num_entries / num_positive
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
epsilon = 0.000001
loss_pos = coef_1 * torch.log(pred_score + epsilon) * pmask
loss_neg = coef_0 * torch.log(1.0 - pred_score + epsilon) * nmask
loss = -1 * torch.sum(loss_pos + loss_neg) / num_entries
return loss
| 3,482 | 32.171429 | 90 | py |
SSTAP | SSTAP-main/models.py | # -*- coding: utf-8 -*-
import math
import numpy as np
import torch
import torch.nn as nn
from ipdb import set_trace
import random
import torch.nn.functional as F
class TemporalShift(nn.Module):
def __init__(self, n_segment=3, n_div=8, inplace=False):
super(TemporalShift, self).__init__()
# self.net = net
self.n_segment = n_segment
self.fold_div = n_div
self.inplace = inplace
self.channels_range = list(range(400)) # feature_channels
if inplace:
print('=> Using in-place shift...')
# print('=> Using fold div: {}'.format(self.fold_div))
def forward(self, x):
# self.fold_div = n_div
x = self.shift(x, self.n_segment, fold_div=self.fold_div, inplace=self.inplace, channels_range =self.channels_range)
return x
@staticmethod
def shift(x, n_segment, fold_div=8, inplace=False, channels_range=[1,2]):
x = x.permute(0, 2, 1) # [B,C,T] --> [B, T, C]
# set_trace()
n_batch, T, c = x.size()
# nt, c, h, w = x.size()
# n_batch = nt // n_segment
# x = x.view(n_batch, n_segment, c, h, w)
# x = x.view(n_batch, T, c, h, w)
        fold = c // (2 * fold_div)  # channels shifted per direction within each 200-d half
# all = random.sample(channels_range, fold*2)
# forward = sorted(all[:fold])
# backward = sorted(all[fold:])
# fixed = list(set(channels_range) - set(all))
# fold = c // fold_div
if inplace:
# Due to some out of order error when performing parallel computing.
# May need to write a CUDA kernel.
raise NotImplementedError
# out = InplaceShift.apply(x, fold)
else:
out = torch.zeros_like(x)
out[:, :-1, :fold] = x[:, 1:, :fold] # shift left
out[:, 1:, fold: 2 * fold] = x[:, :-1, fold: 2 * fold] # shift right
out[:, :, 2 * fold:200] = x[:, :, 2 * fold:200] # not shift
out[:, :-1, 200:200+fold] = x[:, 1:, 200:200+fold] # shift left
out[:, 1:, 200+fold: 200+2 * fold] = x[:, :-1, 200+fold: 200+2 * fold] # shift right
out[:, :, 200+2 * fold:] = x[:, :, 200 + 2 * fold:] # not shift
# out = torch.zeros_like(x)
# out[:, :-1, forward] = x[:, 1:, forward] # shift left
# out[:, 1:, backward] = x[:, :-1, backward] # shift right
# out[:, :, fixed] = x[:, :, fixed] # not shift
# return out.view(nt, c, h, w)
return out.permute(0, 2, 1)
class TemporalShift_random(nn.Module):
def __init__(self, n_segment=3, n_div=8, inplace=False):
super(TemporalShift_random, self).__init__()
# self.net = net
self.n_segment = n_segment
self.fold_div = n_div
self.inplace = inplace
self.channels_range = list(range(400)) # feature_channels
if inplace:
print('=> Using in-place shift...')
# print('=> Using fold div: {}'.format(self.fold_div))
def forward(self, x):
# self.fold_div = n_div
x = self.shift(x, self.n_segment, fold_div=self.fold_div, inplace=self.inplace, channels_range =self.channels_range)
return x
@staticmethod
def shift(x, n_segment, fold_div=8, inplace=False, channels_range=[1,2]):
x = x.permute(0, 2, 1) # [B,C,T] --> [B, T, C]
# set_trace()
n_batch, T, c = x.size()
# nt, c, h, w = x.size()
# n_batch = nt // n_segment
# x = x.view(n_batch, n_segment, c, h, w)
# x = x.view(n_batch, T, c, h, w)
fold = c // fold_div
        sampled = random.sample(channels_range, fold * 2)  # random subset of channels to shift
        forward = sorted(sampled[:fold])   # these shift left in time
        backward = sorted(sampled[fold:])  # these shift right in time
        fixed = list(set(channels_range) - set(sampled))  # the rest stay in place
# fold = c // fold_div
if inplace:
# Due to some out of order error when performing parallel computing.
# May need to write a CUDA kernel.
raise NotImplementedError
# out = InplaceShift.apply(x, fold)
else:
# out = torch.zeros_like(x)
# out[:, :-1, :fold] = x[:, 1:, :fold] # shift left
# out[:, 1:, fold: 2 * fold] = x[:, :-1, fold: 2 * fold] # shift right
# out[:, :, 2 * fold:] = x[:, :, 2 * fold:] # not shift
out = torch.zeros_like(x)
out[:, :-1, forward] = x[:, 1:, forward] # shift left
out[:, 1:, backward] = x[:, :-1, backward] # shift right
out[:, :, fixed] = x[:, :, fixed] # not shift
# return out.view(nt, c, h, w)
return out.permute(0, 2, 1)
class InplaceShift(torch.autograd.Function):
# Special thanks to @raoyongming for the help to this function
@staticmethod
def forward(ctx, input, fold):
# not support higher order gradient
# input = input.detach_()
ctx.fold_ = fold
n, t, c, h, w = input.size()
buffer = input.data.new(n, t, fold, h, w).zero_()
buffer[:, :-1] = input.data[:, 1:, :fold]
input.data[:, :, :fold] = buffer
buffer.zero_()
buffer[:, 1:] = input.data[:, :-1, fold: 2 * fold]
input.data[:, :, fold: 2 * fold] = buffer
return input
@staticmethod
def backward(ctx, grad_output):
# grad_output = grad_output.detach_()
fold = ctx.fold_
n, t, c, h, w = grad_output.size()
buffer = grad_output.data.new(n, t, fold, h, w).zero_()
buffer[:, 1:] = grad_output.data[:, :-1, :fold]
grad_output.data[:, :, :fold] = buffer
buffer.zero_()
buffer[:, :-1] = grad_output.data[:, 1:, fold: 2 * fold]
grad_output.data[:, :, fold: 2 * fold] = buffer
return grad_output, None
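# Minimal sketch of the channel-shift pattern implemented by the classes above (illustrative
# only; TemporalShift_random shifts a randomly sampled channel subset, while this helper
# shifts the first `fold` channels left in time and the next `fold` right, as in the standard
# TSM layout):
def _demo_temporal_shift(x, fold):
    """x: [B, C, T] feature tensor; returns a shifted tensor of the same shape."""
    x = x.permute(0, 2, 1)                                  # [B, T, C]
    out = torch.zeros_like(x)
    out[:, :-1, :fold] = x[:, 1:, :fold]                    # shift left in time
    out[:, 1:, fold:2 * fold] = x[:, :-1, fold:2 * fold]    # shift right in time
    out[:, :, 2 * fold:] = x[:, :, 2 * fold:]               # remaining channels unchanged
    return out.permute(0, 2, 1)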
class BMN(nn.Module):
def __init__(self, opt):
super(BMN, self).__init__()
self.tscale = opt["temporal_scale"] # 100
self.prop_boundary_ratio = opt["prop_boundary_ratio"] # 0.5
self.num_sample = opt["num_sample"] # 32
self.num_sample_perbin = opt["num_sample_perbin"] # 3
self.feat_dim=opt["feat_dim"] # 400
self.tem_best_loss = 10000000
self.hidden_dim_1d = 256
self.hidden_dim_2d = 128
self.hidden_dim_3d = 512
self._get_interp1d_mask()
# Base Module
self.x_1d_b = nn.Sequential(
nn.Conv1d(self.feat_dim, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
nn.ReLU(inplace=True),
nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4), # 256
nn.ReLU(inplace=True)
)
self.recons = nn.Sequential(
nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
nn.ReLU(inplace=True),
nn.Conv1d(self.hidden_dim_1d, self.feat_dim, kernel_size=3, padding=1, groups=4), # 256
# nn.ReLU(inplace=True)
)
self.clip_order = nn.Sequential(
# nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
# nn.ReLU(inplace=True),
nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=3, padding=1), # 256
nn.ReLU(inplace=True)
)
self.clip_order_drop = nn.Dropout(0.5)
self.clip_order_linear = nn.Linear(100, 2)
# Temporal Evaluation Module
self.x_1d_s = nn.Sequential(
nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
nn.ReLU(inplace=True),
nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1),
nn.Sigmoid()
)
self.x_1d_e = nn.Sequential(
nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
nn.ReLU(inplace=True),
nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1),
nn.Sigmoid()
)
# Proposal Evaluation Module
self.x_1d_p = nn.Sequential(
nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1),
nn.ReLU(inplace=True)
)
self.x_3d_p = nn.Sequential(
nn.Conv3d(self.hidden_dim_1d, self.hidden_dim_3d, kernel_size=(self.num_sample, 1, 1), stride=(self.num_sample, 1, 1)), # 512
nn.ReLU(inplace=True)
)
self.x_2d_p = nn.Sequential(
nn.Conv2d(self.hidden_dim_3d, self.hidden_dim_2d, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv2d(self.hidden_dim_2d, self.hidden_dim_2d, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(self.hidden_dim_2d, self.hidden_dim_2d, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(self.hidden_dim_2d, 2, kernel_size=1),
nn.Sigmoid()
)
def forward(self, x, recons=False, clip_order=False): # [B,400,100]
base_feature = self.x_1d_b(x) # [B,256,100]
recons_feature = self.recons(base_feature)
if recons:
return recons_feature
batch_size, C, T = base_feature.size()
if clip_order:
return self.clip_order_linear(self.clip_order_drop(self.clip_order(base_feature).view(batch_size, T)))
start = self.x_1d_s(base_feature).squeeze(1) # [B,1,100]->[B,100] sigmoid()
end = self.x_1d_e(base_feature).squeeze(1)
confidence_map = self.x_1d_p(base_feature) # [B,256,100]———>[B,256,100]+relu()
confidence_map = self._boundary_matching_layer(confidence_map) # [B, 256, 32, 100, 100]
# set_trace()
confidence_map = self.x_3d_p(confidence_map).squeeze(2)
confidence_map = self.x_2d_p(confidence_map) # [B, 2, 100, 100]
return confidence_map, start, end # [B, 2, 100, 100], [B,100],[B,100]
def _boundary_matching_layer(self, x):
input_size = x.size() # [B,256,100]
out = torch.matmul(x, self.sample_mask).reshape(input_size[0],input_size[1],self.num_sample,self.tscale,self.tscale)
return out # sample_mask= [100, 320000]
def _get_interp1d_bin_mask(self, seg_xmin, seg_xmax, tscale, num_sample, num_sample_perbin):
# generate sample mask for a boundary-matching pair
        plen = float(seg_xmax - seg_xmin)  # duration
plen_sample = plen / (num_sample * num_sample_perbin - 1.0)
total_samples = [
seg_xmin + plen_sample * ii
for ii in range(num_sample * num_sample_perbin)
] # num_sample * num_sample_perbin
p_mask = []
for idx in range(num_sample): # 32
bin_samples = total_samples[idx * num_sample_perbin:(idx + 1) * num_sample_perbin]
bin_vector = np.zeros([tscale])
for sample in bin_samples:
sample_upper = math.ceil(sample)
sample_decimal, sample_down = math.modf(sample)
if int(sample_down) <= (tscale - 1) and int(sample_down) >= 0:
bin_vector[int(sample_down)] += 1 - sample_decimal # down
if int(sample_upper) <= (tscale - 1) and int(sample_upper) >= 0:
bin_vector[int(sample_upper)] += sample_decimal # upper
bin_vector = 1.0 / num_sample_perbin * bin_vector
p_mask.append(bin_vector)
p_mask = np.stack(p_mask, axis=1) # 100*32
return p_mask
def _get_interp1d_mask(self):
# generate sample mask for each point in Boundary-Matching Map
mask_mat = []
for start_index in range(self.tscale): # 100
mask_mat_vector = []
for duration_index in range(self.tscale): # 100
if start_index + duration_index < self.tscale: #
p_xmin = start_index # start
p_xmax = start_index + duration_index # end
                    center_len = float(p_xmax - p_xmin) + 1  # duration
sample_xmin = p_xmin - center_len * self.prop_boundary_ratio # sample_start
sample_xmax = p_xmax + center_len * self.prop_boundary_ratio # sample_end
p_mask = self._get_interp1d_bin_mask(
sample_xmin, sample_xmax, self.tscale, self.num_sample, # 32
self.num_sample_perbin)
else:
p_mask = np.zeros([self.tscale, self.num_sample]) # [100,32]
mask_mat_vector.append(p_mask) #
mask_mat_vector = np.stack(mask_mat_vector, axis=2) # [100,32,100]
mask_mat.append(mask_mat_vector)
mask_mat = np.stack(mask_mat, axis=3) # [100,32,100,100]
mask_mat = mask_mat.astype(np.float32)
self.sample_mask = nn.Parameter(torch.Tensor(mask_mat).view(self.tscale, -1), requires_grad=False) # [100,32*100*100]
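
# --- Illustrative sketch (added for clarity; not part of the original file) ---
# _get_interp1d_bin_mask spreads `num_sample` sampling points (each averaged
# over `num_sample_perbin` sub-points) onto the discrete temporal axis with
# linear interpolation, so every column of the (tscale, num_sample) matrix
# sums to roughly 1 whenever the proposal lies inside [0, tscale - 1]. The
# helper below checks that on a toy proposal; the name and the toy numbers
# are assumptions made purely for illustration.
def _demo_interp1d_bin_mask(tscale=100, num_sample=32, num_sample_perbin=3):
    # the method never touches `self`, so passing None here is harmless
    mask = BMN._get_interp1d_bin_mask(None, 10.0, 40.0, tscale,
                                      num_sample, num_sample_perbin)
    assert mask.shape == (tscale, num_sample)
    return np.allclose(mask.sum(axis=0), 1.0)  # each sample point has unit weight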
if __name__ == '__main__':
import opts
opt = opts.parse_opt()
opt = vars(opt)
model=BMN(opt).cuda()
input=torch.randn(2,400,100).cuda()
a,b,c=model(input)
print(a.shape,b.shape,c.shape)
| 13,366 | 43.115512 | 138 | py |
SSTAP | SSTAP-main/data/activitynet_feature_cuhk/ldb_process.py | # -*- coding: utf-8 -*-
"""
Created on Mon May 15 22:31:31 2017
@author: wzmsltw
"""
import caffe
import leveldb
import numpy as np
from caffe.proto import caffe_pb2
import pandas as pd
col_names=[]
for i in range(200):
col_names.append("f"+str(i))
df=pd.read_table("./input_spatial_list.txt",names=['image','frame','label'],sep=" ")
db = leveldb.LevelDB('./LDB')
datum = caffe_pb2.Datum()
i=0
video_name="init"
videoData=np.reshape([],[-1,200])
for key, value in db.RangeIter():
tmp_video_name=df.image.values[i].split('/')[-1]
if tmp_video_name !=video_name:
outDf=pd.DataFrame(videoData,columns=col_names)
outDf.to_csv("./csv_raw/"+video_name+".csv",index=False)
videoData=np.reshape([],[-1,200])
video_name=tmp_video_name
i+=1
datum.ParseFromString(value)
label = datum.label
data = caffe.io.datum_to_array(datum)
data=np.reshape(data,[1,200])
videoData=np.concatenate((videoData,data))
del db
| 983 | 21.883721 | 84 | py |
Graph-Unlearning | Graph-Unlearning-main/main.py | import logging
import os
import torch
from exp.exp_graph_partition import ExpGraphPartition
from exp.exp_node_edge_unlearning import ExpNodeEdgeUnlearning
from exp.exp_unlearning import ExpUnlearning
from exp.exp_attack_unlearning import ExpAttackUnlearning
from parameter_parser import parameter_parser
def config_logger(save_name):
# create logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s:%(asctime)s: - %(name)s - : %(message)s')
# create console handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
def main(args, exp):
# config the logger
logger_name = "_".join((exp, args['dataset_name'], args['partition_method'], str(args['num_shards']), str(args['test_ratio'])))
config_logger(logger_name)
logging.info(logger_name)
torch.set_num_threads(args["num_threads"])
torch.cuda.set_device(args["cuda"])
os.environ["CUDA_VISIBLE_DEVICES"] = str(args["cuda"])
# subroutine entry for different methods
if exp == 'partition':
ExpGraphPartition(args)
elif exp == 'unlearning':
ExpUnlearning(args)
elif exp == 'node_edge_unlearning':
ExpNodeEdgeUnlearning(args)
elif exp == 'attack_unlearning':
ExpAttackUnlearning(args)
else:
        raise Exception('unsupported experiment')
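
# Illustrative usage note (added for clarity; not part of the original file).
# The dispatch above expects args['exp'] to be one of 'partition',
# 'unlearning', 'node_edge_unlearning' or 'attack_unlearning', with the
# remaining keys supplied by parameter_parser(). A typical invocation might
# therefore look like the hypothetical command below (the exact flag spelling
# is an assumption, not verified against parameter_parser):
#
#   python main.py --exp partition --dataset_name cora \
#       --partition_method <method> --num_shards 10 --test_ratio 0.1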
if __name__ == "__main__":
args = parameter_parser()
main(args, args['exp'])
| 1,499 | 27.846154 | 131 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/models/sdne.py | # -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,[email protected]
Reference:
[1] Wang D, Cui P, Zhu W. Structural deep network embedding[C]//Proceedings of the 22nd ACM SIGKDD international conference on Knowledge discovery and data mining. ACM, 2016: 1225-1234.(https://www.kdd.org/kdd2016/papers/files/rfp0191-wangAemb.pdf)
"""
import time
import numpy as np
import scipy.sparse as sp
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.callbacks import History
from tensorflow.python.keras.layers import Dense, Input
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.regularizers import l1_l2
from ..utils import preprocess_nxgraph
def l_2nd(beta):
def loss_2nd(y_true, y_pred):
b_ = np.ones_like(y_true)
b_[y_true != 0] = beta
x = K.square((y_true - y_pred) * b_)
t = K.sum(x, axis=-1, )
return K.mean(t)
return loss_2nd
def l_1st(alpha):
def loss_1st(y_true, y_pred):
L = y_true
Y = y_pred
batch_size = tf.to_float(K.shape(L)[0])
return alpha * 2 * tf.linalg.trace(tf.matmul(tf.matmul(Y, L, transpose_a=True), Y)) / batch_size
return loss_1st
def create_model(node_size, hidden_size=[256, 128], l1=1e-5, l2=1e-4):
A = Input(shape=(node_size,))
L = Input(shape=(None,))
fc = A
for i in range(len(hidden_size)):
if i == len(hidden_size) - 1:
fc = Dense(hidden_size[i], activation='relu',
kernel_regularizer=l1_l2(l1, l2), name='1st')(fc)
else:
fc = Dense(hidden_size[i], activation='relu',
kernel_regularizer=l1_l2(l1, l2))(fc)
Y = fc
for i in reversed(range(len(hidden_size) - 1)):
fc = Dense(hidden_size[i], activation='relu',
kernel_regularizer=l1_l2(l1, l2))(fc)
A_ = Dense(node_size, 'relu', name='2nd')(fc)
model = Model(inputs=[A, L], outputs=[A_, Y])
emb = Model(inputs=A, outputs=Y)
return model, emb
class SDNE(object):
def __init__(self, graph, hidden_size=[32, 16], alpha=1e-6, beta=5., nu1=1e-5, nu2=1e-4, ):
self.graph = graph
# self.g.remove_edges_from(self.g.selfloop_edges())
self.idx2node, self.node2idx = preprocess_nxgraph(self.graph)
self.node_size = self.graph.number_of_nodes()
self.hidden_size = hidden_size
self.alpha = alpha
self.beta = beta
self.nu1 = nu1
self.nu2 = nu2
self.A, self.L = self._create_A_L(
self.graph, self.node2idx) # Adj Matrix,L Matrix
self.reset_model()
self.inputs = [self.A, self.L]
self._embeddings = {}
def reset_model(self, opt='adam'):
self.model, self.emb_model = create_model(self.node_size, hidden_size=self.hidden_size, l1=self.nu1,
l2=self.nu2)
self.model.compile(opt, [l_2nd(self.beta), l_1st(self.alpha)])
self.get_embeddings()
def train(self, batch_size=1024, epochs=1, initial_epoch=0, verbose=1):
if batch_size >= self.node_size:
if batch_size > self.node_size:
print('batch_size({0}) > node_size({1}),set batch_size = {1}'.format(
batch_size, self.node_size))
batch_size = self.node_size
return self.model.fit([self.A.todense(), self.L.todense()], [self.A.todense(), self.L.todense()],
batch_size=batch_size, epochs=epochs, initial_epoch=initial_epoch, verbose=verbose,
shuffle=False, )
else:
steps_per_epoch = (self.node_size - 1) // batch_size + 1
hist = History()
hist.on_train_begin()
logs = {}
for epoch in range(initial_epoch, epochs):
start_time = time.time()
losses = np.zeros(3)
for i in range(steps_per_epoch):
index = np.arange(
i * batch_size, min((i + 1) * batch_size, self.node_size))
A_train = self.A[index, :].todense()
L_mat_train = self.L[index][:, index].todense()
inp = [A_train, L_mat_train]
batch_losses = self.model.train_on_batch(inp, inp)
losses += batch_losses
losses = losses / steps_per_epoch
logs['loss'] = losses[0]
logs['2nd_loss'] = losses[1]
logs['1st_loss'] = losses[2]
epoch_time = int(time.time() - start_time)
hist.on_epoch_end(epoch, logs)
if verbose > 0:
print('Epoch {0}/{1}'.format(epoch + 1, epochs))
print('{0}s - loss: {1: .4f} - 2nd_loss: {2: .4f} - 1st_loss: {3: .4f}'.format(
epoch_time, losses[0], losses[1], losses[2]))
return hist
def evaluate(self, ):
return self.model.evaluate(x=self.inputs, y=self.inputs, batch_size=self.node_size)
def get_embeddings(self):
self._embeddings = {}
embeddings = self.emb_model.predict(self.A.todense(), batch_size=self.node_size)
look_back = self.idx2node
for i, embedding in enumerate(embeddings):
self._embeddings[look_back[i]] = embedding
return self._embeddings
def _create_A_L(self, graph, node2idx):
node_size = graph.number_of_nodes()
A_data = []
A_row_index = []
A_col_index = []
for edge in graph.edges():
v1, v2 = edge
edge_weight = graph[v1][v2].get('weight', 1)
A_data.append(edge_weight)
A_row_index.append(node2idx[v1])
A_col_index.append(node2idx[v2])
A = sp.csr_matrix((A_data, (A_row_index, A_col_index)), shape=(node_size, node_size))
A_ = sp.csr_matrix((A_data + A_data, (A_row_index + A_col_index, A_col_index + A_row_index)),
shape=(node_size, node_size))
D = sp.diags(A_.sum(axis=1).flatten().tolist()[0])
L = D - A_
return A, L
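
# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# SDNE autoencodes the dense adjacency rows (second-order loss, weighted by
# beta on observed links) while the Laplacian term pulls embeddings of linked
# nodes together (first-order loss, weighted by alpha). The intended call
# sequence is sketched below; the graph size and hyper-parameters are
# assumptions chosen only for illustration.
def _demo_sdne_usage():
    import networkx as nx
    g = nx.fast_gnp_random_graph(100, 0.05, seed=0)
    model = SDNE(g, hidden_size=[32, 16], alpha=1e-6, beta=5.0)
    model.train(batch_size=64, epochs=2, verbose=0)
    return model.get_embeddings()   # {node: 16-d embedding vector}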
| 6,214 | 34.514286 | 252 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/models/line.py | # -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,[email protected]
Reference:
[1] Tang J, Qu M, Wang M, et al. Line: Large-scale information network embedding[C]//Proceedings of the 24th International Conference on World Wide Web. International World Wide Web Conferences Steering Committee, 2015: 1067-1077.(https://arxiv.org/pdf/1503.03578.pdf)
"""
import math
import random
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.layers import Embedding, Input, Lambda
from tensorflow.python.keras.models import Model
from ..alias import create_alias_table, alias_sample
from ..utils import preprocess_nxgraph
def line_loss(y_true, y_pred):
return -K.mean(K.log(K.sigmoid(y_true*y_pred)))
def create_model(numNodes, embedding_size, order='second'):
v_i = Input(shape=(1,))
v_j = Input(shape=(1,))
first_emb = Embedding(numNodes, embedding_size, name='first_emb')
second_emb = Embedding(numNodes, embedding_size, name='second_emb')
context_emb = Embedding(numNodes, embedding_size, name='context_emb')
v_i_emb = first_emb(v_i)
v_j_emb = first_emb(v_j)
v_i_emb_second = second_emb(v_i)
v_j_context_emb = context_emb(v_j)
first = Lambda(lambda x: tf.reduce_sum(
x[0]*x[1], axis=-1, keep_dims=False), name='first_order')([v_i_emb, v_j_emb])
second = Lambda(lambda x: tf.reduce_sum(
x[0]*x[1], axis=-1, keep_dims=False), name='second_order')([v_i_emb_second, v_j_context_emb])
if order == 'first':
output_list = [first]
elif order == 'second':
output_list = [second]
else:
output_list = [first, second]
model = Model(inputs=[v_i, v_j], outputs=output_list)
return model, {'first': first_emb, 'second': second_emb}
class LINE:
def __init__(self, graph, embedding_size=8, negative_ratio=5, order='second',):
"""
:param graph:
:param embedding_size:
:param negative_ratio:
:param order: 'first','second','all'
"""
if order not in ['first', 'second', 'all']:
            raise ValueError("order must be 'first', 'second', or 'all'")
self.graph = graph
self.idx2node, self.node2idx = preprocess_nxgraph(graph)
self.use_alias = True
self.rep_size = embedding_size
self.order = order
self._embeddings = {}
self.negative_ratio = negative_ratio
self.order = order
self.node_size = graph.number_of_nodes()
self.edge_size = graph.number_of_edges()
self.samples_per_epoch = self.edge_size*(1+negative_ratio)
self._gen_sampling_table()
self.reset_model()
def reset_training_config(self, batch_size, times):
self.batch_size = batch_size
self.steps_per_epoch = (
(self.samples_per_epoch - 1) // self.batch_size + 1)*times
def reset_model(self, opt='adam'):
self.model, self.embedding_dict = create_model(
self.node_size, self.rep_size, self.order)
self.model.compile(opt, line_loss)
self.batch_it = self.batch_iter(self.node2idx)
def _gen_sampling_table(self):
# create sampling table for vertex
power = 0.75
numNodes = self.node_size
node_degree = np.zeros(numNodes) # out degree
node2idx = self.node2idx
for edge in self.graph.edges():
node_degree[node2idx[edge[0]]
] += self.graph[edge[0]][edge[1]].get('weight', 1.0)
total_sum = sum([math.pow(node_degree[i], power)
for i in range(numNodes)])
norm_prob = [float(math.pow(node_degree[j], power)) /
total_sum for j in range(numNodes)]
self.node_accept, self.node_alias = create_alias_table(norm_prob)
# create sampling table for edge
numEdges = self.graph.number_of_edges()
total_sum = sum([self.graph[edge[0]][edge[1]].get('weight', 1.0)
for edge in self.graph.edges()])
norm_prob = [self.graph[edge[0]][edge[1]].get('weight', 1.0) *
numEdges / total_sum for edge in self.graph.edges()]
self.edge_accept, self.edge_alias = create_alias_table(norm_prob)
def batch_iter(self, node2idx):
edges = [(node2idx[x[0]], node2idx[x[1]]) for x in self.graph.edges()]
data_size = self.graph.number_of_edges()
shuffle_indices = np.random.permutation(np.arange(data_size))
# positive or negative mod
mod = 0
mod_size = 1 + self.negative_ratio
h = []
t = []
sign = 0
count = 0
start_index = 0
end_index = min(start_index + self.batch_size, data_size)
while True:
if mod == 0:
h = []
t = []
for i in range(start_index, end_index):
if random.random() >= self.edge_accept[shuffle_indices[i]]:
shuffle_indices[i] = self.edge_alias[shuffle_indices[i]]
cur_h = edges[shuffle_indices[i]][0]
cur_t = edges[shuffle_indices[i]][1]
h.append(cur_h)
t.append(cur_t)
sign = np.ones(len(h))
else:
sign = np.ones(len(h))*-1
t = []
for i in range(len(h)):
t.append(alias_sample(
self.node_accept, self.node_alias))
if self.order == 'all':
yield ([np.array(h), np.array(t)], [sign, sign])
else:
yield ([np.array(h), np.array(t)], [sign])
mod += 1
mod %= mod_size
if mod == 0:
start_index = end_index
end_index = min(start_index + self.batch_size, data_size)
if start_index >= data_size:
count += 1
mod = 0
h = []
shuffle_indices = np.random.permutation(np.arange(data_size))
start_index = 0
end_index = min(start_index + self.batch_size, data_size)
def get_embeddings(self,):
self._embeddings = {}
if self.order == 'first':
embeddings = self.embedding_dict['first'].get_weights()[0]
elif self.order == 'second':
embeddings = self.embedding_dict['second'].get_weights()[0]
else:
embeddings = np.hstack((self.embedding_dict['first'].get_weights()[
0], self.embedding_dict['second'].get_weights()[0]))
idx2node = self.idx2node
for i, embedding in enumerate(embeddings):
self._embeddings[idx2node[i]] = embedding
return self._embeddings
def train(self, batch_size=1024, epochs=1, initial_epoch=0, verbose=1, times=1):
self.reset_training_config(batch_size, times)
hist = self.model.fit_generator(self.batch_it, epochs=epochs, initial_epoch=initial_epoch, steps_per_epoch=self.steps_per_epoch,
verbose=verbose)
return hist
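
# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# LINE alias-samples positive edges by weight and pairs each positive batch
# with `negative_ratio` corrupted target nodes, optimising -log sigmoid(sign *
# score). The intended call sequence is sketched below; the toy graph and
# hyper-parameters are assumptions chosen only for illustration.
def _demo_line_usage():
    import networkx as nx
    g = nx.karate_club_graph()
    model = LINE(g, embedding_size=16, negative_ratio=5, order='second')
    model.train(batch_size=128, epochs=2, verbose=0)
    return model.get_embeddings()   # {node: 16-d embedding vector}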
| 7,184 | 32.574766 | 272 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_utils/utils.py | import os
import errno
import numpy as np
import pandas as pd
import networkx as nx
import torch
from scipy.sparse import coo_matrix
from tqdm import tqdm
def graph_reader(path):
"""
Function to read the graph from the path.
:param path: Path to the edge list.
:return graph: NetworkX object returned.
"""
graph = nx.from_edgelist(pd.read_csv(path).values.tolist())
return graph
def feature_reader(path):
"""
Reading the sparse feature matrix stored as csv from the disk.
:param path: Path to the csv file.
:return features: Dense matrix of features.
"""
features = pd.read_csv(path)
node_index = features["node_id"].values.tolist()
feature_index = features["feature_id"].values.tolist()
feature_values = features["value"].values.tolist()
node_count = max(node_index) + 1
feature_count = max(feature_index) + 1
features = coo_matrix((feature_values, (node_index, feature_index)), shape=(node_count, feature_count)).toarray()
return features
def target_reader(path):
"""
Reading the target vector from disk.
:param path: Path to the target.
:return target: Target vector.
"""
target = np.array(pd.read_csv(path)["target"]).reshape(-1, 1)
return target
def make_adjacency(graph, max_degree, sel=None):
all_nodes = np.array(graph.nodes())
# Initialize w/ links to a dummy node
n_nodes = len(all_nodes)
adj = (np.zeros((n_nodes + 1, max_degree)) + n_nodes).astype(int)
if sel is not None:
# only look at nodes in training set
all_nodes = all_nodes[sel]
for node in tqdm(all_nodes):
neibs = np.array(list(graph.neighbors(node)))
if sel is not None:
neibs = neibs[sel[neibs]]
if len(neibs) > 0:
if len(neibs) > max_degree:
neibs = np.random.choice(neibs, max_degree, replace=False)
elif len(neibs) < max_degree:
extra = np.random.choice(neibs, max_degree - neibs.shape[0], replace=True)
neibs = np.concatenate([neibs, extra])
adj[node, :] = neibs
return adj
def connected_component_subgraphs(graph):
"""
Find all connected subgraphs in a networkx Graph
Args:
graph (Graph): A networkx Graph
Yields:
generator: A subgraph generator
"""
for c in nx.connected_components(graph):
yield graph.subgraph(c)
def check_exist(file_name):
if not os.path.exists(os.path.dirname(file_name)):
try:
os.makedirs(os.path.dirname(file_name))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
def filter_edge_index(edge_index, node_indices, reindex=True):
assert np.all(np.diff(node_indices) >= 0), 'node_indices must be sorted'
if isinstance(edge_index, torch.Tensor):
edge_index = edge_index.cpu()
node_index = np.isin(edge_index, node_indices)
col_index = np.nonzero(np.logical_and(node_index[0], node_index[1]))[0]
edge_index = edge_index[:, col_index]
if reindex:
return np.searchsorted(node_indices, edge_index)
else:
return edge_index
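
def _demo_filter_edge_index():
    # Illustrative example (added for clarity; not part of the original file).
    # Only edges whose BOTH endpoints lie in `node_indices` survive; with
    # reindex=True the endpoints are remapped to their positions inside
    # `node_indices` via np.searchsorted. The values below are assumptions.
    edge_index = torch.tensor([[0, 2, 5, 3], [2, 5, 0, 4]])
    node_indices = np.array([0, 2, 5])            # must be sorted
    out = filter_edge_index(edge_index, node_indices)
    # out == [[0, 1, 2], [1, 2, 0]] -- the (3, 4) edge is dropped, ids remapped
    return out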
def pyg_to_nx(data):
"""
Convert a torch geometric Data to networkx Graph.
Args:
data (Data): A torch geometric Data.
Returns:
Graph: A networkx Graph.
"""
graph = nx.Graph()
graph.add_nodes_from(np.arange(data.num_nodes))
edge_index = data.edge_index.numpy()
for u, v in np.transpose(edge_index):
graph.add_edge(u, v)
return graph
def edge_index_to_nx(edge_index, num_nodes):
"""
Convert a torch geometric Data to networkx Graph by edge_index.
Args:
edge_index (Data.edge_index): A torch geometric Data.
num_nodes (int): Number of nodes in a graph.
Returns:
Graph: networkx Graph
"""
graph = nx.Graph()
graph.add_nodes_from(np.arange(num_nodes))
edge_index = edge_index.numpy()
for u, v in np.transpose(edge_index):
graph.add_edge(u, v)
return graph
def filter_edge_index_1(data, node_indices):
"""
Remove unnecessary edges from a torch geometric Data, only keep the edges between node_indices.
Args:
data (Data): A torch geometric Data.
node_indices (list): A list of nodes to be deleted from data.
Returns:
data.edge_index: The new edge_index after removing the node_indices.
"""
if isinstance(data.edge_index, torch.Tensor):
data.edge_index = data.edge_index.cpu()
edge_index = data.edge_index
node_index = np.isin(edge_index, node_indices)
col_index = np.nonzero(np.logical_and(node_index[0], node_index[1]))[0]
edge_index = data.edge_index[:, col_index]
return np.searchsorted(node_indices, edge_index)
| 4,851 | 27.046243 | 117 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_aggregator/opt_dataset.py | from torch.utils.data import Dataset
class OptDataset(Dataset):
def __init__(self, posteriors, labels):
self.posteriors = posteriors
self.labels = labels
def __getitem__(self, index):
ret_posterior = {}
for shard, post in self.posteriors.items():
ret_posterior[shard] = post[index]
return ret_posterior, self.labels[index]
def __len__(self):
return self.labels.shape[0]
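
# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Each item is a (dict of per-shard posteriors, label) pair; PyTorch's default
# collate_fn batches dict values key by key, so a DataLoader yields
# {shard: (batch, n_classes) tensor} dicts. Shapes below are assumptions.
def _demo_opt_dataset():
    import torch
    from torch.utils.data import DataLoader
    posteriors = {0: torch.rand(10, 3), 1: torch.rand(10, 3)}   # 2 shards, 3 classes
    labels = torch.randint(0, 3, (10,))
    loader = DataLoader(OptDataset(posteriors, labels), batch_size=4)
    batch_posteriors, batch_labels = next(iter(loader))
    return batch_posteriors[0].shape, batch_labels.shape        # (4, 3), (4,)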
| 448 | 22.631579 | 51 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_aggregator/optimal_aggregator.py | import copy
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader
from torch_geometric.data import Data
from lib_aggregator.opt_dataset import OptDataset
from lib_dataset.data_store import DataStore
from lib_utils import utils
class OptimalAggregator:
def __init__(self, run, target_model, data, args):
self.logger = logging.getLogger('optimal_aggregator')
self.args = args
self.run = run
self.target_model = target_model
self.data = data
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_shards = args['num_shards']
def generate_train_data(self):
data_store = DataStore(self.args)
train_indices, _ = data_store.load_train_test_split()
# sample a set of nodes from train_indices
if self.args["num_opt_samples"] == 1000:
train_indices = np.random.choice(train_indices, size=1000, replace=False)
elif self.args["num_opt_samples"] == 10000:
train_indices = np.random.choice(train_indices, size=int(train_indices.shape[0] * 0.1), replace=False)
elif self.args["num_opt_samples"] == 1:
train_indices = np.random.choice(train_indices, size=int(train_indices.shape[0]), replace=False)
train_indices = np.sort(train_indices)
self.logger.info("Using %s samples for optimization" % (int(train_indices.shape[0])))
x = self.data.x[train_indices]
y = self.data.y[train_indices]
edge_index = utils.filter_edge_index(self.data.edge_index, train_indices)
train_data = Data(x=x, edge_index=torch.from_numpy(edge_index), y=y)
train_data.train_mask = torch.zeros(train_indices.shape[0], dtype=torch.bool)
train_data.test_mask = torch.ones(train_indices.shape[0], dtype=torch.bool)
self.true_labels = y
self.posteriors = {}
for shard in range(self.num_shards):
self.target_model.data = train_data
data_store.load_target_model(self.run, self.target_model, shard)
self.posteriors[shard] = self.target_model.posterior().to(self.device)
def optimization(self):
weight_para = nn.Parameter(torch.full((self.num_shards,), fill_value=1.0 / self.num_shards), requires_grad=True)
optimizer = optim.Adam([weight_para], lr=self.args['opt_lr'])
scheduler = MultiStepLR(optimizer, milestones=[500, 1000], gamma=self.args['opt_lr'])
train_dset = OptDataset(self.posteriors, self.true_labels)
train_loader = DataLoader(train_dset, batch_size=32, shuffle=True, num_workers=0)
min_loss = 1000.0
for epoch in range(self.args['opt_num_epochs']):
loss_all = 0.0
for posteriors, labels in train_loader:
labels = labels.to(self.device)
optimizer.zero_grad()
loss = self._loss_fn(posteriors, labels, weight_para)
loss.backward()
loss_all += loss
optimizer.step()
with torch.no_grad():
weight_para[:] = torch.clamp(weight_para, min=0.0)
scheduler.step()
if loss_all < min_loss:
ret_weight_para = copy.deepcopy(weight_para)
min_loss = loss_all
self.logger.info('epoch: %s, loss: %s' % (epoch, loss_all))
return ret_weight_para / torch.sum(ret_weight_para)
def _loss_fn(self, posteriors, labels, weight_para):
aggregate_posteriors = torch.zeros_like(posteriors[0])
for shard in range(self.num_shards):
aggregate_posteriors += weight_para[shard] * posteriors[shard]
aggregate_posteriors = F.softmax(aggregate_posteriors, dim=1)
loss_1 = F.cross_entropy(aggregate_posteriors, labels)
loss_2 = torch.sqrt(torch.sum(weight_para ** 2))
return loss_1 + loss_2
| 4,054 | 37.990385 | 120 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_aggregator/aggregator.py | import logging
import torch
torch.cuda.empty_cache()
from sklearn.metrics import f1_score
import numpy as np
from lib_aggregator.optimal_aggregator import OptimalAggregator
from lib_dataset.data_store import DataStore
class Aggregator:
def __init__(self, run, target_model, data, shard_data, args):
self.logger = logging.getLogger('Aggregator')
self.args = args
self.data_store = DataStore(self.args)
self.run = run
self.target_model = target_model
self.data = data
self.shard_data = shard_data
self.num_shards = args['num_shards']
def generate_posterior(self, suffix=""):
self.true_label = self.shard_data[0].y[self.shard_data[0]['test_mask']].detach().cpu().numpy()
self.posteriors = {}
for shard in range(self.args['num_shards']):
self.target_model.data = self.shard_data[shard]
self.data_store.load_target_model(self.run, self.target_model, shard, suffix)
self.posteriors[shard] = self.target_model.posterior()
self.logger.info("Saving posteriors.")
self.data_store.save_posteriors(self.posteriors, self.run, suffix)
def aggregate(self):
if self.args['aggregator'] == 'mean':
aggregate_f1_score = self._mean_aggregator()
elif self.args['aggregator'] == 'optimal':
aggregate_f1_score = self._optimal_aggregator()
elif self.args['aggregator'] == 'majority':
aggregate_f1_score = self._majority_aggregator()
else:
raise Exception("unsupported aggregator.")
return aggregate_f1_score
def _mean_aggregator(self):
posterior = self.posteriors[0]
for shard in range(1, self.num_shards):
posterior += self.posteriors[shard]
posterior = posterior / self.num_shards
return f1_score(self.true_label, posterior.argmax(axis=1).cpu().numpy(), average="micro")
def _majority_aggregator(self):
pred_labels = []
for shard in range(self.num_shards):
pred_labels.append(self.posteriors[shard].argmax(axis=1).cpu().numpy())
pred_labels = np.stack(pred_labels)
pred_label = np.argmax(
np.apply_along_axis(np.bincount, axis=0, arr=pred_labels, minlength=self.posteriors[0].shape[1]), axis=0)
return f1_score(self.true_label, pred_label, average="micro")
def _optimal_aggregator(self):
optimal = OptimalAggregator(self.run, self.target_model, self.data, self.args)
optimal.generate_train_data()
weight_para = optimal.optimization()
self.data_store.save_optimal_weight(weight_para, run=self.run)
posterior = self.posteriors[0] * weight_para[0]
for shard in range(1, self.num_shards):
posterior += self.posteriors[shard] * weight_para[shard]
return f1_score(self.true_label, posterior.argmax(axis=1).cpu().numpy(), average="micro")
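
# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The three strategies differ only in how per-shard posteriors are combined:
# mean averages score matrices, majority votes on argmax labels, and optimal
# reweights the scores with the weights learned by OptimalAggregator. The toy
# numbers below are assumptions chosen only to show the mechanics.
def _demo_aggregation_strategies():
    posteriors = {
        0: torch.tensor([[0.9, 0.1]]),
        1: torch.tensor([[0.2, 0.8]]),
        2: torch.tensor([[0.6, 0.4]]),
    }
    mean_pred = (sum(posteriors.values()) / len(posteriors)).argmax(dim=1)   # score-level fusion
    votes = torch.stack([p.argmax(dim=1) for p in posteriors.values()])
    majority_pred = votes.mode(dim=0).values                                 # label-level fusion
    weights = torch.tensor([0.5, 0.3, 0.2])   # e.g. weights from OptimalAggregator
    optimal_pred = sum(w * p for w, p in zip(weights, posteriors.values())).argmax(dim=1)
    return mean_pred, majority_pred, optimal_pred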
| 2,958 | 35.9875 | 117 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/metis_partition.py | import numpy as np
import networkx as nx
import pymetis
from torch_geometric.data import ClusterData
from torch_geometric.utils import from_networkx
from lib_graph_partition.partition import Partition
class MetisPartition(Partition):
def __init__(self, args, graph, dataset):
super(MetisPartition, self).__init__(args, graph, dataset)
self.graph = graph
self.args = args
self.data = dataset
def partition(self, recursive=False):
# recursive (bool, optional): If set to :obj:`True`, will use multilevel
# recursive bisection instead of multilevel k-way partitioning.
# (default: :obj:`False`)
# only use train data, not the whole dataset
self.train_data = from_networkx(self.graph)
data = ClusterData(self.train_data, self.args['num_shards'], recursive=recursive)
community_to_node = {}
for i in range(self.args['num_shards']):
community_to_node[i] = [*range(data.partptr[i], data.partptr[i+1], 1)]
# map node back to original graph
for com in range(self.args['num_shards']):
community_to_node[com] = np.array(list(self.graph.nodes))[data.partptr.numpy()[com]:data.partptr.numpy()[com+1]]
return community_to_node
class PyMetisPartition(Partition):
def __init__(self, args, graph, dataset):
super(PyMetisPartition, self).__init__(args, graph, dataset)
self.graph = graph
self.args = args
self.data = dataset
def partition(self, recursive=False):
# recursive (bool, optional): If set to :obj:`True`, will use multilevel
# recursive bisection instead of multilevel k-way partitioning.
# (default: :obj:`False`)
# only use train data, not the whole dataset
# map graph into new graph
mapping = {}
for i, node in enumerate(self.graph.nodes):
mapping[node] = i
partition_graph = nx.relabel_nodes(self.graph, mapping=mapping)
adj_list = []
for line in nx.generate_adjlist(partition_graph):
line_int = list(map(int, line.split()))
adj_list.append(np.array(line_int))
n_cuts, membership = pymetis.part_graph(self.args['num_shards'], adjacency=adj_list)
# map node back to original graph
community_to_node = {}
for shard_index in range(self.args['num_shards']):
community_to_node[shard_index] = np.array([node_id for node_id, node_shard_index in zip(list(mapping.keys()), membership) if node_shard_index == shard_index])
return community_to_node
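
# --- Illustrative sketch (added for clarity; not part of the original file) ---
# pymetis expects a 0-indexed adjacency list (one array of neighbours per
# node) and returns a per-node membership that the method above maps back to
# the original node ids. The toy graph and shard count are assumptions.
def _demo_pymetis_call():
    adjacency = [np.array([1, 2]),   # node 0 -- neighbours 1, 2
                 np.array([0, 2]),   # node 1 -- neighbours 0, 2
                 np.array([0, 1]),   # node 2 -- neighbours 0, 1
                 np.array([4]),      # node 3 -- neighbour 4
                 np.array([3])]      # node 4 -- neighbour 3
    n_cuts, membership = pymetis.part_graph(2, adjacency=adjacency)
    # membership is a list such as [0, 0, 0, 1, 1]: the triangle in one shard,
    # the isolated edge in the other (exact labels can vary between runs).
    return n_cuts, membership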
| 2,609 | 38.545455 | 170 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gnn_base.py | import logging
import pickle
import torch
class GNNBase:
def __init__(self):
self.logger = logging.getLogger('gnn')
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# self.device = torch.device('cpu')
self.model = None
self.embedding_dim = 0
self.data = None
self.subgraph_loader = None
def save_model(self, save_path):
self.logger.info('saving model')
torch.save(self.model.state_dict(), save_path)
def load_model(self, save_path):
self.logger.info('loading model')
device = torch.device('cpu')
self.model.load_state_dict(torch.load(save_path, map_location=device))
def save_paras(self, save_path):
self.logger.info('saving paras')
self.paras = {
'embedding_dim': self.embedding_dim
}
pickle.dump(self.paras, open(save_path, 'wb'))
def load_paras(self, save_path):
self.logger.info('loading paras')
return pickle.load(open(save_path, 'rb'))
def count_parameters(self):
return sum(p.numel() for p in self.model.parameters() if p.requires_grad)
def posterior(self):
self.model.eval()
self.model = self.model.to(self.device)
self.data = self.data.to(self.device)
posteriors = self.model(self.data)
for _, mask in self.data('test_mask'):
posteriors = posteriors[mask]
return posteriors.detach()
| 1,482 | 28.078431 | 82 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/node_classifier.py | import logging
import os
import torch
from sklearn.model_selection import train_test_split
torch.cuda.empty_cache()
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from torch_geometric.data import NeighborSampler
from torch_geometric.nn.conv.gcn_conv import gcn_norm
import numpy as np
import config
from lib_gnn_model.gat.gat_net_batch import GATNet
from lib_gnn_model.gin.gin_net_batch import GINNet
from lib_gnn_model.gcn.gcn_net_batch import GCNNet
from lib_gnn_model.graphsage.graphsage_net import SageNet
from lib_gnn_model.gnn_base import GNNBase
from parameter_parser import parameter_parser
from lib_utils import utils
class NodeClassifier(GNNBase):
def __init__(self, num_feats, num_classes, args, data=None):
super(NodeClassifier, self).__init__()
self.args = args
self.logger = logging.getLogger('node_classifier')
self.target_model = args['target_model']
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# self.device = 'cpu'
self.model = self.determine_model(num_feats, num_classes).to(self.device)
self.data = data
def determine_model(self, num_feats, num_classes):
self.logger.info('target model: %s' % (self.args['target_model'],))
if self.target_model == 'SAGE':
self.lr, self.decay = 0.01, 0.001
return SageNet(num_feats, 256, num_classes)
elif self.target_model == 'GAT':
self.lr, self.decay = 0.01, 0.001
return GATNet(num_feats, num_classes)
elif self.target_model == 'GCN':
self.lr, self.decay = 0.05, 0.0001
return GCNNet(num_feats, num_classes)
elif self.target_model == 'GIN':
self.lr, self.decay = 0.01, 0.0001
return GINNet(num_feats, num_classes)
else:
raise Exception('unsupported target model')
def train_model(self):
self.logger.info("training model")
self.model.train()
self.model.reset_parameters()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self.data.y = self.data.y.squeeze().to(self.device)
self._gen_train_loader()
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.decay)
for epoch in range(self.args['num_epochs']):
self.logger.info('epoch %s' % (epoch,))
for batch_size, n_id, adjs in self.train_loader:
# self.logger.info("batch size: %s"%(batch_size))
# `adjs` holds a list of `(edge_index, e_id, size)` tuples.
adjs = [adj.to(self.device) for adj in adjs]
test_node = np.nonzero(self.data.test_mask.cpu().numpy())[0]
intersect = np.intersect1d(test_node, n_id.numpy())
optimizer.zero_grad()
if self.target_model == 'GCN':
out = self.model(self.data.x[n_id], adjs, self.edge_weight)
else:
out = self.model(self.data.x[n_id], adjs)
loss = F.nll_loss(out, self.data.y[n_id[:batch_size]])
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info(f'Train: {train_acc:.4f}, Test: {test_acc:.4f}')
@torch.no_grad()
def evaluate_model(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self._gen_test_loader()
if self.target_model == 'GCN':
out = self.model.inference(self.data.x, self.test_loader, self.edge_weight, self.device)
else:
out = self.model.inference(self.data.x, self.test_loader, self.device)
y_true = self.data.y.cpu().unsqueeze(-1)
y_pred = out.argmax(dim=-1, keepdim=True)
results = []
for mask in [self.data.train_mask, self.data.test_mask]:
results += [int(y_pred[mask].eq(y_true[mask]).sum()) / int(mask.sum())]
return results
def posterior(self):
self.logger.debug("generating posteriors")
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self.model.eval()
self._gen_test_loader()
if self.target_model == 'GCN':
posteriors = self.model.inference(self.data.x, self.test_loader, self.edge_weight, self.device)
else:
posteriors = self.model.inference(self.data.x, self.test_loader, self.device)
for _, mask in self.data('test_mask'):
posteriors = F.log_softmax(posteriors[mask], dim=-1)
return posteriors.detach()
def generate_embeddings(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self._gen_test_loader()
if self.target_model == 'GCN':
logits = self.model.inference(self.data.x, self.test_loader, self.edge_weight, self.device)
else:
logits = self.model.inference(self.data.x, self.test_loader, self.device)
return logits
def _gen_train_loader(self):
self.logger.info("generate train loader")
train_indices = np.nonzero(self.data.train_mask.cpu().numpy())[0]
edge_index = utils.filter_edge_index(self.data.edge_index, train_indices, reindex=False)
if edge_index.shape[1] == 0:
edge_index = torch.tensor([[1, 2], [2, 1]])
self.train_loader = NeighborSampler(
edge_index, node_idx=self.data.train_mask,
sizes=[5, 5], num_nodes=self.data.num_nodes,
batch_size=self.args['batch_size'], shuffle=True,
num_workers=0)
if self.target_model == 'GCN':
_, self.edge_weight = gcn_norm(self.data.edge_index, edge_weight=None, num_nodes=self.data.x.shape[0],
add_self_loops=False)
self.logger.info("generate train loader finish")
def _gen_test_loader(self):
test_indices = np.nonzero(self.data.train_mask.cpu().numpy())[0]
if not self.args['use_test_neighbors']:
edge_index = utils.filter_edge_index(self.data.edge_index, test_indices, reindex=False)
else:
edge_index = self.data.edge_index
if edge_index.shape[1] == 0:
edge_index = torch.tensor([[1, 3], [3, 1]])
self.test_loader = NeighborSampler(
edge_index, node_idx=None,
sizes=[-1], num_nodes=self.data.num_nodes,
# sizes=[5], num_nodes=self.data.num_nodes,
batch_size=self.args['test_batch_size'], shuffle=False,
num_workers=0)
if self.target_model == 'GCN':
_, self.edge_weight = gcn_norm(self.data.edge_index, edge_weight=None, num_nodes=self.data.x.shape[0],
add_self_loops=False)
if __name__ == '__main__':
os.chdir('../')
args = parameter_parser()
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'cora'
dataset = Planetoid(config.RAW_DATA_PATH, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
train_indices, test_indices = train_test_split(np.arange((data.num_nodes)), test_size=0.2, random_state=100)
data.train_mask, data.test_mask = torch.zeros(data.num_nodes, dtype=torch.bool), torch.zeros(data.num_nodes,
dtype=torch.bool)
data.train_mask[train_indices] = True
data.test_mask[test_indices] = True
graphsage = NodeClassifier(dataset.num_features, dataset.num_classes, args, data)
graphsage.train_model()
| 7,966 | 38.636816 | 114 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gin/gin.py | import os
import logging
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid, Reddit
from lib_gnn_model.gnn_base import GNNBase
from lib_gnn_model.gin.gin_net import GINNet
import config
class GIN(GNNBase):
def __init__(self, num_feats, num_classes, data=None):
super(GIN, self).__init__()
self.logger = logging.getLogger('gin')
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model = GINNet(num_feats, num_classes).to(self.device)
self.data = data
def train_model(self, num_epochs=100):
self.model.train()
self.model.reset_parameters()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
optimizer = torch.optim.Adam(self.model.parameters(), lr=0.01)
for epoch in range(num_epochs):
self.logger.info('epoch %s' % (epoch,))
optimizer.zero_grad()
output = self.model(self.data)[self.data.train_mask]
loss = F.nll_loss(output, self.data.y[self.data.train_mask])
# loss = F.nll_loss(output, self.data.y.squeeze(1)[self.data.train_mask])
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info('train acc: %s, test acc: %s' % (train_acc, test_acc))
def evaluate_model(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
logits, accs = self.model(self.data), []
for _, mask in self.data('train_mask', 'test_mask'):
pred = logits[mask].max(1)[1]
acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()
accs.append(acc)
return accs
if __name__ == '__main__':
os.chdir('../../')
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'citeseer'
dataset = Planetoid(config.RAW_DATA_PATH, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
gin = GIN(dataset.num_features, dataset.num_classes, data)
gin.train_model()
| 2,338 | 31.943662 | 92 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gin/gin_net.py | import torch
import torch.nn.functional as F
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import GINConv
class GINNet(torch.nn.Module):
def __init__(self, num_feats, num_classes):
super(GINNet, self).__init__()
dim = 32
nn1 = Sequential(Linear(num_feats, dim), ReLU(), Linear(dim, dim))
self.conv1 = GINConv(nn1)
self.bn1 = torch.nn.BatchNorm1d(dim)
nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
self.conv2 = GINConv(nn2)
self.bn2 = torch.nn.BatchNorm1d(dim)
nn3 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
self.conv3 = GINConv(nn3)
self.bn3 = torch.nn.BatchNorm1d(dim)
nn4 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
self.conv4 = GINConv(nn4)
self.bn4 = torch.nn.BatchNorm1d(dim)
nn5 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
self.conv5 = GINConv(nn5)
self.bn5 = torch.nn.BatchNorm1d(dim)
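        # Note (added for clarity): conv3-conv5 and bn3-bn5 above are defined
        # but never used in forward(), which only runs conv1/bn1 and conv2/bn2.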
self.fc1 = Linear(dim, dim)
self.fc2 = Linear(dim, num_classes)
def forward(self, data, batch=None):
x = F.relu(self.conv1(data.x, data.edge_index))
x = self.bn1(x)
x = F.relu(self.conv2(x, data.edge_index))
x = self.bn2(x)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=0.5, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def reset_parameters(self):
self.conv1.reset_parameters()
self.conv2.reset_parameters()
| 1,558 | 30.18 | 74 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gat/gat_net.py | import torch
import torch.nn.functional as F
from torch_geometric.nn import GATConv
class GATNet(torch.nn.Module):
def __init__(self, num_feats, num_classes, dropout=0.6):
super(GATNet, self).__init__()
self.dropout = dropout
self.conv1 = GATConv(num_feats, 8, heads=8, dropout=self.dropout, add_self_loops=False)
# On the Pubmed dataset, use heads=8 in conv2.
self.conv2 = GATConv(8 * 8, num_classes, heads=1, concat=False, dropout=self.dropout, add_self_loops=False)
# self.conv2 = GATConv(8 * 8, num_classes, heads=8, concat=False, dropout=self.dropout, add_self_loops=False)
self.reset_parameters()
def forward(self, data):
x = F.dropout(data.x, p=self.dropout, training=self.training)
x = F.elu(self.conv1(x, data.edge_index))
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.conv2(x, data.edge_index)
return F.log_softmax(x, dim=1)
def reset_parameters(self):
self.conv1.reset_parameters()
self.conv2.reset_parameters()
| 1,074 | 36.068966 | 117 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gat/gat.py | import logging
import os
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
import config
from lib_gnn_model.gnn_base import GNNBase
from lib_gnn_model.gat.gat_net import GATNet
class GAT(GNNBase):
def __init__(self, num_feats, num_classes, data=None):
super(GAT, self).__init__()
self.logger = logging.getLogger('gat')
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model = GATNet(num_feats, num_classes)
self.data = data
def train_model(self, num_epoch=100):
self.model.train()
self.model.reset_parameters()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
optimizer = torch.optim.Adam(self.model.parameters(), lr=0.005, weight_decay=0.0001)
for epoch in range(num_epoch):
self.logger.info('epoch %s' % (epoch,))
optimizer.zero_grad()
output = self.model(self.data)[self.data.train_mask]
loss = F.nll_loss(output, self.data.y[self.data.train_mask])
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info('train acc: %s, test acc: %s' % (train_acc, test_acc))
def evaluate_model(self):
self.model.eval()
# self.model, self.data = self.model.to(self.device), self.data.to(self.device)
logits, accs = self.model(self.data), []
for _, mask in self.data('train_mask', 'test_mask'):
pred = logits[mask].max(1)[1]
acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()
accs.append(acc)
return accs
if __name__ == '__main__':
os.chdir('../../')
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'cora'
dataset = Planetoid(config.RAW_DATA_PATH, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
gat = GAT(dataset.num_features, dataset.num_classes, data)
gat.train_model()
# gat.evaluate_model()
| 2,273 | 31.028169 | 92 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/graphsage/graphsage.py | import os
import logging
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from torch_geometric.data import NeighborSampler
from lib_gnn_model.graphsage.graphsage_net import SageNet
from lib_gnn_model.gnn_base import GNNBase
import config
class SAGE(GNNBase):
def __init__(self, num_feats, num_classes, data=None):
super(SAGE, self).__init__()
self.logger = logging.getLogger('graphsage')
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# self.device = torch.device('cpu')
self.model = SageNet(num_feats, 256, num_classes).to(self.device)
self.data = data
def train_model(self, num_epochs=100):
self.model.train()
self.model.reset_parameters()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self.data.y = self.data.y.squeeze().to(self.device)
self._gen_train_loader()
optimizer = torch.optim.Adam(self.model.parameters(), lr=0.01, weight_decay=0.001)
for epoch in range(num_epochs):
self.logger.info('epoch %s' % (epoch,))
for batch_size, n_id, adjs in self.train_loader:
# `adjs` holds a list of `(edge_index, e_id, size)` tuples.
adjs = [adj.to(self.device) for adj in adjs]
optimizer.zero_grad()
out = self.model(self.data.x[n_id], adjs)
loss = F.nll_loss(out, self.data.y[n_id[:batch_size]])
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info(f'Train: {train_acc:.4f}, Test: {test_acc:.4f}')
@torch.no_grad()
def evaluate_model(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self._gen_subgraph_loader()
out = self.model.inference(self.data.x, self.subgraph_loader, self.device)
y_true = self.data.y.cpu().unsqueeze(-1)
y_pred = out.argmax(dim=-1, keepdim=True)
results = []
for mask in [self.data.train_mask, self.data.test_mask]:
results += [int(y_pred[mask].eq(y_true[mask]).sum()) / int(mask.sum())]
return results
def posterior(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self._gen_subgraph_loader()
posteriors = self.model.inference(self.data.x, self.subgraph_loader, self.device)
for _, mask in self.data('test_mask'):
posteriors = F.log_softmax(posteriors[mask], dim=-1)
return posteriors.detach()
def generate_embeddings(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self._gen_subgraph_loader()
logits = self.model.inference(self.data.x, self.subgraph_loader, self.device)
return logits
def _gen_train_loader(self):
if self.data.edge_index.shape[1] == 0:
self.data.edge_index = torch.tensor([[1, 2], [2, 1]])
self.train_loader = NeighborSampler(self.data.edge_index, node_idx=self.data.train_mask,
# sizes=[25, 10], batch_size=128, shuffle=True,
# sizes=[25, 10], num_nodes=self.data.num_nodes,
sizes=[10, 10], num_nodes=self.data.num_nodes,
# sizes=[5, 5], num_nodes=self.data.num_nodes,
# batch_size=128, shuffle=True,
batch_size=64, shuffle=True,
num_workers=0)
def _gen_subgraph_loader(self):
self.subgraph_loader = NeighborSampler(self.data.edge_index, node_idx=None,
# sizes=[-1], num_nodes=self.data.num_nodes,
sizes=[10], num_nodes=self.data.num_nodes,
# batch_size=128, shuffle=False,
batch_size=64, shuffle=False,
num_workers=0)
if __name__ == '__main__':
os.chdir('../../')
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'cora'
dataset = Planetoid(config.RAW_DATA_PATH, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
graphsage = SAGE(dataset.num_features, dataset.num_classes, data)
graphsage.train_model()
| 4,883 | 39.363636 | 96 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/graphsage/graphsage_net.py | import torch
import torch.nn.functional as F
from torch_geometric.nn import SAGEConv
class SageNet(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels):
super(SageNet, self).__init__()
self.num_layers = 2
self.convs = torch.nn.ModuleList()
self.convs.append(SAGEConv(in_channels, hidden_channels))
self.convs.append(SAGEConv(hidden_channels, out_channels))
def forward(self, x, adjs):
# `train_loader` computes the k-hop neighborhood of a batch of nodes,
# and returns, for each layer, a bipartite graph object, holding the
# bipartite edges `edge_index`, the index `e_id` of the original edges,
# and the size/shape `size` of the bipartite graph.
# Target nodes are also included in the source nodes so that one can
# easily apply skip-connections or add self-loops.
for i, (edge_index, _, size) in enumerate(adjs):
x_target = x[:size[1]] # Target nodes are always placed first.
x = self.convs[i]((x, x_target), edge_index)
if i != self.num_layers - 1:
x = F.relu(x)
x = F.dropout(x, p=0.5, training=self.training)
return F.log_softmax(x, dim=-1)
def inference(self, x_all, subgraph_loader, device):
# Compute representations of nodes layer by layer, using *all*
# available edges. This leads to faster computation in contrast to
# immediately computing the final representations of each batch.
for i in range(self.num_layers):
xs = []
for batch_size, n_id, adj in subgraph_loader:
edge_index, _, size = adj.to(device)
x = x_all[n_id].to(device)
x_target = x[:size[1]]
x = self.convs[i]((x, x_target), edge_index)
if i != self.num_layers - 1:
x = F.relu(x)
xs.append(x.cpu())
x_all = torch.cat(xs, dim=0)
return x_all
def reset_parameters(self):
for i in range(self.num_layers):
self.convs[i].reset_parameters()
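
# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Each `(edge_index, e_id, size)` tuple coming from NeighborSampler describes
# a bipartite hop whose first `size[1]` source nodes double as the target
# nodes, which is why `x_target = x[:size[1]]` works above. The helper below
# mimics one such hop with hand-made tensors; the sizes are assumptions.
def _demo_bipartite_hop():
    conv = SAGEConv(4, 8)
    x = torch.rand(5, 4)                        # 5 sampled source nodes
    x_target = x[:2]                            # the first 2 are the targets
    edge_index = torch.tensor([[2, 3, 4, 0],    # source node positions in x
                               [0, 0, 1, 1]])   # target node positions (< 2)
    return conv((x, x_target), edge_index).shape    # torch.Size([2, 8])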
| 2,154 | 37.482143 | 79 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gcn/gcn_net.py | import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
class GCNNet(torch.nn.Module):
def __init__(self, num_feats, num_classes):
super(GCNNet, self).__init__()
self.conv1 = GCNConv(num_feats, 16, cached=True, add_self_loops=False)
self.conv2 = GCNConv(16, num_classes, cached=True, add_self_loops=False)
def forward(self, data):
x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
x = F.relu(self.conv1(x, edge_index, edge_weight))
x = F.dropout(x, training=self.training)
x = self.conv2(x, edge_index, edge_weight)
return F.log_softmax(x, dim=-1)
def reset_parameters(self):
self.conv1.reset_parameters()
self.conv2.reset_parameters()
| 781 | 31.583333 | 80 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gcn/gcn.py | import os
import logging
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from lib_gnn_model.gnn_base import GNNBase
from lib_gnn_model.gcn.gcn_net import GCNNet
import config
class GCN(GNNBase):
def __init__(self, num_feats, num_classes, data=None):
super(GCN, self).__init__()
self.logger = logging.getLogger('gcn')
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model = GCNNet(num_feats, num_classes)
self.data = data
def train_model(self, num_epoch=100):
self.model.train()
self.model.reset_parameters()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
optimizer = torch.optim.Adam(self.model.parameters(), lr=0.01)
for epoch in range(num_epoch):
self.logger.info('epoch %s' % (epoch,))
optimizer.zero_grad()
output = self.model(self.data)[self.data.train_mask]
loss = F.nll_loss(output, self.data.y[self.data.train_mask])
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info('train acc: %s, test acc: %s' % (train_acc, test_acc))
def evaluate_model(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
logits, accs = self.model(self.data), []
for _, mask in self.data('train_mask', 'test_mask'):
pred = logits[mask].max(1)[1]
acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()
accs.append(acc)
return accs
if __name__ == '__main__':
os.chdir('../../')
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'cora'
dataset = Planetoid(config.RAW_DATA_PATH, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
gcn = GCN(dataset.num_features, dataset.num_classes, data)
gcn.train_model() | 2,221 | 31.202899 | 92 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/mlp/mlp.py | import os
import logging
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from lib_gnn_model.gnn_base import GNNBase
from lib_gnn_model.mlp.mlpnet import MLPNet
import config
class MLP(GNNBase):
def __init__(self, num_feats, num_classes, data=None):
super(MLP, self).__init__()
self.logger = logging.getLogger(__name__)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model = MLPNet(num_feats, num_classes)
self.data = data
def train_model(self, num_epoch=100):
self.model.train()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
optimizer = torch.optim.Adam(self.model.parameters(), lr=0.01)
for epoch in range(num_epoch):
self.logger.info('epoch %s' % (epoch,))
optimizer.zero_grad()
output = self.model(self.data.x)[self.data.train_mask]
# loss = F.nll_loss(output, self.data.y[self.data.train_mask])
            loss = F.cross_entropy(output, self.data.y[self.data.train_mask].squeeze())
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info('train acc: %s, test acc: %s' % (train_acc, test_acc))
def evaluate_model(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
logits, accs = self.model(self.data.x), []
for _, mask in self.data('train_mask', 'test_mask'):
pred = logits[mask].max(1)[1]
acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()
accs.append(acc)
return accs
def posterior(self):
self.model.eval()
posteriors = self.model(self.data.x)
for _, mask in self.data('test_mask'):
posteriors = posteriors[mask]
return posteriors
if __name__ == '__main__':
os.chdir('../../')
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'Cora'
dataset = Planetoid(config.RAW_DATA_PATH + dataset_name, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
gcn = MLP(dataset.num_features, dataset.num_classes, data)
gcn.train_model() | 2,518 | 31.294872 | 107 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/mlp/mlpnet.py | from torch import nn
import torch.nn.functional as F
class MLPNet(nn.Module):
def __init__(self, input_size, num_classes):
super(MLPNet, self).__init__()
self.xent = nn.CrossEntropyLoss()
self.layers = nn.Sequential(
nn.Linear(input_size, 250),
nn.Linear(250, 100),
nn.Linear(100, num_classes)
)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.layers(x)
return F.softmax(x, dim=1)
def loss(self, nodes, labels):
scores = self.forward(nodes)
return self.xent(scores, labels.squeeze())
def reset_parameters(self):
return 0
| 668 | 23.777778 | 50 | py |
Graph-Unlearning | Graph-Unlearning-main/exp/exp_graph_partition.py | import logging
import time
import torch
from sklearn.model_selection import train_test_split
import numpy as np
from torch_geometric.data import Data
import torch_geometric as tg
import networkx as nx
from exp.exp import Exp
from lib_utils.utils import connected_component_subgraphs
from lib_graph_partition.graph_partition import GraphPartition
from lib_utils import utils
class ExpGraphPartition(Exp):
def __init__(self, args):
super(ExpGraphPartition, self).__init__(args)
self.logger = logging.getLogger('exp_graph_partition')
self.load_data()
self.train_test_split()
self.gen_train_graph()
self.graph_partition()
self.generate_shard_data()
def load_data(self):
self.data = self.data_store.load_raw_data()
def train_test_split(self):
if self.args['is_split']:
self.logger.info('splitting train/test data')
self.train_indices, self.test_indices = train_test_split(np.arange((self.data.num_nodes)), test_size=self.args['test_ratio'], random_state=100)
self.data_store.save_train_test_split(self.train_indices, self.test_indices)
self.data.train_mask = torch.from_numpy(np.isin(np.arange(self.data.num_nodes), self.train_indices))
self.data.test_mask = torch.from_numpy(np.isin(np.arange(self.data.num_nodes), self.test_indices))
else:
self.train_indices, self.test_indices = self.data_store.load_train_test_split()
self.data.train_mask = torch.from_numpy(np.isin(np.arange(self.data.num_nodes), self.train_indices))
self.data.test_mask = torch.from_numpy(np.isin(np.arange(self.data.num_nodes), self.test_indices))
def gen_train_graph(self):
# delete ratio of edges and update the train graph
if self.args['ratio_deleted_edges'] != 0:
self.logger.debug("Before edge deletion. train data #.Nodes: %f, #.Edges: %f" % (
self.data.num_nodes, self.data.num_edges))
# self._ratio_delete_edges()
self.data.edge_index = self._ratio_delete_edges(self.data.edge_index)
# decouple train test edges.
edge_index = self.data.edge_index.numpy()
test_edge_indices = np.logical_or(np.isin(edge_index[0], self.test_indices),
np.isin(edge_index[1], self.test_indices))
train_edge_indices = np.logical_not(test_edge_indices)
edge_index_train = edge_index[:, train_edge_indices]
self.train_graph = nx.Graph()
self.train_graph.add_nodes_from(self.train_indices)
# use largest connected graph as train graph
if self.args['is_prune']:
self._prune_train_set()
# reconstruct a networkx train graph
for u, v in np.transpose(edge_index_train):
self.train_graph.add_edge(u, v)
self.logger.debug("After edge deletion. train graph #.Nodes: %f, #.Edges: %f" % (
self.train_graph.number_of_nodes(), self.train_graph.number_of_edges()))
self.logger.debug("After edge deletion. train data #.Nodes: %f, #.Edges: %f" % (
self.data.num_nodes, self.data.num_edges))
self.data_store.save_train_data(self.data)
self.data_store.save_train_graph(self.train_graph)
def graph_partition(self):
if self.args['is_partition']:
self.logger.info('graph partitioning')
start_time = time.time()
partition = GraphPartition(self.args, self.train_graph, self.data)
self.community_to_node = partition.graph_partition()
partition_time = time.time() - start_time
self.logger.info("Partition cost %s seconds." % partition_time)
self.data_store.save_community_data(self.community_to_node)
else:
self.community_to_node = self.data_store.load_community_data()
def generate_shard_data(self):
self.logger.info('generating shard data')
self.shard_data = {}
for shard in range(self.args['num_shards']):
train_shard_indices = list(self.community_to_node[shard])
shard_indices = np.union1d(train_shard_indices, self.test_indices)
x = self.data.x[shard_indices]
y = self.data.y[shard_indices]
edge_index = utils.filter_edge_index_1(self.data, shard_indices)
data = Data(x=x, edge_index=torch.from_numpy(edge_index), y=y)
data.train_mask = torch.from_numpy(np.isin(shard_indices, train_shard_indices))
data.test_mask = torch.from_numpy(np.isin(shard_indices, self.test_indices))
self.shard_data[shard] = data
self.data_store.save_shard_data(self.shard_data)
def _prune_train_set(self):
        # extract the maximum connected component
self.logger.debug("Before Prune... #. of Nodes: %f, #. of Edges: %f" % (
self.train_graph.number_of_nodes(), self.train_graph.number_of_edges()))
self.train_graph = max(connected_component_subgraphs(self.train_graph), key=len)
self.logger.debug("After Prune... #. of Nodes: %f, #. of Edges: %f" % (
self.train_graph.number_of_nodes(), self.train_graph.number_of_edges()))
# self.train_indices = np.array(self.train_graph.nodes)
def _ratio_delete_edges(self, edge_index):
edge_index = edge_index.numpy()
unique_indices = np.where(edge_index[0] < edge_index[1])[0]
unique_indices_not = np.where(edge_index[0] > edge_index[1])[0]
remain_indices = np.random.choice(unique_indices,
int(unique_indices.shape[0] * (1.0 - self.args['ratio_deleted_edges'])),
replace=False)
remain_encode = edge_index[0, remain_indices] * edge_index.shape[1] * 2 + edge_index[1, remain_indices]
unique_encode_not = edge_index[1, unique_indices_not] * edge_index.shape[1] * 2 + edge_index[0, unique_indices_not]
sort_indices = np.argsort(unique_encode_not)
remain_indices_not = unique_indices_not[sort_indices[np.searchsorted(unique_encode_not, remain_encode, sorter=sort_indices)]]
remain_indices = np.union1d(remain_indices, remain_indices_not)
# self.data.edge_index = torch.from_numpy(edge_index[:, remain_indices])
return torch.from_numpy(edge_index[:, remain_indices])
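    # Worked example of the deletion logic above (added comment, toy numbers only):
    # with edge_index = [[0, 1, 0, 2], [1, 0, 2, 0]], the pairs (0, 1) and (0, 2)
    # each appear in both directions. `unique_indices` picks the columns with
    # src < dst (columns 0 and 2), a random subset of those is kept, each kept
    # column (u, v) is encoded as u * 2E + v (E = number of directed edges), the
    # reversed columns are encoded the same way, and np.searchsorted matches the
    # two encodings so that every surviving undirected edge keeps both of its
    # directed entries.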
| 6,423 | 44.560284 | 155 | py |
Graph-Unlearning | Graph-Unlearning-main/exp/exp_attack_unlearning.py | import logging
import time
from collections import defaultdict
import numpy as np
import torch
import torch_geometric as tg
from torch_geometric.data import Data
from scipy.spatial import distance
import config
from exp.exp import Exp
from lib_graph_partition.graph_partition import GraphPartition
from lib_gnn_model.node_classifier import NodeClassifier
from lib_aggregator.aggregator import Aggregator
from lib_utils import utils
class ExpAttackUnlearning(Exp):
def __init__(self, args):
super(ExpAttackUnlearning, self).__init__(args)
self.logger = logging.getLogger('exp_attack_unlearning')
# 1. respond to the unlearning requests
self.load_preprocessed_data()
# self.graph_unlearning_request_respond()
if self.args['repartition']:
with open(config.MODEL_PATH + self.args['dataset_name'] + '/' + self.args['target_model']+"_unlearned_indices") as file:
node_unlearning_indices = [line.rstrip() for line in file]
for unlearned_node in node_unlearning_indices:
self.graph_unlearning_request_respond(int(unlearned_node))
else:
self.graph_unlearning_request_respond()
        # 2. evaluate the attack performance
self.attack_graph_unlearning()
def load_preprocessed_data(self):
self.shard_data = self.data_store.load_shard_data()
self.raw_data = self.data_store.load_raw_data()
self.train_data = self.data_store.load_train_data()
self.train_graph = self.data_store.load_train_graph()
self.train_indices, self.test_indices = self.data_store.load_train_test_split()
self.community_to_node = self.data_store.load_community_data()
num_feats = self.train_data.num_features
num_classes = len(self.train_data.y.unique())
self.target_model = NodeClassifier(num_feats, num_classes, self.args)
def graph_unlearning_request_respond(self, node_unlearning_request=None):
# reindex the node ids
node_to_com = self.data_store.c2n_to_n2c(self.community_to_node)
train_indices_prune = list(node_to_com.keys())
if node_unlearning_request==None:
# generate node unlearning requests
node_unlearning_indices = np.random.choice(train_indices_prune, self.args['num_unlearned_nodes'])
else:
node_unlearning_indices = np.array([node_unlearning_request])
self.num_unlearned_edges =0
unlearning_indices = defaultdict(list)
for node in node_unlearning_indices:
unlearning_indices[node_to_com[node]].append(node)
# delete a list of revoked nodes from train_graph
self.train_graph.remove_nodes_from(node_unlearning_indices)
# delete the revoked nodes from train_data
# by building unlearned data from unlearned train_graph
self.train_data.train_mask = torch.from_numpy(np.isin(np.arange(self.train_data.num_nodes), self.train_indices))
self.train_data.test_mask = torch.from_numpy(np.isin(np.arange(self.train_data.num_nodes), np.append(self.test_indices, node_unlearning_indices)))
# delete the revoked nodes from shard_data
self.shard_data_after_unlearning = {}
self.affected_shard=[]
for shard in range(self.args["num_shards"]):
train_shard_indices = list(self.community_to_node[shard])
# node unlearning
train_shard_indices = np.setdiff1d(train_shard_indices, unlearning_indices[shard])
shard_indices = np.union1d(train_shard_indices, self.test_indices)
x = self.train_data.x[shard_indices]
y = self.train_data.y[shard_indices]
edge_index = utils.filter_edge_index_1(self.train_data, shard_indices)
data = Data(x=x, edge_index=torch.from_numpy(edge_index), y=y)
data.train_mask = torch.from_numpy(np.isin(shard_indices, train_shard_indices))
data.test_mask = torch.from_numpy(np.isin(shard_indices, self.test_indices))
self.shard_data_after_unlearning[shard] = data
self.num_unlearned_edges += self.shard_data[shard].num_edges - self.shard_data_after_unlearning[shard].num_edges
# find the affected shard model
if self.shard_data_after_unlearning[shard].num_nodes != self.shard_data[shard].num_nodes:
self.affected_shard.append(shard)
self.data_store.save_unlearned_data(self.train_graph, 'train_graph')
self.data_store.save_unlearned_data(self.train_data, 'train_data')
self.data_store.save_unlearned_data(self.shard_data_after_unlearning, 'shard_data')
        # retrain the corresponding shard model
if not self.args['repartition']:
for shard in self.affected_shard:
suffix = "unlearned_"+str(node_unlearning_indices[0])
self._train_shard_model(shard, suffix)
# (if re-partition, re-partition the remaining graph)
# re-train the shard model, save model and optimal weight score
if self.args['repartition']:
suffix="_repartition_unlearned_" + str(node_unlearning_indices[0])
self._repartition(suffix)
for shard in range(self.args["num_shards"]):
self._train_shard_model(shard, suffix)
def _repartition(self, suffix):
# load unlearned train_graph and train_data
train_graph = self.data_store.load_unlearned_data('train_graph')
train_data = self.data_store.load_unlearned_data('train_data')
# repartition
start_time = time.time()
partition = GraphPartition(self.args, train_graph, train_data)
community_to_node = partition.graph_partition()
partition_time = time.time() - start_time
self.logger.info("Partition cost %s seconds." % partition_time)
# save the new partition and shard
self.data_store.save_community_data(community_to_node, suffix)
self._generate_unlearned_repartitioned_shard_data(train_data, community_to_node, self.test_indices)
def _generate_unlearned_repartitioned_shard_data(self, train_data, community_to_node, test_indices):
self.logger.info('generating shard data')
shard_data = {}
for shard in range(self.args['num_shards']):
train_shard_indices = list(community_to_node[shard])
shard_indices = np.union1d(train_shard_indices, test_indices)
x = self.train_data.x[shard_indices]
y = self.train_data.y[shard_indices]
edge_index = utils.filter_edge_index_1(train_data, shard_indices)
data = Data(x=x, edge_index=torch.from_numpy(edge_index), y=y)
data.train_mask = torch.from_numpy(np.isin(shard_indices, train_shard_indices))
data.test_mask = torch.from_numpy(np.isin(shard_indices, test_indices))
shard_data[shard] = data
# self.data_store.save_unlearned_data(shard_data, 'shard_data_repartition')
return shard_data
def _train_shard_model(self, shard, suffix="unlearned"):
self.logger.info('training target models, shard %s' % shard)
# load shard data
self.target_model.data = self.shard_data_after_unlearning[shard]
# retrain shard model
self.target_model.train_model()
# replace shard model
device=torch.device("cpu")
self.target_model.device = device
self.data_store.save_target_model(0, self.target_model, shard, suffix)
# self.data_store.save_unlearned_target_model(0, self.target_model, shard, suffix)
def attack_graph_unlearning(self):
# load unlearned indices
with open(config.MODEL_PATH + self.args['dataset_name'] + "/" + self.args['target_model'] +"_unlearned_indices") as file:
unlearned_indices = [line.rstrip() for line in file]
# member sample query, label as 1
positive_posteriors = self._query_target_model(unlearned_indices, unlearned_indices)
# non-member sample query, label as 0
negative_posteriors = self._query_target_model(unlearned_indices, self.test_indices)
# evaluate attack performance, train multiple shadow models, or calculate posterior entropy, or directly calculate AUC.
self.evaluate_attack_performance(positive_posteriors, negative_posteriors)
def _query_target_model(self, unlearned_indices, test_indices):
# load unlearned data
train_data = self.data_store.load_unlearned_data('train_data')
# load optimal weight score
# optimal_weight=self.data_store.load_optimal_weight(0)
# calculate the final posterior, save as attack feature
self.logger.info('aggregating submodels')
posteriors_a, posteriors_b, posteriors_c =[],[],[]
for i in unlearned_indices:
community_to_node = self.data_store.load_community_data('')
shard_data = self._generate_unlearned_repartitioned_shard_data(train_data, community_to_node, int(i))
posteriors_a.append(self._generate_posteriors(shard_data, ''))
suffix="unlearned_" + str(i)
posteriors_b.append(self._generate_posteriors_unlearned(shard_data, suffix, i))
if self.args['repartition']:
suffix = "_repartition_unlearned_" + str(i)
community_to_node = self.data_store.load_community_data(suffix)
shard_data = self._generate_unlearned_repartitioned_shard_data(train_data, community_to_node, int(i))
suffix = "__repartition_unlearned_" + str(i)
posteriors_c.append(self._generate_posteriors(shard_data, suffix))
return posteriors_a, posteriors_b, posteriors_c
def _generate_posteriors_unlearned(self, shard_data, suffix, unlearned_indice):
import glob
model_path=glob.glob(config.MODEL_PATH+self.args['dataset_name']+"/*_1unlearned_"+str(unlearned_indice))
if not model_path:
self.logger.info("No corresponding unlearned shard model for node %s" % str(unlearned_indice))
return torch.tensor([0]*6)
else:
affected_shard = int(model_path[0].split('/')[-1].split('_')[-4])
posteriors = []
for shard in range(self.args['num_shards']):
if shard == affected_shard:
# load the retrained the shard model
self.data_store.load_target_model(0, self.target_model, shard, suffix)
else:
# self.target_model.model.reset_parameters()
# load unaffected shard model
self.data_store.load_target_model(0, self.target_model, shard, '')
self.device = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu')
self.target_model.model = self.target_model.model.to(self.device)
self.target_model.data = shard_data[shard].to(self.device)
posteriors.append(self.target_model.posterior())
return torch.mean(torch.cat(posteriors, dim=0), dim=0)
def _generate_posteriors(self, shard_data, suffix):
posteriors = []
for shard in range(self.args['num_shards']):
# self.target_model.model.reset_parameters()
self.data_store.load_target_model(0, self.target_model, shard, suffix)
self.device = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu')
self.target_model.model = self.target_model.model.to(self.device)
self.target_model.data = shard_data[shard].to(self.device)
posteriors.append(self.target_model.posterior())
return torch.mean(torch.cat(posteriors, dim=0), dim=0)
def evaluate_attack_performance(self, positive_posteriors, negative_posteriors):
        # construct attack data
label = torch.cat((torch.ones(len(positive_posteriors[0])), torch.zeros(len(negative_posteriors[0]))))
data={}
        for i in range(3 if self.args['repartition'] else 2):
data[i] = torch.cat((torch.stack(positive_posteriors[i]), torch.stack(negative_posteriors[i])),0)
# calculate l2 distance
model_b_distance = self._calculate_distance(data[0], data[1])
# directly calculate AUC with feature and labels
attack_auc_b = self.evaluate_attack_with_AUC(model_b_distance, label)
if self.args['repartition']:
model_c_distance = self._calculate_distance(data[0], data[2])
attack_auc_c = self.evaluate_attack_with_AUC(model_c_distance, label)
self.logger.info("Attack_Model_B AUC: %s | Attack_Model_C AUC: %s" % (attack_auc_b, attack_auc_c))
def evaluate_attack_with_AUC(self, data, label):
from sklearn.metrics import roc_auc_score
self.logger.info("Directly calculate the attack AUC")
return roc_auc_score(label, data.reshape(-1, 1))
def _calculate_distance(self, data0, data1, distance='l2_norm' ):
if distance == 'l2_norm':
return np.array([np.linalg.norm(data0[i]-data1[i]) for i in range(len(data0))])
elif distance =='direct_diff':
return data0 - data1
else:
raise Exception("Unsupported distance")
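    # Minimal sketch of the attack scoring (comments only, arbitrary toy numbers):
    # each target node gets one scalar, the shift of its aggregated posterior
    # before vs. after unlearning, and that scalar is fed straight into the AUC.
    #
    #   import numpy as np
    #   from sklearn.metrics import roc_auc_score
    #   distances = np.array([0.9, 0.7, 0.1, 0.2])   # members shift more
    #   labels = np.array([1, 1, 0, 0])               # 1 = unlearned member
    #   roc_auc_score(labels, distances)              # -> 1.0 on this toy data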
| 13,321 | 48.895131 | 154 | py |
Graph-Unlearning | Graph-Unlearning-main/exp/exp_node_edge_unlearning.py | import logging
import pickle
import time
from collections import defaultdict
import numpy as np
import torch
from torch_geometric.data import Data
import config
from exp.exp import Exp
from lib_gnn_model.graphsage.graphsage import SAGE
from lib_gnn_model.gat.gat import GAT
from lib_gnn_model.gin.gin import GIN
from lib_gnn_model.gcn.gcn import GCN
from lib_gnn_model.mlp.mlp import MLP
from lib_gnn_model.node_classifier import NodeClassifier
from lib_aggregator.aggregator import Aggregator
from lib_utils import utils
class ExpNodeEdgeUnlearning(Exp):
def __init__(self, args):
super(ExpNodeEdgeUnlearning, self).__init__(args)
self.logger = logging.getLogger('exp_node_edge_unlearning')
self.target_model_name = self.args['target_model']
self.load_data()
self.determine_target_model()
self.run_exp()
def run_exp(self):
# unlearning efficiency
run_f1 = np.empty((0))
unlearning_time = np.empty((0))
for run in range(self.args['num_runs']):
self.logger.info("Run %f" % run)
self.train_target_models(run)
aggregate_f1_score = self.aggregate(run)
# node_unlearning_time = self.unlearning_time_statistic()
node_unlearning_time = 0
run_f1 = np.append(run_f1, aggregate_f1_score)
unlearning_time = np.append(unlearning_time, node_unlearning_time)
self.num_unlearned_edges = 0
# model utility
self.f1_score_avg = np.average(run_f1)
self.f1_score_std = np.std(run_f1)
self.unlearning_time_avg = np.average(unlearning_time)
self.unlearning_time_std = np.std(unlearning_time)
self.logger.info(
"%s %s %s %s" % (self.f1_score_avg, self.f1_score_std, self.unlearning_time_avg, self.unlearning_time_std))
def load_data(self):
self.shard_data = self.data_store.load_shard_data()
self.raw_data = self.data_store.load_raw_data()
self.train_data = self.data_store.load_train_data()
        # community_to_node is needed by _generate_unlearning_request / unlearning_time_statistic
        self.community_to_node = self.data_store.load_community_data()
        self.unlearned_shard_data = self.shard_data
def determine_target_model(self):
num_feats = self.train_data.num_features
num_classes = len(self.train_data.y.unique())
if not self.args['is_use_batch']:
if self.target_model_name == 'SAGE':
self.target_model = SAGE(num_feats, num_classes)
elif self.target_model_name == 'GCN':
self.target_model = GCN(num_feats, num_classes)
elif self.target_model_name == 'GAT':
self.target_model = GAT(num_feats, num_classes)
elif self.target_model_name == 'GIN':
self.target_model = GIN(num_feats, num_classes)
else:
raise Exception('unsupported target model')
else:
if self.target_model_name == 'MLP':
self.target_model = MLP(num_feats, num_classes)
else:
self.target_model = NodeClassifier(num_feats, num_classes, self.args)
def train_target_models(self, run):
if self.args['is_train_target_model']:
self.logger.info('training target models')
self.time = {}
for shard in range(self.args['num_shards']):
self.time[shard] = self._train_model(run, shard)
def aggregate(self, run):
self.logger.info('aggregating submodels')
# posteriors, true_label = self.generate_posterior()
aggregator = Aggregator(run, self.target_model, self.train_data, self.unlearned_shard_data, self.args)
aggregator.generate_posterior()
self.aggregate_f1_score = aggregator.aggregate()
self.logger.info("Final Test F1: %s" % (self.aggregate_f1_score,))
return self.aggregate_f1_score
def _generate_unlearning_request(self, num_unlearned="assign"):
node_list = []
for key, value in self.community_to_node.items():
# node_list.extend(value.tolist())
node_list.extend(value)
if num_unlearned == "assign":
num_of_unlearned_nodes = self.args['num_unlearned_nodes']
elif num_unlearned == "ratio":
num_of_unlearned_nodes = int(self.args['ratio_unlearned_nodes'] * len(node_list))
if self.args['unlearning_request'] == 'random':
unlearned_nodes_indices = np.random.choice(node_list, num_of_unlearned_nodes, replace=False)
elif self.args['unlearning_request'] == 'top1':
sorted_shards = sorted(self.community_to_node.items(), key=lambda x: len(x[1]), reverse=True)
unlearned_nodes_indices = np.random.choice(sorted_shards[0][1], num_of_unlearned_nodes, replace=False)
elif self.args['unlearning_request'] == 'adaptive':
sorted_shards = sorted(self.community_to_node.items(), key=lambda x: len(x[1]), reverse=True)
candidate_list = np.concatenate([sorted_shards[i][1] for i in range(int(self.args['num_shards']/2)+1)], axis=0)
unlearned_nodes_indices = np.random.choice(candidate_list, num_of_unlearned_nodes, replace=False)
elif self.args['unlearning_request'] == 'last5':
sorted_shards = sorted(self.community_to_node.items(), key=lambda x: len(x[1]), reverse=False)
candidate_list = np.concatenate([sorted_shards[i][1] for i in range(int(self.args['num_shards']/2)+1)], axis=0)
unlearned_nodes_indices = np.random.choice(candidate_list, num_of_unlearned_nodes, replace=False)
return unlearned_nodes_indices
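    # Toy illustration of the strategies above (added comment): with
    # community_to_node = {0: [1, 2, 3, 4], 1: [5, 6]}, 'random' samples uniformly
    # from [1, 2, 3, 4, 5, 6], while 'top1' sorts shards by size and draws the
    # unlearned nodes only from the largest shard, i.e. from [1, 2, 3, 4].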
def unlearning_time_statistic(self):
if self.args['is_train_target_model'] and self.args['num_shards'] != 1:
# random sample 5% nodes, find their belonging communities
unlearned_nodes = self._generate_unlearning_request(num_unlearned="ratio")
belong_community = []
for sample_node in range(len(unlearned_nodes)):
for community, node in self.community_to_node.items():
if np.in1d(unlearned_nodes[sample_node], node).any():
belong_community.append(community)
# calculate the total unlearning time and group unlearning time
group_unlearning_time = []
node_unlearning_time = []
for shard in range(self.args['num_shards']):
if belong_community.count(shard) != 0:
group_unlearning_time.append(self.time[shard])
node_unlearning_time.extend([float(self.time[shard]) for j in range(belong_community.count(shard))])
return node_unlearning_time
elif self.args['is_train_target_model'] and self.args['num_shards'] == 1:
return self.time[0]
else:
return 0
def _train_model(self, run, shard):
self.logger.info('training target models, run %s, shard %s' % (run, shard))
start_time = time.time()
self.target_model.data = self.unlearned_shard_data[shard]
self.target_model.train_model()
train_time = time.time() - start_time
self.data_store.save_target_model(run, self.target_model, shard)
return train_time
| 7,194 | 42.606061 | 123 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_dataset/data_store.py | import os
import pickle
import logging
import shutil
import numpy as np
import torch
from torch_geometric.datasets import Planetoid, Coauthor
import torch_geometric.transforms as T
import config
class DataStore:
def __init__(self, args):
self.logger = logging.getLogger('data_store')
self.args = args
self.dataset_name = self.args['dataset_name']
self.num_features = {
"cora": 1433,
"pubmed": 500,
"citeseer": 3703,
"Coauthor_CS": 6805,
"Coauthor_Phys": 8415
}
self.partition_method = self.args['partition_method']
self.num_shards = self.args['num_shards']
self.target_model = self.args['target_model']
self.determine_data_path()
def determine_data_path(self):
embedding_name = '_'.join(('embedding', self._extract_embedding_method(self.partition_method),
str(self.args['ratio_deleted_edges'])))
community_name = '_'.join(('community', self.partition_method, str(self.num_shards),
str(self.args['ratio_deleted_edges'])))
shard_name = '_'.join(('shard_data', self.partition_method, str(self.num_shards),
str(self.args['shard_size_delta']), str(self.args['ratio_deleted_edges'])))
target_model_name = '_'.join((self.target_model, self.partition_method, str(self.num_shards),
str(self.args['shard_size_delta']), str(self.args['ratio_deleted_edges'])))
optimal_weight_name = '_'.join((self.target_model, self.partition_method, str(self.num_shards),
str(self.args['shard_size_delta']), str(self.args['ratio_deleted_edges'])))
processed_data_prefix = config.PROCESSED_DATA_PATH + self.dataset_name + "/"
self.train_test_split_file = processed_data_prefix + "train_test_split" + str(self.args['test_ratio'])
self.train_data_file = processed_data_prefix + "train_data"
self.train_graph_file = processed_data_prefix + "train_graph"
self.embedding_file = processed_data_prefix + embedding_name
self.community_file = processed_data_prefix + community_name
self.shard_file = processed_data_prefix + shard_name
self.unlearned_file = processed_data_prefix+ '_'.join(('unlearned', str(self.args['num_unlearned_nodes'])))
self.target_model_file = config.MODEL_PATH + self.dataset_name + '/' + target_model_name
self.optimal_weight_file = config.ANALYSIS_PATH + 'optimal/' + self.dataset_name + '/' + optimal_weight_name
self.posteriors_file = config.ANALYSIS_PATH + 'posteriors/' + self.dataset_name + '/' + target_model_name
dir_lists = [s + self.dataset_name for s in [config.PROCESSED_DATA_PATH,
config.MODEL_PATH,
config.ANALYSIS_PATH + 'optimal/',
config.ANALYSIS_PATH + 'posteriors/']]
for dir in dir_lists:
self._check_and_create_dirs(dir)
def _check_and_create_dirs(self, folder):
if not os.path.exists(folder):
try:
self.logger.info("checking directory %s", folder)
os.makedirs(folder, exist_ok=True)
self.logger.info("new directory %s created", folder)
except OSError as error:
self.logger.info("deleting old and creating new empty %s", folder)
shutil.rmtree(folder)
os.mkdir(folder)
self.logger.info("new empty directory %s created", folder)
else:
self.logger.info("folder %s exists, do not need to create again.", folder)
def load_raw_data(self):
self.logger.info('loading raw data')
if not self.args['is_use_node_feature']:
self.transform = T.Compose([
T.OneHotDegree(-2, cat=False) # use only node degree as node feature.
])
else:
self.transform = None
if self.dataset_name in ["cora", "pubmed", "citeseer"]:
dataset = Planetoid(config.RAW_DATA_PATH, self.dataset_name, transform=T.NormalizeFeatures())
labels = np.unique(dataset.data.y.numpy())
elif self.dataset_name in ["Coauthor_CS", "Coauthor_Phys"]:
if self.dataset_name == "Coauthor_Phys":
dataset = Coauthor(config.RAW_DATA_PATH, name="Physics", pre_transform=self.transform)
else:
dataset = Coauthor(config.RAW_DATA_PATH, name="CS", pre_transform=self.transform)
else:
raise Exception('unsupported dataset')
data = dataset[0]
return data
def save_train_data(self, train_data):
self.logger.info('saving train data')
pickle.dump(train_data, open(self.train_data_file, 'wb'))
def load_train_data(self):
self.logger.info('loading train data')
return pickle.load(open(self.train_data_file, 'rb'))
def save_train_graph(self, train_data):
self.logger.info('saving train graph')
pickle.dump(train_data, open(self.train_graph_file, 'wb'))
def load_train_graph(self):
self.logger.info('loading train graph')
return pickle.load(open(self.train_graph_file, 'rb'))
def save_train_test_split(self, train_indices, test_indices):
self.logger.info('saving train test split data')
pickle.dump((train_indices, test_indices), open(self.train_test_split_file, 'wb'))
def load_train_test_split(self):
self.logger.info('loading train test split data')
return pickle.load(open(self.train_test_split_file, 'rb'))
def save_embeddings(self, embeddings):
self.logger.info('saving embedding data')
pickle.dump(embeddings, open(self.embedding_file, 'wb'))
def load_embeddings(self):
self.logger.info('loading embedding data')
return pickle.load(open(self.embedding_file, 'rb'))
def save_community_data(self, community_to_node, suffix=''):
self.logger.info('saving community data')
pickle.dump(community_to_node, open(self.community_file + suffix, 'wb'))
def load_community_data(self, suffix=''):
self.logger.info('loading community data from: %s'%(self.community_file + suffix))
return pickle.load(open(self.community_file + suffix, 'rb'))
def c2n_to_n2c(self, community_to_node):
node_list = []
for i in range(self.num_shards):
node_list.extend(list(community_to_node.values())[i])
node_to_community = {}
for comm, nodes in dict(community_to_node).items():
for node in nodes:
# Map node id back to original graph
# node_to_community[node_list[node]] = comm
node_to_community[node] = comm
return node_to_community
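    # Small example (added comment): with two shards, c2n_to_n2c({0: [10, 11], 1: [12]})
    # returns {10: 0, 11: 0, 12: 1}, i.e. the inverse mapping from node id to the
    # shard/community that contains it.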
def save_shard_data(self, shard_data):
self.logger.info('saving shard data')
pickle.dump(shard_data, open(self.shard_file, 'wb'))
def load_shard_data(self):
self.logger.info('loading shard data')
return pickle.load(open(self.shard_file, 'rb'))
def load_unlearned_data(self, suffix):
file_path = '_'.join((self.unlearned_file, suffix))
self.logger.info('loading unlearned data from %s' % file_path)
return pickle.load(open(file_path, 'rb'))
def save_unlearned_data(self, data, suffix):
self.logger.info('saving unlearned data %s' % suffix)
pickle.dump(data, open('_'.join((self.unlearned_file, suffix)), 'wb'))
def save_target_model(self, run, model, shard, suffix=''):
if self.args["exp"] in ["node_edge_unlearning", "attack_unlearning"]:
model_path = '_'.join((self.target_model_file, str(shard), str(run), str(self.args['num_unlearned_nodes']))) + suffix
model.save_model(model_path)
else:
model.save_model(self.target_model_file + '_' + str(shard) + '_' + str(run))
# model.save_model(self.target_model_file + '_' + str(shard))
def load_target_model(self, run, model, shard, suffix=''):
if self.args["exp"] == "node_edge_unlearning":
model.load_model(
'_'.join((self.target_model_file, str(shard), str(run), str(self.args['num_unlearned_nodes']))))
elif self.args["exp"] == "attack_unlearning":
model_path = '_'.join((self.target_model_file, str(shard), str(run), str(self.args['num_unlearned_nodes']))) + suffix
print("loading target model from:" + model_path)
device = torch.device('cpu')
model.load_model(model_path)
model.device=device
else:
# model.load_model(self.target_model_file + '_' + str(shard) + '_' + str(run))
model.load_model(self.target_model_file + '_' + str(shard) + '_' + str(0))
def save_optimal_weight(self, weight, run):
torch.save(weight, self.optimal_weight_file + '_' + str(run))
def load_optimal_weight(self, run):
return torch.load(self.optimal_weight_file + '_' + str(run))
def save_posteriors(self, posteriors, run, suffix=''):
torch.save(posteriors, self.posteriors_file + '_' + str(run) + suffix)
def load_posteriors(self, run):
return torch.load(self.posteriors_file + '_' + str(run))
def _extract_embedding_method(self, partition_method):
return partition_method.split('_')[0]
| 9,583 | 44.421801 | 129 | py |
ZINBAE | ZINBAE-master/ZINBAE.py | """
Implementation of ZINBAE model
"""
from time import time
import numpy as np
from keras.models import Model
import keras.backend as K
from keras.engine.topology import Layer, InputSpec
from keras.layers import Dense, Input, GaussianNoise, Layer, Activation, Lambda, Multiply, BatchNormalization, Reshape, Concatenate, RepeatVector, Permute
from keras.models import Model
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils.vis_utils import plot_model
from keras.callbacks import EarlyStopping
from sklearn.cluster import KMeans
from sklearn import metrics
import h5py
import scanpy.api as sc
from layers import ConstantDispersionLayer, SliceLayer, ColWiseMultLayer
from loss import NB, ZINB, mse_loss_v2
from preprocess import read_dataset, normalize
import tensorflow as tf
from numpy.random import seed
seed(2211)
from tensorflow import set_random_seed
set_random_seed(2211)
MeanAct = lambda x: tf.clip_by_value(K.exp(x), 1e-5, 1e6)
DispAct = lambda x: tf.clip_by_value(tf.nn.softplus(x), 1e-4, 1e4)
def mean_MSE(x_impute, x_real):
return np.mean(np.square(np.log(x_impute+1)-np.log(x_real+1)))
def imputate_error(x_impute, x_real, x_raw):
x_impute_log = np.log(x_impute[(x_raw-x_real)<0]+1)
x_real_log = np.log(x_real[(x_raw-x_real)<0]+1)
return np.sum(np.abs(x_impute_log-x_real_log))/np.sum(x_real_log>0)
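# Worked toy example (illustration only): the error is measured in log space and
# only on entries that were dropped out in the raw matrix, i.e. where x_raw < x_real.
#
#   x_real = np.array([[4., 2.]])    # true counts
#   x_raw = np.array([[0., 2.]])     # observed counts; the first entry is a dropout
#   x_impute = np.array([[4., 9.]])  # perfect on the dropout, wrong elsewhere
#   imputate_error(x_impute, x_real, x_raw)   # -> 0.0, non-dropout entries are ignored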
def autoencoder(dims, noise_sd=0, init='glorot_uniform', act='relu'):
"""
Fully connected auto-encoder model, symmetric.
Arguments:
dims: list of number of units in each layer of encoder. dims[0] is input dim, dims[-1] is units in hidden layer.
The decoder is symmetric with encoder. So number of layers of the auto-encoder is 2*len(dims)-1
        act: activation applied to the internal encoder/decoder layers; the input layer and the output heads (pi, dispersion, mean) use their own activations
return:
Model of autoencoder
"""
n_stacks = len(dims) - 1
# input
sf_layer = Input(shape=(1,), name='size_factors')
x = Input(shape=(dims[0],), name='counts')
h = x
h = GaussianNoise(noise_sd, name='input_noise')(h)
# internal layers in encoder
for i in range(n_stacks-1):
h = Dense(dims[i + 1], kernel_initializer=init, name='encoder_%d' % i)(h)
h = BatchNormalization(center=True, scale=False, name='encoder_batchnorm_%d' % i)(h)
h = Activation(act, name='encoder_act_%d' % i)(h)
# hidden layer
h = Dense(dims[-1], kernel_initializer=init, name='encoder_hidden')(h) # hidden layer, features are extracted from here
h = BatchNormalization(center=True, scale=False, name='encoder_hidden_batchnorm_%d' % i)(h)
h = Activation(act, name='encoder_hidden_act')(h)
# internal layers in decoder
for i in range(n_stacks-1, 0, -1):
h = Dense(dims[i], kernel_initializer=init, name='decoder_%d' % i)(h)
h = BatchNormalization(center=True, scale=False, name='decoder_batchnorm_%d' % i)(h)
h = Activation(act, name='decoder_act_%d' % i)(h)
# output
pi = Dense(dims[0], activation='sigmoid', kernel_initializer=init, name='pi')(h)
disp = Dense(dims[0], activation=DispAct, kernel_initializer=init, name='dispersion')(h)
mean = Dense(dims[0], activation=MeanAct, kernel_initializer=init, name='mean')(h)
output = ColWiseMultLayer(name='output')([mean, sf_layer])
output = SliceLayer(0, name='slice')([output, disp, pi])
return Model(inputs=[x, sf_layer], outputs=output)
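# Example instantiation (illustrative only; the gene count 2000 is an arbitrary
# placeholder): dims=[2000, 64, 32] builds a 2000-64-32-64-2000 ZINB autoencoder
# taking the preprocessed counts and the per-cell size factors as inputs.
#
#   ae = autoencoder([2000, 64, 32], noise_sd=2.5)
#   ae.summary()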
### Gumbel-softmax layer ###
def sampling_gumbel(shape, eps=1e-8):
u = tf.random_uniform(shape, minval=0., maxval=1)
return -tf.log(-tf.log(u+eps)+eps)
def compute_softmax(logits,temp):
z = logits + sampling_gumbel( K.shape(logits) )
return K.softmax( z / temp )
def gumbel_softmax(args):
logits,temp = args
y = compute_softmax(logits,temp)
return y
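# Sketch of the Gumbel-softmax trick used above (added comment): for a dropout
# probability pi, the logits [log(pi), log(1 - pi)] plus Gumbel noise are pushed
# through a softmax with temperature `temp`. A high temperature yields soft,
# differentiable samples, and as temp -> 0 the output approaches a one-hot draw
# from Bernoulli(pi); column 1 of the sample is later used as the "not dropped"
# gate that multiplies the predicted mean.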
class ZINB_AE(object):
def __init__(self,
dims,
noise_sd=0,
ridge=0,
debug=False,
eps = 1e-20):
self.dims = dims
self.input_dim = dims[0]
self.n_stacks = len(self.dims) - 1
self.noise_sd = noise_sd
self.act = 'relu'
self.ridge = ridge
self.debug = debug
        self.eps = eps
        self.pretrained = False
self.autoencoder = autoencoder(self.dims, noise_sd=self.noise_sd, act = self.act)
pi = self.autoencoder.get_layer(name='pi').output
disp = self.autoencoder.get_layer(name='dispersion').output
zinb = ZINB(pi, theta=disp, ridge_lambda=self.ridge, debug=self.debug)
self.zinb_loss = zinb.loss
# zero-inflated outputs
tau_input = Input(shape=(self.dims[0],), name='tau_input')
pi_ = self.autoencoder.get_layer('pi').output
mean_ = self.autoencoder.output
pi_log_ = Lambda(lambda x:tf.log(x+self.eps))(pi_)
nondrop_pi_log_ = Lambda(lambda x:tf.log(1-x+self.eps))(pi_)
pi_log_ = Reshape( target_shape=(self.dims[0],1) )(pi_log_)
nondrop_pi_log_ = Reshape( target_shape=(self.dims[0],1) )(nondrop_pi_log_)
logits = Concatenate(axis=-1)([pi_log_,nondrop_pi_log_])
temp_ = RepeatVector( 2 )(tau_input)
temp_ = Permute( (2,1) )(temp_)
samples_ = Lambda( gumbel_softmax,output_shape=(self.dims[0],2,) )( [logits,temp_] )
samples_ = Lambda( lambda x:x[:,:,1] )(samples_)
samples_ = Reshape( target_shape=(self.dims[0],) )(samples_)
output_ = Multiply(name='ZI_output')([mean_, samples_])
self.model = Model(inputs=[self.autoencoder.input[0], self.autoencoder.input[1], tau_input],
outputs=[output_, self.autoencoder.output])
def pretrain(self, x, x_count, batch_size=256, epochs=200, optimizer='adam', ae_file='ae_weights.h5'):
print('...Pretraining autoencoder...')
self.autoencoder.compile(loss=self.zinb_loss, optimizer=optimizer)
es = EarlyStopping(monitor="loss", patience=50, verbose=1)
self.autoencoder.fit(x=x, y=x_count, batch_size=batch_size, epochs=epochs, callbacks=[es], shuffle=True)
self.autoencoder.save_weights(ae_file)
print('Pretrained weights are saved to ./' + str(ae_file))
self.pretrained = True
    def fit(self, x, x_count, batch_size=256, maxiter=2000, ae_weights=None,
loss_weights=[0.01, 1], optimizer='adam', model_file='model_weight.h5'):
self.model.compile(loss={'ZI_output': mse_loss_v2, 'slice': self.zinb_loss}, loss_weights=loss_weights, optimizer=optimizer)
if not self.pretrained and ae_weights is None:
print('...pretraining autoencoders using default hyper-parameters:')
print(' optimizer=\'adam\'; epochs=200')
self.pretrain(x, x_count, batch_size)
self.pretrained = True
elif ae_weights is not None:
self.autoencoder.load_weights(ae_weights)
print('ae_weights is loaded successfully.')
# anneal tau
tau0 = 1.
min_tau = 0.5
anneal_rate = 0.0003
tau = tau0
# es = EarlyStopping(monitor="loss", patience=20, verbose=1)
for e in range(maxiter):
if e % 100 == 0:
tau = max( tau0*np.exp( -anneal_rate * e),min_tau )
tau_in = np.ones( x[0].shape,dtype='float32' ) * tau
print(tau)
print("Epoch %d/%d" % (e, maxiter))
self.model.fit(x=[x[0], x[1], tau_in], y=x_count, batch_size=batch_size, epochs=1, shuffle=True)
self.model.save_weights(model_file)
if __name__ == "__main__":
# setting the hyper parameters
import argparse
parser = argparse.ArgumentParser(description='train',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--batch_size', default=256, type=int)
parser.add_argument('--data_file', default='data.h5')
parser.add_argument('--pretrain_epochs', default=300, type=int)
parser.add_argument('--max_iters', default=2000, type=int)
parser.add_argument('--gamma', default=.01, type=float)
parser.add_argument('--ae_weights', default=None)
parser.add_argument('--ae_weight_file', default='ae_weights.h5')
parser.add_argument('--model_weight_file', default='model_weights.h5')
args = parser.parse_args()
# load dataset
optimizer = Adam(amsgrad=True)
data_mat = h5py.File(args.data_file)
x = np.array(data_mat['X'])
y = np.array(data_mat['Y'])
true_count = np.array(data_mat['true_count'])
data_mat.close()
x = np.floor(x)
# preprocessing scRNA-seq read counts matrix
adata = sc.AnnData(x)
adata.obs['Group'] = y
adata = read_dataset(adata,
transpose=False,
test_split=False,
copy=True)
adata = normalize(adata,
size_factors=True,
normalize_input=True,
logtrans_input=True)
input_size = adata.n_vars
print(adata.X.shape)
print(y.shape)
x_sd = adata.X.std(0)
x_sd_median = np.median(x_sd)
print("median of gene sd: %.5f" % x_sd_median)
print(args)
zinbae_model = ZINB_AE(dims=[input_size, 64, 32], noise_sd=2.5)
zinbae_model.autoencoder.summary()
zinbae_model.model.summary()
# Pretrain autoencoders before clustering
if args.ae_weights is None:
zinbae_model.pretrain(x=[adata.X, adata.obs.size_factors], x_count=adata.raw.X, batch_size=args.batch_size, epochs=args.pretrain_epochs,
optimizer=optimizer, ae_file=args.ae_weight_file)
zinbae_model.fit(x=[adata.X, adata.obs.size_factors], x_count=[adata.raw.X, adata.raw.X], batch_size=args.batch_size, ae_weights=args.ae_weights,
maxiter=args.max_iters, loss_weights=[args.gamma, 1], optimizer=optimizer, model_file=args.model_weight_file)
# Impute error
x_impute = zinbae_model.autoencoder.predict(x=[adata.X, adata.obs.size_factors])
raw_error = imputate_error(adata.raw.X, true_count, x_raw=adata.raw.X)
imputation_error = imputate_error(x_impute, true_count, x_raw=adata.raw.X)
print("Before imputation error: %.4f, after imputation error: %.4f" % (raw_error, imputation_error))
| 10,280 | 39.636364 | 154 | py |
ZINBAE | ZINBAE-master/loss.py | import numpy as np
import tensorflow as tf
from keras import backend as K
def _nan2zero(x):
return tf.where(tf.is_nan(x), tf.zeros_like(x), x)
def _nan2inf(x):
return tf.where(tf.is_nan(x), tf.zeros_like(x)+np.inf, x)
def _nelem(x):
nelem = tf.reduce_sum(tf.cast(~tf.is_nan(x), tf.float32))
return tf.cast(tf.where(tf.equal(nelem, 0.), 1., nelem), x.dtype)
def _reduce_mean(x):
nelem = _nelem(x)
x = _nan2zero(x)
return tf.divide(tf.reduce_sum(x), nelem)
def mse_loss(y_true, y_pred):
ret = tf.square(y_pred - y_true)
return _reduce_mean(ret)
def mse_loss_v2(y_true, y_pred):
y_true = tf.log(y_true+1)
y_pred = tf.log(y_pred+1)
ret = tf.square(y_pred - y_true)
return _reduce_mean(ret)
class NB(object):
def __init__(self, theta=None, masking=False, scope='nbinom_loss/',
scale_factor=1.0, debug=False):
# for numerical stability
self.eps = 1e-10
self.scale_factor = scale_factor
self.debug = debug
self.scope = scope
self.masking = masking
self.theta = theta
def loss(self, y_true, y_pred, mean=True):
scale_factor = self.scale_factor
eps = self.eps
with tf.name_scope(self.scope):
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32) * scale_factor
if self.masking:
nelem = _nelem(y_true)
y_true = _nan2zero(y_true)
# Clip theta
theta = tf.minimum(self.theta, 1e6)
t1 = tf.lgamma(theta+eps) + tf.lgamma(y_true+1.0) - tf.lgamma(y_true+theta+eps)
t2 = (theta+y_true) * tf.log(1.0 + (y_pred/(theta+eps))) + (y_true * (tf.log(theta+eps) - tf.log(y_pred+eps)))
if self.debug:
assert_ops = [
tf.verify_tensor_all_finite(y_pred, 'y_pred has inf/nans'),
tf.verify_tensor_all_finite(t1, 't1 has inf/nans'),
tf.verify_tensor_all_finite(t2, 't2 has inf/nans')]
tf.summary.histogram('t1', t1)
tf.summary.histogram('t2', t2)
with tf.control_dependencies(assert_ops):
final = t1 + t2
else:
final = t1 + t2
final = _nan2inf(final)
if mean:
if self.masking:
final = tf.divide(tf.reduce_sum(final), nelem)
else:
final = tf.reduce_mean(final)
return final
class ZINB(NB):
def __init__(self, pi, ridge_lambda=0.0, scope='zinb_loss/', **kwargs):
super().__init__(scope=scope, **kwargs)
self.pi = pi
self.ridge_lambda = ridge_lambda
def loss(self, y_true, y_pred, mean=True):
scale_factor = self.scale_factor
eps = self.eps
with tf.name_scope(self.scope):
# reuse existing NB neg.log.lik.
# mean is always False here, because everything is calculated
# element-wise. we take the mean only in the end
nb_case = super().loss(y_true, y_pred, mean=False) - tf.log(1.0-self.pi+eps)
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32) * scale_factor
theta = tf.minimum(self.theta, 1e6)
zero_nb = tf.pow(theta/(theta+y_pred+eps), theta)
zero_case = -tf.log(self.pi + ((1.0-self.pi)*zero_nb)+eps)
result = tf.where(tf.less(y_true, 1e-8), zero_case, nb_case)
ridge = self.ridge_lambda*tf.square(self.pi)
result += ridge
if mean:
if self.masking:
result = _reduce_mean(result)
else:
result = tf.reduce_mean(result)
result = _nan2inf(result)
if self.debug:
tf.summary.histogram('nb_case', nb_case)
tf.summary.histogram('zero_nb', zero_nb)
tf.summary.histogram('zero_case', zero_case)
tf.summary.histogram('ridge', ridge)
return result
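# Minimal wiring sketch (illustration only), mirroring how these losses are used
# in ZINBAE.py: `pi` and `disp` are assumed to be Keras tensors taken from the
# network's own output layers, and the resulting bound method is passed to
# model.compile() as the loss.
#
#   zinb = ZINB(pi, theta=disp, ridge_lambda=0.0)
#   model.compile(loss=zinb.loss, optimizer='adam')
#
# NB can be wired the same way when no zero-inflation gate is needed:
#
#   nb = NB(theta=disp)
#   model.compile(loss=nb.loss, optimizer='adam')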
| 4,141 | 30.142857 | 122 | py |
ZINBAE | ZINBAE-master/layers.py | from keras.engine.topology import Layer
from keras.layers import Lambda
from keras import backend as K
import tensorflow as tf
class ConstantDispersionLayer(Layer):
'''
An identity layer which allows us to inject extra parameters
such as dispersion to Keras models
'''
def __init__(self, **kwargs):
super().__init__(**kwargs)
def build(self, input_shape):
self.theta = self.add_weight(shape=(1, input_shape[1]),
initializer='zeros',
trainable=True,
name='theta')
self.theta_exp = tf.clip_by_value(K.exp(self.theta), 1e-3, 1e4)
super().build(input_shape)
def call(self, x):
return tf.identity(x)
def compute_output_shape(self, input_shape):
return input_shape
class SliceLayer(Layer):
def __init__(self, index, **kwargs):
self.index = index
super().__init__(**kwargs)
def build(self, input_shape):
if not isinstance(input_shape, list):
raise ValueError('Input should be a list')
super().build(input_shape)
def call(self, x):
assert isinstance(x, list), 'SliceLayer input is not a list'
return x[self.index]
def compute_output_shape(self, input_shape):
return input_shape[self.index]
nan2zeroLayer = Lambda(lambda x: tf.where(tf.is_nan(x), tf.zeros_like(x), x))
ColWiseMultLayer = lambda name: Lambda(lambda l: l[0]*(tf.matmul(tf.reshape(l[1], (-1,1)),
tf.ones((1, l[0].get_shape()[1]),
dtype=l[1].dtype))),
name=name)
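# Shape note (added comment): ColWiseMultLayer('output')([mean, sf]) takes `mean`
# of shape (batch, genes) and per-cell size factors `sf` of shape (batch,) or
# (batch, 1), and rescales every gene of a cell by that cell's size factor by
# broadcasting the scalar across the gene axis.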
| 1,798 | 32.314815 | 98 | py |
ZINBAE | ZINBAE-master/ZINBAE0.py | """
Implementation of the ZINBAE0 model for scRNA-seq data
"""
from time import time
import numpy as np
from keras.models import Model
import keras.backend as K
from keras.engine.topology import Layer, InputSpec
from keras.layers import Dense, Input, GaussianNoise, Layer, Activation, Lambda, Multiply, BatchNormalization, Reshape, Concatenate, RepeatVector, Permute
from keras.models import Model
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils.vis_utils import plot_model
from keras.callbacks import EarlyStopping
from sklearn.cluster import KMeans
from sklearn import metrics
import h5py
import scanpy.api as sc
from layers import ConstantDispersionLayer, SliceLayer, ColWiseMultLayer
from loss import NB, ZINB, mse_loss_v2
from preprocess import read_dataset, normalize
import tensorflow as tf
from numpy.random import seed
seed(2211)
from tensorflow import set_random_seed
set_random_seed(2211)
MeanAct = lambda x: tf.clip_by_value(K.exp(x), 1e-5, 1e6)
DispAct = lambda x: tf.clip_by_value(tf.nn.softplus(x), 1e-4, 1e4)
def mean_MSE(x_impute, x_real):
return np.mean(np.square(np.log(x_impute+1)-np.log(x_real+1)))
def imputate_error(x_impute, x_real, x_raw):
x_impute_log = np.log(x_impute[(x_raw-x_real)<0]+1)
x_real_log = np.log(x_real[(x_raw-x_real)<0]+1)
return np.sum(np.abs(x_impute_log-x_real_log))/np.sum(x_real_log>0)
def autoencoder(dims, noise_sd=0, init='glorot_uniform', act='relu'):
"""
Fully connected auto-encoder model, symmetric.
Arguments:
dims: list of number of units in each layer of encoder. dims[0] is input dim, dims[-1] is units in hidden layer.
The decoder is symmetric with encoder. So number of layers of the auto-encoder is 2*len(dims)-1
        act: activation applied to the internal encoder/decoder layers; the input layer and the output heads (pi, dispersion, mean) use their own activations
return:
Model of autoencoder
"""
n_stacks = len(dims) - 1
# input
sf_layer = Input(shape=(1,), name='size_factors')
x = Input(shape=(dims[0],), name='counts')
h = x
h = GaussianNoise(noise_sd, name='input_noise')(h)
# internal layers in encoder
for i in range(n_stacks-1):
h = Dense(dims[i + 1], kernel_initializer=init, name='encoder_%d' % i)(h)
h = BatchNormalization(center=True, scale=False, name='encoder_batchnorm_%d' % i)(h)
h = Activation(act, name='encoder_act_%d' % i)(h)
# hidden layer
h = Dense(dims[-1], kernel_initializer=init, name='encoder_hidden')(h) # hidden layer, features are extracted from here
h = BatchNormalization(center=True, scale=False, name='encoder_hidden_batchnorm_%d' % i)(h)
h = Activation(act, name='encoder_hidden_act')(h)
# internal layers in decoder
for i in range(n_stacks-1, 0, -1):
h = Dense(dims[i], kernel_initializer=init, name='decoder_%d' % i)(h)
h = BatchNormalization(center=True, scale=False, name='decoder_batchnorm_%d' % i)(h)
h = Activation(act, name='decoder_act_%d' % i)(h)
# output
pi = Dense(dims[0], activation='sigmoid', kernel_initializer=init, name='pi')(h)
disp = Dense(dims[0], activation=DispAct, kernel_initializer=init, name='dispersion')(h)
mean = Dense(dims[0], activation=MeanAct, kernel_initializer=init, name='mean')(h)
output = ColWiseMultLayer(name='output')([mean, sf_layer])
output = SliceLayer(0, name='slice')([output, disp, pi])
return Model(inputs=[x, sf_layer], outputs=output)
class ZINB_AE0(object):
def __init__(self,
dims,
noise_sd=0,
ridge=0,
debug=False,
eps = 1e-20):
self.dims = dims
self.input_dim = dims[0]
self.n_stacks = len(self.dims) - 1
self.noise_sd = noise_sd
self.act = 'relu'
self.ridge = ridge
self.debug = debug
        self.eps = eps
        self.pretrained = False
self.autoencoder = autoencoder(self.dims, noise_sd=self.noise_sd, act = self.act)
self.pi = pi = self.autoencoder.get_layer(name='pi').output
self.disp = disp = self.autoencoder.get_layer(name='dispersion').output
zinb = ZINB(pi, theta=disp, ridge_lambda=self.ridge, debug=self.debug)
self.zinb_loss = zinb.loss
self.model = Model(inputs=[self.autoencoder.input[0], self.autoencoder.input[1]],
outputs=self.autoencoder.output)
def pretrain(self, x, x_count, batch_size=256, epochs=200, optimizer='adam', ae_file='ae_weights.h5'):
print('...Pretraining autoencoder...')
self.autoencoder.compile(loss=self.zinb_loss, optimizer=optimizer)
es = EarlyStopping(monitor="loss", patience=50, verbose=1)
self.autoencoder.fit(x=x, y=x_count, batch_size=batch_size, epochs=epochs, callbacks=[es], shuffle=True)
self.autoencoder.save_weights(ae_file)
print('Pretrained weights are saved to ./' + str(ae_file))
self.pretrained = True
    def fit(self, x, x_count, batch_size=256, maxiter=2000, ae_weights=None,
loss_weights=0.1, optimizer='adam', model_file='model_weight.h5'):
class custom_loss(object):
def __init__(self, pi=None, zinb_loss=None):
self.pi = pi
self.zinb_loss = zinb_loss
def custom_loss(self, y_true, y_pred):
loss1 = mse_loss_v2(y_true, (1-self.pi)*y_pred)
loss2 = self.zinb_loss(y_true, y_pred)
return loss1*loss_weights + loss2
loss = custom_loss(self.pi, self.zinb_loss)
self.model.compile(loss=loss.custom_loss, optimizer=optimizer)
if not self.pretrained and ae_weights is None:
print('...pretraining autoencoders using default hyper-parameters:')
print(' optimizer=\'adam\'; epochs=200')
self.pretrain(x, x_count, batch_size)
self.pretrained = True
elif ae_weights is not None:
self.autoencoder.load_weights(ae_weights)
print('ae_weights is loaded successfully.')
self.model.fit(x=[x[0], x[1]], y=x_count, batch_size=batch_size, epochs=maxiter, shuffle=True)
self.model.save_weights(model_file)
if __name__ == "__main__":
# setting the hyper parameters
import argparse
parser = argparse.ArgumentParser(description='train',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--batch_size', default=256, type=int)
parser.add_argument('--data_file', default='data.h5')
parser.add_argument('--pretrain_epochs', default=300, type=int)
parser.add_argument('--max_iters', default=500, type=int)
parser.add_argument('--gamma', default=.01, type=float)
parser.add_argument('--ae_weights', default=None)
parser.add_argument('--ae_weight_file', default='ae_weights.h5')
parser.add_argument('--model_weight_file', default='model_weights.h5')
args = parser.parse_args()
# load dataset
optimizer = Adam(amsgrad=True)
data_mat = h5py.File(args.data_file)
x = np.array(data_mat['X'])
y = np.array(data_mat['Y'])
true_count = np.array(data_mat['true_count'])
data_mat.close()
x = np.floor(x)
# preprocessing scRNA-seq read counts matrix
adata = sc.AnnData(x)
adata.obs['Group'] = y
adata = read_dataset(adata,
transpose=False,
test_split=False,
copy=True)
adata = normalize(adata,
size_factors=True,
normalize_input=True,
logtrans_input=True)
input_size = adata.n_vars
print(adata.X.shape)
print(y.shape)
x_sd = adata.X.std(0)
x_sd_median = np.median(x_sd)
print("median of gene sd: %.5f" % x_sd_median)
print(args)
    zinbae0_model = ZINB_AE0(dims=[input_size, 64, 32], noise_sd=2.5)
zinbae0_model.autoencoder.summary()
zinbae0_model.model.summary()
# Pretrain autoencoders before clustering
if args.ae_weights is None:
zinbae0_model.pretrain(x=[adata.X, adata.obs.size_factors], x_count=adata.raw.X, batch_size=args.batch_size, epochs=args.pretrain_epochs,
optimizer=optimizer, ae_file=args.ae_weight_file)
zinbae0_model.fit(x=[adata.X, adata.obs.size_factors], x_count=adata.raw.X, batch_size=args.batch_size, ae_weights=args.ae_weights,
maxiter=args.max_iters, loss_weights=args.gamma, optimizer=optimizer, model_file=args.model_weight_file)
# Impute error
x_impute = zinbae0_model.autoencoder.predict(x=[adata.X, adata.obs.size_factors])
raw_error = imputate_error(adata.raw.X, true_count, x_raw=adata.raw.X)
imputation_error = imputate_error(x_impute, true_count, x_raw=adata.raw.X)
print("Before imputation error: %.4f, after imputation error: %.4f" % (raw_error, imputation_error))
| 8,888 | 39.040541 | 154 | py |
pyterpol | pyterpol-master/docs/conf.py | import os
# -*- coding: utf-8 -*-
#
# Pyterpol documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 26 12:34:08 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pyterpol'
copyright = u'2016, Nemravova Jana'
author = u'Nemravova Jana'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Pyterpol v0.0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pyterpoldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Pyterpol.tex', u'Pyterpol Documentation',
u'Nemravova Jana', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyterpol', u'Pyterpol Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Pyterpol', u'Pyterpol Documentation',
author, 'Pyterpol', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# Add the project root to the module search path so autodoc can import the package
# (a bare os.path.abspath('../') call on its own has no effect).
import sys
sys.path.insert(0, os.path.abspath('../'))
| 10,169 | 27.647887 | 80 | py |
mapalignment | mapalignment-master/projects/mapalign/mapalign_multires/loss_utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
sys.path.append("../../utils")
import tf_utils
def displacement_error(gt, preds, level_loss_coefs, polygon_map, disp_loss_params):
"""
    :param gt: Groundtruth displacement map bounded between -1 and 1. Shape [batch, height, width, channels (2)]
    :param preds: Predicted displacement maps bounded between -1 and 1. Shape [batch, levels, height, width, channels (2)]
    :param level_loss_coefs: Loss coefficients to apply to each level
    :param polygon_map: Used as mask for fill, outline and vertex. Shape [batch, height, width, channels (3)]
    :param disp_loss_params: Dict with "fill_coef", "edge_coef" and "vertex_coef" weighting the three polygon channels
    :return: error
"""
height, width, _ = gt.get_shape().as_list()[1:]
with tf.name_scope("euclidean_error"):
# Compute weight mask
cropped_polygon_map = tf.image.resize_image_with_crop_or_pad(polygon_map, height, width)
# TODO: normalize correction_weights
correction_weights = 1 / (
tf.reduce_sum(tf.reduce_sum(cropped_polygon_map, axis=1), axis=1) + tf.keras.backend.epsilon())
weigths = tf.constant(
[disp_loss_params["fill_coef"], disp_loss_params["edge_coef"], disp_loss_params["vertex_coef"]],
dtype=tf.float32)
corrected_weights = weigths * correction_weights
corrected_weights = tf.expand_dims(tf.expand_dims(corrected_weights, axis=1), axis=1)
weighted_mask = tf.reduce_sum(cropped_polygon_map * corrected_weights, axis=-1)
weighted_mask = tf.expand_dims(weighted_mask, axis=1) # Add levels dimension
# Compute errors
gt = tf.expand_dims(gt, axis=1) # Add levels dimension
pixelwise_euclidean_error = tf.reduce_sum(tf.square(gt - preds), axis=-1)
masked_pixelwise_euclidean_error = pixelwise_euclidean_error * weighted_mask
# Sum errors
summed_error = tf.reduce_sum(masked_pixelwise_euclidean_error, axis=0) # Batch sum
summed_error = tf.reduce_sum(summed_error, axis=-1) # Col/Width sum
summed_error = tf.reduce_sum(summed_error, axis=-1) # Row/Height sum
summed_error = summed_error * level_loss_coefs # Apply Level loss coefficients
summed_error = tf.reduce_sum(summed_error)
# Sum weights
summed_weighted_mask = tf.reduce_sum(weighted_mask)
loss = summed_error / (summed_weighted_mask + tf.keras.backend.epsilon())
return loss
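# Shape sketch (using the toy sizes from main() below): with batch=1, levels=2, a 3x3 inner
# patch and a 5x5 outer patch, gt is [1, 3, 3, 2], preds is [1, 2, 3, 3, 2] and polygon_map
# is [1, 5, 5, 3] (center-cropped to 3x3 inside this function); the returned loss is a scalar.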
def segmentation_error(seg_gt, seg_pred_logits, level_loss_coefs, seg_loss_params):
"""
    :param seg_gt: Groundtruth segmentation (fill, outline, vertex channels). Shape [batch, height, width, channels (3)]
    :param seg_pred_logits: Predicted segmentation logits, including the background class. Shape [batch, levels, height, width, channels (4)]
    :param level_loss_coefs: Loss coefficients to apply to each level
    :param seg_loss_params: Dict with "background_coef", "fill_coef", "edge_coef" and "vertex_coef"
    :return: loss
"""
# print("--- segmentation_error ---")
_, levels, height, width, _ = seg_pred_logits.get_shape().as_list()
# Crop seg_gt to match resolution of seg_pred_logits
seg_gt = tf.image.resize_image_with_crop_or_pad(seg_gt, height, width)
# Add background class to gt segmentation
if tf_utils.get_tf_version() == "1.4.0":
seg_gt_bg = tf.reduce_prod(1 - seg_gt, axis=-1,
keep_dims=True) # Equals 0 if pixel is either fill, outline or vertex. Equals 1 otherwise
else:
seg_gt_bg = tf.reduce_prod(1 - seg_gt, axis=-1,
keepdims=True) # Equals 0 if pixel is either fill, outline or vertex. Equals 1 otherwise
seg_gt = tf.concat([seg_gt_bg, seg_gt], axis=-1)
# Compute weight mask
# class_sums = tf.reduce_sum(tf.reduce_sum(seg_gt, axis=1), axis=1)
# seg_class_balance_weights = 1 / (
# class_sums + tf.keras.backend.epsilon())
seg_class_weights = tf.constant([[seg_loss_params["background_coef"], seg_loss_params["fill_coef"],
seg_loss_params["edge_coef"], seg_loss_params["vertex_coef"]]],
dtype=tf.float32)
# balanced_class_weights = seg_class_balance_weights * seg_class_weights
balanced_class_weights = seg_class_weights
balanced_class_weights = tf.expand_dims(balanced_class_weights, axis=1) # Add levels dimension
balanced_class_weights = tf.tile(balanced_class_weights, multiples=[1, levels, 1]) # Repeat on levels dimension
level_loss_coefs = tf.expand_dims(level_loss_coefs, axis=-1) # Add channels dimension
final_weights = balanced_class_weights * level_loss_coefs
final_weights = tf.expand_dims(tf.expand_dims(final_weights, axis=2), axis=2) # Add spatial dimensions
# Adapt seg_gt shape to seg_pred_logits
seg_gt = tf.expand_dims(seg_gt, axis=1) # Add levels dimension
seg_gt = tf.tile(seg_gt, multiples=[1, levels, 1, 1, 1]) # Add levels dimension
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=seg_gt, logits=seg_pred_logits)
# Now apply the various weights
weighted_loss = loss * final_weights
final_loss = tf.reduce_mean(weighted_loss)
return final_loss
def laplacian_penalty(preds, level_loss_coefs):
in_channels = preds.shape[-1]
with tf.name_scope("laplacian_penalty"):
laplace_k = tf_utils.make_depthwise_kernel([[0.5, 1.0, 0.5],
[1.0, -6., 1.0],
[0.5, 1.0, 0.5]], in_channels)
# Reshape preds to respect the input format of the depthwise_conv2d op
shape = [preds.shape[0] * preds.shape[1]] + preds.get_shape().as_list()[2:]
reshaped_preds = tf.reshape(preds, shape)
laplacians = tf.nn.depthwise_conv2d(reshaped_preds, laplace_k, [1, 1, 1, 1], padding='SAME')
penalty_map = tf.reduce_sum(tf.square(laplacians), axis=-1)
# Reshape penalty_map to shape compatible with preds
shape = preds.get_shape().as_list()[:-1]
reshaped_penalty_map = tf.reshape(penalty_map, shape)
# Compute mean penalty per level over spatial dimension as well as over batches
level_penalties = tf.reduce_mean(reshaped_penalty_map, axis=0) # Batch mean
level_penalties = tf.reduce_mean(level_penalties, axis=-1) # Col/Width mean
level_penalties = tf.reduce_mean(level_penalties, axis=-1) # Row/Height mean
# Apply level_loss_coefs
weighted_penalties = level_penalties * level_loss_coefs
penalty = tf.reduce_mean(weighted_penalties) # Levels mean
return penalty
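# The 3x3 kernel above is a zero-sum discrete Laplacian, so the penalty is zero for constant
# or affine displacement fields and grows with local curvature, which pushes the network
# towards spatially smooth displacement predictions.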
def main(_):
batch_size = 1
levels = 2
patch_inner_res = 3
patch_outer_res = 5
disp_ = tf.placeholder(tf.float32, [batch_size, patch_inner_res, patch_inner_res, 2])
disps = tf.placeholder(tf.float32, [batch_size, levels, patch_inner_res, patch_inner_res, 2])
seg_ = tf.placeholder(tf.float32, [batch_size, patch_inner_res, patch_inner_res, 3])
    # Logits need 4 channels because segmentation_error prepends a background class to the groundtruth
    seg_logits = tf.placeholder(tf.float32, [batch_size, levels, patch_inner_res, patch_inner_res, 4])
    level_loss_coefs = tf.placeholder(tf.float32, [levels])
    mask = tf.placeholder(tf.float32, [batch_size, patch_outer_res, patch_outer_res, 3])
    # Illustrative loss parameters for this self-test (arbitrary values, not tuned)
    disp_loss_params = {"fill_coef": 0.1, "edge_coef": 1.0, "vertex_coef": 10.0}
    seg_loss_params = {"background_coef": 0.05, "fill_coef": 0.1, "edge_coef": 1.0, "vertex_coef": 10.0}
    disp_loss = displacement_error(disp_, disps, level_loss_coefs, mask, disp_loss_params)
    seg_loss = segmentation_error(seg_, seg_logits, level_loss_coefs, seg_loss_params)
penalty = laplacian_penalty(disps, level_loss_coefs)
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
with tf.Session() as sess:
sess.run(init_op)
disp_gt = np.zeros([batch_size, patch_inner_res, patch_inner_res, 2])
disp_gt[0, 0, 0, 0] = 1
disp_preds = np.zeros([batch_size, levels, patch_inner_res, patch_inner_res, 2])
disp_preds[0, 0, 0, 0, 0] = 1
disp_preds[0, 1, 0, 0, 0] = 1
seg_gt = np.zeros([batch_size, patch_inner_res, patch_inner_res, 3])
# seg_gt += 0.5
seg_gt[0, 0, 0, 0] = 1.0
seg_gt[0, 0, 1, 1] = 1.0
seg_gt[0, 0, 2, 2] = 1.0
seg_gt[0, 1, 0, 0] = 1.0
seg_gt[0, 1, 1, 1] = 1.0
seg_gt[0, 1, 2, 2] = 1.0
        seg_pred_logits = np.zeros([batch_size, levels, patch_inner_res, patch_inner_res, 4])  # background + fill + outline + vertex
seg_pred_logits += -100
seg_pred_logits[0, 0, 0, 0, 0] = 100
seg_pred_logits[0, 0, 0, 1, 1] = 100
seg_pred_logits[0, 0, 0, 2, 2] = -100
seg_pred_logits[0, 1, 0, 0, 0] = 100
seg_pred_logits[0, 1, 0, 1, 1] = 100
seg_pred_logits[0, 1, 0, 2, 2] = -100
seg_pred_logits[0, 0, 1, 0, 0] = 100
seg_pred_logits[0, 0, 1, 1, 1] = 100
seg_pred_logits[0, 0, 1, 2, 2] = -100
seg_pred_logits[0, 1, 1, 0, 0] = 100
seg_pred_logits[0, 1, 1, 1, 1] = 100
seg_pred_logits[0, 1, 1, 2, 2] = -100
coefs = np.array([1, 0.5])
poly_mask = np.zeros([batch_size, patch_outer_res, patch_outer_res, 3])
poly_mask[0, 1, 1, 0] = 1
computed_disp_loss, computed_seg_loss, computed_penalty = sess.run(
[disp_loss, seg_loss, penalty], feed_dict={disp_: disp_gt, disps: disp_preds,
seg_: seg_gt, seg_logits: seg_pred_logits,
level_loss_coefs: coefs, mask: poly_mask})
print("computed_disp_loss:")
print(computed_disp_loss)
print("computed_seg_loss:")
print(computed_seg_loss)
print("computed_penalty:")
print(computed_penalty)
if __name__ == '__main__':
tf.app.run(main=main)
| 9,420 | 44.73301 | 125 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/imitation/behavior_cloning_tf2.py | import copy
import os
import pickle
import numpy as np
import tensorflow as tf
from ray.rllib.policy import Policy as RllibPolicy
from tensorflow import keras
from tensorflow.compat.v1.keras.backend import get_session, set_session
from human_aware_rl.data_dir import DATA_DIR
from human_aware_rl.human.process_dataframes import (
get_human_human_trajectories,
get_trajs_from_data,
)
from human_aware_rl.rllib.rllib import (
RlLibAgent,
evaluate,
get_base_ae,
softmax,
)
from human_aware_rl.static import CLEAN_2019_HUMAN_DATA_TRAIN
from human_aware_rl.utils import get_flattened_keys, recursive_dict_update
from overcooked_ai_py.mdp.actions import Action
from overcooked_ai_py.mdp.overcooked_env import DEFAULT_ENV_PARAMS
#################
# Configuration #
#################
BC_SAVE_DIR = os.path.join(DATA_DIR, "bc_runs")
DEFAULT_DATA_PARAMS = {
"layouts": ["cramped_room"],
"check_trajectories": False,
"featurize_states": True,
"data_path": CLEAN_2019_HUMAN_DATA_TRAIN,
}
DEFAULT_MLP_PARAMS = {
# Number of fully connected layers to use in our network
"num_layers": 2,
# Each int represents a layer of that hidden size
"net_arch": [64, 64],
}
DEFAULT_TRAINING_PARAMS = {
"epochs": 100,
"validation_split": 0.15,
"batch_size": 64,
"learning_rate": 1e-3,
"use_class_weights": False,
}
DEFAULT_EVALUATION_PARAMS = {
"ep_length": 400,
"num_games": 1,
"display": False,
}
DEFAULT_BC_PARAMS = {
"eager": True,
"use_lstm": False,
"cell_size": 256,
"data_params": DEFAULT_DATA_PARAMS,
"mdp_params": {"layout_name": "cramped_room", "old_dynamics": False},
"env_params": DEFAULT_ENV_PARAMS,
"mdp_fn_params": {},
"mlp_params": DEFAULT_MLP_PARAMS,
"training_params": DEFAULT_TRAINING_PARAMS,
"evaluation_params": DEFAULT_EVALUATION_PARAMS,
"action_shape": (len(Action.ALL_ACTIONS),),
}
# Boolean indicating whether all param dependencies have been loaded. Used to prevent re-loading unnecessarily
_params_initalized = False
def _get_base_ae(bc_params):
return get_base_ae(bc_params["mdp_params"], bc_params["env_params"])
def _get_observation_shape(bc_params):
"""
Helper function for creating a dummy environment from "mdp_params" and "env_params" specified
in bc_params and returning the shape of the observation space
"""
base_ae = _get_base_ae(bc_params)
base_env = base_ae.env
dummy_state = base_env.mdp.get_standard_start_state()
obs_shape = base_env.featurize_state_mdp(dummy_state)[0].shape
return obs_shape
# For lazily loading the default params. Prevents loading on every import of this module
def get_bc_params(**args_to_override):
"""
Loads default bc params defined globally. For each key in args_to_override, overrides the default with the
value specified for that key. Recursively checks all children. If key not found, creates new top level parameter.
    Note: Even though children can share keys, for simplicity, we enforce the condition that all keys at all levels must be distinct
"""
global _params_initalized, DEFAULT_BC_PARAMS
if not _params_initalized:
DEFAULT_BC_PARAMS["observation_shape"] = _get_observation_shape(
DEFAULT_BC_PARAMS
)
        _params_initalized = True  # cache the computed observation shape so it is not recomputed on every call
params = copy.deepcopy(DEFAULT_BC_PARAMS)
for arg, val in args_to_override.items():
updated = recursive_dict_update(params, arg, val)
if not updated:
print(
"WARNING, no value for specified bc argument {} found in schema. Adding as top level parameter".format(
arg
)
)
all_keys = get_flattened_keys(params)
if len(all_keys) != len(set(all_keys)):
raise ValueError(
"Every key at every level must be distict for BC params!"
)
return params
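# Usage sketch (override values are illustrative): nested keys are matched recursively, so
#   bc_params = get_bc_params(layouts=["cramped_room"], epochs=20, use_lstm=False)
# updates data_params["layouts"] and training_params["epochs"] in the returned copy while
# leaving the module-level defaults untouched.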
##############
# Model code #
##############
class LstmStateResetCallback(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
self.model.reset_states()
def _pad(sequences, maxlen=None, default=0):
if not maxlen:
maxlen = max([len(seq) for seq in sequences])
for seq in sequences:
pad_len = maxlen - len(seq)
seq.extend([default] * pad_len)
return sequences
def load_data(bc_params, verbose=False):
processed_trajs = get_human_human_trajectories(
**bc_params["data_params"], silent=not verbose
)
inputs, targets = (
processed_trajs["ep_states"],
processed_trajs["ep_actions"],
)
if bc_params["use_lstm"]:
seq_lens = np.array([len(seq) for seq in inputs])
seq_padded = _pad(
inputs,
default=np.zeros(
(
len(
inputs[0][0],
)
)
),
)
targets_padded = _pad(targets, default=np.zeros(1))
seq_t = np.dstack(seq_padded).transpose((2, 0, 1))
targets_t = np.dstack(targets_padded).transpose((2, 0, 1))
return seq_t, seq_lens, targets_t
else:
return np.vstack(inputs), None, np.vstack(targets)
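# Return layout (derived from the code above): the feed-forward case yields
# (stacked observations [N, obs_dim], None, stacked actions); the LSTM case yields
# (padded sequences [num_games, T, obs_dim], per-game lengths [num_games], padded action sequences).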
def build_bc_model(use_lstm=True, eager=False, **kwargs):
if not eager:
tf.compat.v1.disable_eager_execution()
if use_lstm:
return _build_lstm_model(**kwargs)
else:
return _build_model(**kwargs)
def train_bc_model(model_dir, bc_params, verbose=False):
inputs, seq_lens, targets = load_data(bc_params, verbose)
training_params = bc_params["training_params"]
if training_params["use_class_weights"]:
# Get class counts, and use these to compute balanced class weights
classes, counts = np.unique(targets.flatten(), return_counts=True)
weights = sum(counts) / counts
class_weights = dict(zip(classes, weights))
else:
# Default is uniform class weights
class_weights = None
# Retrieve un-initialized keras model
model = build_bc_model(
**bc_params, max_seq_len=np.max(seq_lens), verbose=verbose
)
# Initialize the model
    # Note: have to use lists for multi-output model support and not dicts because of tensorflow 2.0.0 bug
if bc_params["use_lstm"]:
loss = [
keras.losses.SparseCategoricalCrossentropy(from_logits=True),
None,
None,
]
metrics = [["sparse_categorical_accuracy"], [], []]
else:
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metrics = ["sparse_categorical_accuracy"]
model.compile(
optimizer=keras.optimizers.Adam(training_params["learning_rate"]),
loss=loss,
metrics=metrics,
)
# Customize our training loop with callbacks
callbacks = [
# Early terminate training if loss doesn't improve for "patience" epochs
keras.callbacks.EarlyStopping(monitor="loss", patience=20),
# Reduce lr by "factor" after "patience" epochs of no improvement in loss
keras.callbacks.ReduceLROnPlateau(
monitor="loss", patience=3, factor=0.1
),
# Log all metrics model was compiled with to tensorboard every epoch
keras.callbacks.TensorBoard(
log_dir=os.path.join(model_dir, "logs"), write_graph=False
),
# Save checkpoints of the models at the end of every epoch (saving only the best one so far)
keras.callbacks.ModelCheckpoint(
filepath=os.path.join(model_dir, "checkpoints"),
monitor="loss",
save_best_only=True,
),
]
## Actually train our model
# Create input dict for both models
N = inputs.shape[0]
inputs = {"Overcooked_observation": inputs}
targets = {"logits": targets}
# Inputs unique to lstm model
if bc_params["use_lstm"]:
inputs["seq_in"] = seq_lens
inputs["hidden_in"] = np.zeros((N, bc_params["cell_size"]))
inputs["memory_in"] = np.zeros((N, bc_params["cell_size"]))
# Batch size doesn't include time dimension (seq_len) so it should be smaller for rnn model
batch_size = 1 if bc_params["use_lstm"] else training_params["batch_size"]
model.fit(
inputs,
targets,
callbacks=callbacks,
batch_size=batch_size,
epochs=training_params["epochs"],
validation_split=training_params["validation_split"],
class_weight=class_weights,
verbose=2 if verbose else 0,
)
# Save the model
save_bc_model(model_dir, model, bc_params, verbose=verbose)
return model
def save_bc_model(model_dir, model, bc_params, verbose=False):
"""
Saves the specified model under the directory model_dir. This creates three items
assets/ stores information essential to reconstructing the context and tf graph
variables/ stores the model's trainable weights
        saved_model.pb      the saved state of the model object
Additionally, saves a pickled dictionary containing all the parameters used to construct this model
at model_dir/metadata.pickle
"""
if verbose:
print("Saving bc model at ", model_dir)
model.save(model_dir, save_format="tf")
with open(os.path.join(model_dir, "metadata.pickle"), "wb") as f:
pickle.dump(bc_params, f)
def load_bc_model(model_dir, verbose=False):
"""
Returns the model instance (including all compilation data like optimizer state) and a dictionary of parameters
used to create the model
"""
if verbose:
print("Loading bc model from ", model_dir)
model = keras.models.load_model(model_dir, custom_objects={"tf": tf})
with open(os.path.join(model_dir, "metadata.pickle"), "rb") as f:
bc_params = pickle.load(f)
return model, bc_params
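# End-to-end sketch (the run directory name is illustrative):
#   params = get_bc_params()
#   model = train_bc_model(os.path.join(BC_SAVE_DIR, "my_run"), params)  # also saves to disk
#   model, params = load_bc_model(os.path.join(BC_SAVE_DIR, "my_run"))
#   mean_sparse_reward = evaluate_bc_model(model, params)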
def evaluate_bc_model(model, bc_params, verbose=False):
"""
Creates an AgentPair object containing two instances of BC Agents, whose policies are specified by `model`. Runs
a rollout using AgentEvaluator class in an environment specified by bc_params
Arguments
- model (tf.keras.Model) A function that maps featurized overcooked states to action logits
        - bc_params (dict)      Specifies the environment in which to evaluate the agent (i.e. layout, reward_shaping_param)
as well as the configuration for the rollout (rollout_length)
Returns
- reward (int) Total sparse reward achieved by AgentPair during rollout
"""
evaluation_params = bc_params["evaluation_params"]
mdp_params = bc_params["mdp_params"]
# Get reference to state encoding function used by bc agents, with compatible signature
base_ae = _get_base_ae(bc_params)
base_env = base_ae.env
def featurize_fn(state):
return base_env.featurize_state_mdp(state)
# Wrap Keras models in rllib policies
agent_0_policy = BehaviorCloningPolicy.from_model(
model, bc_params, stochastic=True
)
agent_1_policy = BehaviorCloningPolicy.from_model(
model, bc_params, stochastic=True
)
# Compute the results of the rollout(s)
results = evaluate(
eval_params=evaluation_params,
mdp_params=mdp_params,
outer_shape=None,
agent_0_policy=agent_0_policy,
agent_1_policy=agent_1_policy,
agent_0_featurize_fn=featurize_fn,
agent_1_featurize_fn=featurize_fn,
verbose=verbose,
)
# Compute the average sparse return obtained in each rollout
reward = np.mean(results["ep_returns"])
return reward
def _build_model(observation_shape, action_shape, mlp_params, **kwargs):
## Inputs
inputs = keras.Input(
shape=observation_shape, name="Overcooked_observation"
)
x = inputs
## Build fully connected layers
assert (
len(mlp_params["net_arch"]) == mlp_params["num_layers"]
), "Invalid Fully Connected params"
for i in range(mlp_params["num_layers"]):
units = mlp_params["net_arch"][i]
x = keras.layers.Dense(
units, activation="relu", name="fc_{0}".format(i)
)(x)
## output layer
logits = keras.layers.Dense(action_shape[0], name="logits")(x)
return keras.Model(inputs=inputs, outputs=logits)
def _build_lstm_model(
observation_shape,
action_shape,
mlp_params,
cell_size,
max_seq_len=20,
**kwargs
):
## Inputs
obs_in = keras.Input(
shape=(None, *observation_shape), name="Overcooked_observation"
)
seq_in = keras.Input(shape=(), name="seq_in", dtype=tf.int32)
h_in = keras.Input(shape=(cell_size,), name="hidden_in")
c_in = keras.Input(shape=(cell_size,), name="memory_in")
x = obs_in
## Build fully connected layers
assert (
len(mlp_params["net_arch"]) == mlp_params["num_layers"]
), "Invalid Fully Connected params"
for i in range(mlp_params["num_layers"]):
units = mlp_params["net_arch"][i]
x = keras.layers.TimeDistributed(
keras.layers.Dense(
units, activation="relu", name="fc_{0}".format(i)
)
)(x)
mask = keras.layers.Lambda(
lambda x: tf.sequence_mask(x, maxlen=max_seq_len)
)(seq_in)
## LSTM layer
lstm_out, h_out, c_out = keras.layers.LSTM(
cell_size,
return_sequences=True,
return_state=True,
stateful=False,
name="lstm",
)(inputs=x, mask=mask, initial_state=[h_in, c_in])
## output layer
logits = keras.layers.TimeDistributed(
keras.layers.Dense(action_shape[0]), name="logits"
)(lstm_out)
return keras.Model(
inputs=[obs_in, seq_in, h_in, c_in], outputs=[logits, h_out, c_out]
)
################
# Rllib Policy #
################
class NullContextManager:
"""
No-op context manager that does nothing
"""
def __init__(self):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
class TfContextManager:
"""
Properly sets the execution graph and session of the keras backend given a "session" object as input
Used for isolating tf execution in graph mode. Do not use with eager models or with eager mode on
"""
def __init__(self, session):
self.session = session
def __enter__(self):
self.ctx = self.session.graph.as_default()
self.ctx.__enter__()
set_session(self.session)
def __exit__(self, *args):
self.ctx.__exit__(*args)
class BehaviorCloningPolicy(RllibPolicy):
def __init__(self, observation_space, action_space, config):
"""
RLLib compatible constructor for initializing a behavior cloning model
observation_space (gym.Space|tuple) Shape of the featurized observations
action_space (gym.space|tuple) Shape of the action space (len(Action.All_ACTIONS),)
        config (dict)                   Dictionary of relevant bc params
- model_dir (str) Path to pickled keras.Model used to map observations to action logits
- stochastic (bool) Whether action should return logit argmax or sample over distribution
- bc_model (keras.Model) Pointer to loaded policy model. Overrides model_dir
- bc_params (dict) Dictionary of parameters used to train model. Required if "model" is present
- eager (bool) Whether the model should run in eager (or graph) mode. Overrides bc_params['eager'] if present
"""
super(BehaviorCloningPolicy, self).__init__(
observation_space, action_space, config
)
if "bc_model" in config and config["bc_model"]:
assert (
"bc_params" in config
), "must specify params in addition to model"
assert issubclass(
type(config["bc_model"]), keras.Model
), "model must be of type keras.Model"
model, bc_params = config["bc_model"], config["bc_params"]
else:
assert (
"model_dir" in config
), "must specify model directory if model not specified"
model, bc_params = load_bc_model(config["model_dir"])
# Save the session that the model was loaded into so it is available at inference time if necessary
self._sess = get_session()
self._setup_shapes()
# Basic check to make sure model dimensions match
assert self.observation_shape == bc_params["observation_shape"]
assert self.action_shape == bc_params["action_shape"]
self.model = model
self.stochastic = config["stochastic"]
self.use_lstm = bc_params["use_lstm"]
self.cell_size = bc_params["cell_size"]
self.eager = (
config["eager"] if "eager" in config else bc_params["eager"]
)
self.context = self._create_execution_context()
def _setup_shapes(self):
# This is here to make the class compatible with both tuples or gym.Space objs for the spaces
# Note: action_space = (len(Action.ALL_ACTIONS,)) is technically NOT the action space shape, which would be () since actions are scalars
self.observation_shape = (
self.observation_space
if type(self.observation_space) == tuple
else self.observation_space.shape
)
self.action_shape = (
self.action_space
if type(self.action_space) == tuple
else (self.action_space.n,)
)
@classmethod
def from_model_dir(cls, model_dir, stochastic=True):
model, bc_params = load_bc_model(model_dir)
config = {
"bc_model": model,
"bc_params": bc_params,
"stochastic": stochastic,
}
return cls(
bc_params["observation_shape"], bc_params["action_shape"], config
)
@classmethod
def from_model(cls, model, bc_params, stochastic=True):
config = {
"bc_model": model,
"bc_params": bc_params,
"stochastic": stochastic,
}
return cls(
bc_params["observation_shape"], bc_params["action_shape"], config
)
def compute_actions(
self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
info_batch=None,
episodes=None,
**kwargs
):
"""
Computes sampled actions for each of the corresponding OvercookedEnv states in obs_batch
Args:
obs_batch (np.array): batch of pre-process (lossless state encoded) observations
Returns:
actions (list|np.array): batch of output actions shape [BATCH_SIZE, ACTION_SHAPE]
state_outs (list): only necessary for rnn hidden states
infos (dict): dictionary of extra feature batches { "action_dist_inputs" : [BATCH_SIZE, ...] }
"""
# Cast to np.array if list (no-op if already np.array)
obs_batch = np.array(obs_batch)
# Run the model
with self.context:
action_logits, states = self._forward(obs_batch, state_batches)
# Softmax in numpy to convert logits to probabilities
action_probs = softmax(action_logits)
if self.stochastic:
# Sample according to action_probs for each row in the output
actions = np.array(
[
np.random.choice(self.action_shape[0], p=action_probs[i])
for i in range(len(action_probs))
]
)
else:
actions = np.argmax(action_logits, axis=1)
return actions, states, {"action_dist_inputs": action_logits}
def get_initial_state(self):
"""
        Returns the initial hidden and memory states for the model if it is recurrent
Note, this shadows the rllib.Model.get_initial_state function, but had to be added here as
keras does not allow mixins in custom model classes
Also note, either this function or self.model.get_initial_state (if it exists) must be called at
start of an episode
"""
if self.use_lstm:
return [
np.zeros(
self.cell_size,
),
np.zeros(
self.cell_size,
),
]
return []
def get_weights(self):
"""
No-op to keep rllib from breaking, won't be necessary in future rllib releases
"""
pass
def set_weights(self, weights):
"""
No-op to keep rllib from breaking
"""
pass
def learn_on_batch(self, samples):
"""
Static policy requires no learning
"""
return {}
def _forward(self, obs_batch, state_batches):
if self.use_lstm:
obs_batch = np.expand_dims(obs_batch, 1)
seq_lens = np.ones(len(obs_batch))
model_out = self.model.predict(
[obs_batch, seq_lens] + state_batches
)
logits, states = model_out[0], model_out[1:]
logits = logits.reshape((logits.shape[0], -1))
return logits, states
else:
return self.model.predict(obs_batch, verbose=0), []
def _create_execution_context(self):
"""
Creates a private execution context for the model
Necessary if using with rllib in order to isolate this policy model from others
"""
if self.eager:
return NullContextManager()
return TfContextManager(self._sess)
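# Rollout sketch (directory name and obs_batch are illustrative): wrap a saved BC model as an
# rllib policy and query actions for a batch of featurized observations.
#   policy = BehaviorCloningPolicy.from_model_dir(os.path.join(BC_SAVE_DIR, "my_run"))
#   state = policy.get_initial_state()  # [] for the MLP model, [h, c] for the LSTM model
#   actions, state, infos = policy.compute_actions(obs_batch, state_batches=state)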
if __name__ == "__main__":
params = get_bc_params()
model = train_bc_model(
os.path.join(BC_SAVE_DIR, "default"), params, verbose=True
)
# Evaluate our model's performance in a rollout
evaluate_bc_model(model, params)
| 22,092 | 31.925484 | 144 | py |
overcooked_ai | overcooked_ai-master/src/human_aware_rl/ppo/ppo_rllib.py | import numpy as np
import tensorflow as tf
from ray.rllib.models.tf.recurrent_net import RecurrentNetwork
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
class RllibPPOModel(TFModelV2):
"""
Model that will map environment states to action probabilities. Will be shared across agents
"""
def __init__(
self,
obs_space,
action_space,
num_outputs,
model_config,
name,
**kwargs
):
super(RllibPPOModel, self).__init__(
obs_space, action_space, num_outputs, model_config, name
)
# params we got to pass in from the call to "run"
custom_params = model_config["custom_model_config"]
## Parse custom network params
num_hidden_layers = custom_params["NUM_HIDDEN_LAYERS"]
size_hidden_layers = custom_params["SIZE_HIDDEN_LAYERS"]
num_filters = custom_params["NUM_FILTERS"]
num_convs = custom_params["NUM_CONV_LAYERS"]
d2rl = custom_params["D2RL"]
assert type(d2rl) == bool
## Create graph of custom network. It will under a shared tf scope such that all agents
## use the same model
self.inputs = tf.keras.Input(
shape=obs_space.shape, name="observations"
)
out = self.inputs
        # Apply initial conv layer with a larger kernel (why?)
if num_convs > 0:
y = tf.keras.layers.Conv2D(
filters=num_filters,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.leaky_relu,
name="conv_initial",
)
out = y(out)
# Apply remaining conv layers, if any
for i in range(0, num_convs - 1):
padding = "same" if i < num_convs - 2 else "valid"
out = tf.keras.layers.Conv2D(
filters=num_filters,
kernel_size=[3, 3],
padding=padding,
activation=tf.nn.leaky_relu,
name="conv_{}".format(i),
)(out)
# Apply dense hidden layers, if any
conv_out = tf.keras.layers.Flatten()(out)
out = conv_out
for i in range(num_hidden_layers):
if i > 0 and d2rl:
out = tf.keras.layers.Concatenate()([out, conv_out])
out = tf.keras.layers.Dense(size_hidden_layers)(out)
out = tf.keras.layers.LeakyReLU()(out)
# Linear last layer for action distribution logits
layer_out = tf.keras.layers.Dense(self.num_outputs)(out)
# Linear last layer for value function branch of model
value_out = tf.keras.layers.Dense(1)(out)
self.base_model = tf.keras.Model(self.inputs, [layer_out, value_out])
def forward(self, input_dict, state=None, seq_lens=None):
model_out, self._value_out = self.base_model(input_dict["obs"])
return model_out, state
def value_function(self):
return tf.reshape(self._value_out, [-1])
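# Registration sketch (name and values are illustrative): rllib finds the model through the
# ModelCatalog, and the custom parameters parsed above arrive via "custom_model_config".
#   from ray.rllib.models import ModelCatalog
#   ModelCatalog.register_custom_model("ppo_model", RllibPPOModel)
#   config["model"] = {
#       "custom_model": "ppo_model",
#       "custom_model_config": {"NUM_HIDDEN_LAYERS": 3, "SIZE_HIDDEN_LAYERS": 64,
#                               "NUM_FILTERS": 25, "NUM_CONV_LAYERS": 3, "D2RL": False},
#   }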
class RllibLSTMPPOModel(RecurrentNetwork):
"""
Model that will map encoded environment observations to action logits
                 ___________     ________     ______         _______
    state  ->   | conv_net | -> | fc_net | -> | lstm | -+-> | value |
                |__________|    |________|    |______|  |   |_______|
                                               |    |   |    _______________
                                             h_in  c_in +-> | action_logits |
                                                             |_______________|
"""
def __init__(
self,
obs_space,
action_space,
num_outputs,
model_config,
name,
**kwargs
):
super(RllibLSTMPPOModel, self).__init__(
obs_space, action_space, num_outputs, model_config, name
)
# params we passed in from rllib client
custom_params = model_config["custom_model_config"]
## Parse custom network params
num_hidden_layers = custom_params["NUM_HIDDEN_LAYERS"]
size_hidden_layers = custom_params["SIZE_HIDDEN_LAYERS"]
num_filters = custom_params["NUM_FILTERS"]
num_convs = custom_params["NUM_CONV_LAYERS"]
cell_size = custom_params["CELL_SIZE"]
### Create graph of the model ###
flattened_dim = np.prod(obs_space.shape)
# Need an extra batch dimension (None) for time dimension
flattened_obs_inputs = tf.keras.Input(
shape=(None, flattened_dim), name="input"
)
lstm_h_in = tf.keras.Input(shape=(cell_size,), name="h_in")
lstm_c_in = tf.keras.Input(shape=(cell_size,), name="c_in")
seq_in = tf.keras.Input(shape=(), name="seq_in", dtype=tf.int32)
# Restore initial observation shape
obs_inputs = tf.keras.layers.Reshape(
target_shape=(-1, *obs_space.shape)
)(flattened_obs_inputs)
out = obs_inputs
## Initial "vision" network
        # Apply initial conv layer with a larger kernel (why?)
if num_convs > 0:
out = tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
filters=num_filters,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.leaky_relu,
name="conv_initial",
)
)(out)
# Apply remaining conv layers, if any
for i in range(0, num_convs - 1):
padding = "same" if i < num_convs - 2 else "valid"
out = tf.keras.layers.TimeDistributed(
tf.keras.layers.Conv2D(
filters=num_filters,
kernel_size=[3, 3],
padding=padding,
activation=tf.nn.leaky_relu,
name="conv_{}".format(i),
)
)(out)
# Flatten spatial features
out = tf.keras.layers.TimeDistributed(tf.keras.layers.Flatten())(out)
# Apply dense hidden layers, if any
for i in range(num_hidden_layers):
out = tf.keras.layers.TimeDistributed(
tf.keras.layers.Dense(
units=size_hidden_layers,
activation=tf.nn.leaky_relu,
name="fc_{0}".format(i),
)
)(out)
## LSTM network
lstm_out, h_out, c_out = tf.keras.layers.LSTM(
cell_size, return_sequences=True, return_state=True, name="lstm"
)(
inputs=out,
mask=tf.sequence_mask(seq_in),
initial_state=[lstm_h_in, lstm_c_in],
)
# Linear last layer for action distribution logits
layer_out = tf.keras.layers.Dense(self.num_outputs, name="logits")(
lstm_out
)
# Linear last layer for value function branch of model
value_out = tf.keras.layers.Dense(1, name="values")(lstm_out)
self.cell_size = cell_size
self.base_model = tf.keras.Model(
inputs=[flattened_obs_inputs, seq_in, lstm_h_in, lstm_c_in],
outputs=[layer_out, value_out, h_out, c_out],
)
def forward_rnn(self, inputs, state, seq_lens):
"""
Run the forward pass of the model
Arguments:
inputs: np.array of shape [BATCH, T, obs_shape]
state: list of np.arrays [h_in, c_in] each of shape [BATCH, self.cell_size]
seq_lens: np.array of shape [BATCH] where the ith element is the length of the ith sequence
Output:
model_out: tensor of shape [BATCH, T, self.num_outputs] representing action logits
state: list of tensors [h_out, c_out] each of shape [BATCH, self.cell_size]
"""
model_out, self._value_out, h_out, c_out = self.base_model(
[inputs, seq_lens, state]
)
return model_out, [h_out, c_out]
def value_function(self):
"""
Returns a tensor of shape [BATCH * T] representing the value function for the most recent forward pass
"""
return tf.reshape(self._value_out, [-1])
def get_initial_state(self):
"""
Returns the initial hidden state for the LSTM
"""
return [
np.zeros(self.cell_size, np.float32),
np.zeros(self.cell_size, np.float32),
]
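# The LSTM variant is registered the same way (ModelCatalog.register_custom_model with this
# class); its custom_model_config additionally needs "CELL_SIZE". rllib then calls
# forward_rnn with batched sequences [BATCH, T, obs] and the [h, c] state returned by
# get_initial_state above.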
| 8,450 | 34.508403 | 110 | py |
CBA | CBA-main/vignette.py | #this file is to teach you how to use CBA
"""
Created on Fri Mar 27 18:58:59 2020
@author: 17b90
"""
import kBET
import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
import sklearn.metrics as sm
from collections import Counter
import matplotlib.pyplot as plt
from keras.regularizers import l2
from sklearn import preprocessing
from keras.layers.core import Lambda
from keras.callbacks import TensorBoard
from imblearn.over_sampling import SMOTE,ADASYN
from keras.callbacks import LearningRateScheduler
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.cluster.hierarchy import dendrogram, linkage
###############################################################################
#input the data
RAWseries1= #batch one, gene * cell
RAWseries2= #batch two, gene * cell
#input the label
choose_seriestype1= #cluster1, cell * 1, the element should be like 'gamma', not number
choose_seriestype2= #cluster2, cell * 1, the element should be like 'gamma', not number
#input the gene name
genename= #gene name, (gene * none)
fromname= #input your code name
#we choose some parameters
min_cells= #remove some genes, expressed in less than 50 cells
pca_dim= #the number of PCs, you can choose as you like
minnumberofcluster= #this parameter is used for doing Louvain clustering again
#because sometimes the clusters obtained by Louvain are quite big, you can run Louvain again inside each obtained cluster
#there is no strict rule: if you think the clusters are too big, you can do it, judge it by yourself
#clusters with more than $minnumberofcluster$ cells will be clustered again to make them smaller
#I think this hardly influences the result, it just makes it look nicer, so choose as you like!
clusternumber= #the number of neighbors when doing the cluster matching, we choose one neighbor, but you can choose more
chosen_cluster= #select your target cell types, like ['alpha','beta','ductal','acinar','delta','gamma','endothelial','epsilon']
cluster_index2= #give each cell type an index, like {'alpha':0,'beta':1,'ductal':2,'acinar':3,'delta':4,'gamma':5,'endothelial':6,'epsilon':7}
###############################################################################
#merge them
Alldata=np.concatenate([RAWseries1.T,RAWseries2.T])
Alllabel=np.concatenate([choose_seriestype1,choose_seriestype2])
Allbatch=np.concatenate([np.zeros(choose_seriestype1.shape[0]),np.zeros(choose_seriestype2.shape[0])+1])
###############################################################################
#ok, we select some interesting cell types
chosen_index=np.arange(Alllabel.shape[0])
for i in range(Alllabel.shape[0]):
if Alllabel[i] in chosen_cluster:
chosen_index[i]=1
else:
chosen_index[i]=0
Alldata=Alldata[chosen_index==1,:]
Allbatch=Allbatch[chosen_index==1]
Alllabel=Alllabel[chosen_index==1]
###############################################################################
#and them, use numbers to replace the name of cell types
Numlabel=np.zeros(Alllabel.shape[0])
for i in range(Alllabel.shape[0]):
Numlabel[i]=cluster_index2[Alllabel[i][0]]
###############################################################################
#use Scanpy!!!
anndata=sc.AnnData(pd.DataFrame(Alldata,columns=genename))
sc.pp.filter_genes(anndata,min_cells=min_cells)
sc.pp.normalize_per_cell(anndata,counts_per_cell_after=1e4)
sc.pp.log1p(anndata)
sc.pp.highly_variable_genes(anndata)
sc.pl.highly_variable_genes(anndata)
anndata=anndata[:,anndata.var['highly_variable']]
sc.pl.highest_expr_genes(anndata,n_top=20)
sc.tl.pca(anndata,n_comps=100,svd_solver='arpack')
sc.pl.pca(anndata)
sc.pl.pca_variance_ratio(anndata,log=True,n_pcs=100,save=[True,'pancreas'])
#after prepossessing, we rename these datasets
Alldata_aft=anndata.obsm['X_pca'][:,0:pca_dim]
#this is for the preparation of deep learning training, the training is hard if you don't do that
Alldata_aft=preprocessing.StandardScaler().fit_transform(Alldata_aft)
Alldata_aft=preprocessing.MinMaxScaler().fit_transform(Alldata_aft)
PCAseries1=Alldata_aft[Allbatch==0,:][Numlabel[Allbatch==0].argsort()]
PCAseries2=Alldata_aft[Allbatch==1,:][Numlabel[Allbatch==1].argsort()]
choose_seriestype1=Numlabel[Allbatch==0][Numlabel[Allbatch==0].argsort()].astype('int')
choose_seriestype2=Numlabel[Allbatch==1][Numlabel[Allbatch==1].argsort()].astype('int')
###############################################################################
#do Louvain clustering
cluster_series1=sc.AnnData(PCAseries1)
cluster_series2=sc.AnnData(PCAseries2)
sc.pp.neighbors(cluster_series1,n_pcs=0)
sc.pp.neighbors(cluster_series2,n_pcs=0)
sc.tl.umap(cluster_series1)
sc.tl.umap(cluster_series2)
sc.tl.louvain(cluster_series1)
sc.tl.louvain(cluster_series2)
sc.pl.umap(cluster_series1,color='louvain',size=30)
sc.pl.umap(cluster_series2,color='louvain',size=30)
cluster1=np.array(list(map(int,cluster_series1.obs['louvain'])))
cluster2=np.array(list(map(int,cluster_series2.obs['louvain'])))
###############################################################################
#ok, as you like, you can do clustering for each cluster, or not
recluster1=np.zeros(cluster1.shape[0])
recluster2=np.zeros(cluster2.shape[0])
palsecluster1=cluster1
count_cluster1=pd.value_counts(cluster_series1.obs['louvain'])
for i in range(1000000000000000):#until there are no clusters with more than $minnumberofcluster$ cells
if count_cluster1.max()<minnumberofcluster:
break
else:
print(count_cluster1.max())
recluster1=np.zeros(cluster1.shape[0])
recluster1_number=0
for i in np.unique(palsecluster1):
index=palsecluster1==i
if index.sum()<minnumberofcluster:
thisrecluster=np.zeros(index.sum())
recluster1[index]=thisrecluster+recluster1_number
recluster1_number=len(np.unique(recluster1))
else:
data=PCAseries1[index]
anndata=sc.AnnData(data)
sc.pp.neighbors(anndata,n_pcs=0)
sc.tl.louvain(anndata)
thisrecluster=np.array(list(map(int,anndata.obs['louvain'])))
recluster1[index]=thisrecluster+recluster1_number
recluster1_number=len(np.unique(recluster1))
palsecluster1=recluster1.astype('int')
count_cluster1=pd.value_counts(palsecluster1)
palsecluster2=cluster2
count_cluster2=pd.value_counts(cluster_series2.obs['louvain'])
for i in range(1000000000000000):
if count_cluster2.max()<minnumberofcluster:
break
else:
print(count_cluster2.max())
recluster2=np.zeros(cluster2.shape[0])
recluster2_number=0
for i in np.unique(palsecluster2):
index=palsecluster2==i
if index.sum()<minnumberofcluster:
thisrecluster=np.zeros(index.sum())
recluster2[index]=thisrecluster+recluster2_number
recluster2_number=len(np.unique(recluster2))
else:
data=PCAseries2[index]
anndata=sc.AnnData(data)
sc.pp.neighbors(anndata,n_pcs=0)
sc.tl.louvain(anndata)
thisrecluster=np.array(list(map(int,anndata.obs['louvain'])))
recluster2[index]=thisrecluster+recluster2_number
recluster2_number=len(np.unique(recluster2))
palsecluster2=recluster2.astype('int')
count_cluster2=pd.value_counts(palsecluster2)
recluster1=palsecluster1
recluster2=palsecluster2
###############################################################################
#show the Louvain results
series1=sc.AnnData(PCAseries1)
series2=sc.AnnData(PCAseries2)
sc.pp.neighbors(series1,n_pcs=0)
sc.pp.neighbors(series2,n_pcs=0)
sc.tl.umap(series1)
sc.tl.umap(series2)
df1=pd.DataFrame(choose_seriestype1)
df1=pd.Series(np.reshape(df1.values,df1.values.shape[0]), dtype="category")
series1.obs['real']=df1.values
df2=pd.DataFrame(choose_seriestype2)
df2=pd.Series(np.reshape(df2.values,df2.values.shape[0]), dtype="category")
series2.obs['real']=df2.values
sc.pl.umap(series1,color='real',size=30)
sc.pl.umap(series2,color='real',size=30)
df1=pd.DataFrame(recluster1.astype('int'))
df1=pd.Series(np.reshape(df1.values,df1.values.shape[0]), dtype="category")
series1.obs['recluster']=df1.values
df2=pd.DataFrame(recluster2.astype('int'))
df2=pd.Series(np.reshape(df2.values,df2.values.shape[0]), dtype="category")
series2.obs['recluster']=df2.values
sc.pl.umap(series1,color='recluster',size=30)
sc.pl.umap(series2,color='recluster',size=30)
###############################################################################
#this is used to select the metric when selecting neighbor clusters
def dis(P,Q,distance_method):
if distance_method==0:#euclidean distance
return np.sqrt(np.sum(np.square(P-Q)))
if distance_method==1:#cos distance
return 1-(np.multiply(P,Q).sum()/(np.sqrt(np.sum(np.square(P)))*np.sqrt(np.sum(np.square(Q)))))
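#tiny worked example: for P=[1,0] and Q=[0,1], dis(P,Q,0)=sqrt(2)~1.414 and dis(P,Q,1)=1-0=1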
###############################################################################
#you can choose change their turn or not
if len(np.unique(recluster1))>=len(np.unique(recluster2)):
a=PCAseries1
PCAseries1=PCAseries2
PCAseries2=a
b=choose_seriestype1
choose_seriestype1=choose_seriestype2
choose_seriestype2=b
c=cluster1
cluster1=cluster2
cluster2=c
d=recluster1
recluster1=recluster2
recluster2=d
###############################################################################
#ok, let's calculate the similarity of cells/clusters
correlation_recluster=np.zeros([len(np.unique(recluster1)),len(np.unique(recluster2))])
correlation_recluster_cell=np.zeros([recluster1.shape[0],recluster2.shape[0]])
for i in range(len(np.unique(recluster1))):
for j in range(len(np.unique(recluster2))):
print(i,j)
index_series1=np.where(recluster1==i)[0]
index_series2=np.where(recluster2==j)[0]
cell_series1=PCAseries1[index_series1,:]
cell_series2=PCAseries2[index_series2,:]
mean1=0
for iq in range(cell_series1.shape[0]):
for jq in range(cell_series2.shape[0]):
mean1+=dis(cell_series1[iq,:],cell_series2[jq,:],1)
correlation_recluster[i,j]=mean1/(cell_series1.shape[0]*cell_series2.shape[0])
for ii in range(cell_series1.shape[0]):
for jj in range(cell_series2.shape[0]):
mean2=dis(cell_series1[ii,:],cell_series2[jj,:],0)
correlation_recluster_cell[index_series1[ii],index_series2[jj]]=mean2
plt.imshow(correlation_recluster)
plt.imshow(correlation_recluster_cell)
correlation_recluster_div=-np.log10(correlation_recluster)
correlation_recluster_cell_div=-np.log10(correlation_recluster_cell)
correlation_recluster_norm=(correlation_recluster_div-correlation_recluster_div.min())/(correlation_recluster_div.max()-correlation_recluster_div.min())
correlation_recluster_cell_norm=(correlation_recluster_cell_div-correlation_recluster_cell_div.min())/(correlation_recluster_cell_div.max()-correlation_recluster_cell_div.min())
#show them
plt.imshow(correlation_recluster_norm)
plt.imshow(correlation_recluster_cell_norm)
###############################################################################
#remove bad parts, do the matching
correlation_recluster_select=np.zeros(correlation_recluster_norm.shape)
recluster_mid=np.zeros(recluster1.shape)
for kk in range(correlation_recluster_norm.shape[0]):
ind=np.sort(correlation_recluster_norm[kk,:])
select=correlation_recluster_norm[kk,:]<ind[-clusternumber]
select=(select==False)
recluster_mid[recluster1==kk]+=int(np.where(select==True)[0])
correlation_recluster_select[kk,:]=correlation_recluster_norm[kk,:]*select
plt.imshow(correlation_recluster_select)
correlation_recluster_cell_final=correlation_recluster_cell*0
for i in range(correlation_recluster_cell_norm.shape[0]):
for j in range(correlation_recluster_cell_norm.shape[1]):
label1=recluster1[i]
label2=recluster2[j]
mean1=correlation_recluster_select[label1,label2]
mean2=correlation_recluster_cell_norm[i,j]
if mean1==0:
correlation_recluster_cell_final[i,j]=0
else:
correlation_recluster_cell_final[i,j]=mean2
plt.imshow(correlation_recluster_select)
plt.imshow(correlation_recluster_cell_final)
recluster1=recluster_mid.astype('int')
sort_correlation_recluster_cell_final=correlation_recluster_cell_final[recluster1.argsort(),:]
sort_correlation_recluster_cell_final=sort_correlation_recluster_cell_final[:,recluster2.argsort()]
###############################################################################
#heatmap
heatmap(correlation_recluster_cell_final,choose_seriestype1,choose_seriestype2,save=False,name='pancreasmatrix')
heatmap(sort_correlation_recluster_cell_final,np.sort(recluster1)+9,np.sort(recluster2)+9,save=False,name='ourpancreasmatrix')
###############################################################################
#ok, I use keras, cells in each input are randomly selected, I don't know how to match cells with their similarity
#I also don't know how to match the cell part with their distance, so I design the following inputs
#It will waste some time, it's not easy and unclear for readers, but it works!
x_input1=np.zeros([PCAseries1.shape[0],PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]+recluster2.max()+1])
x_input2=np.zeros([PCAseries2.shape[0],PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]+recluster2.max()+1])
for i in range(PCAseries1.shape[0]):
print(i)
x_input1[i,0:PCAseries1.shape[1]]=PCAseries1[i,:]
x_input1[i,PCAseries1.shape[1]:PCAseries1.shape[1]+PCAseries1.shape[0]]=K.utils.np_utils.to_categorical(i,PCAseries1.shape[0])
x_input1[i,PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]=correlation_recluster_cell_final[i,:]
x_input1[i,PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]:]=K.utils.np_utils.to_categorical(recluster1[i],recluster2.max()+1)
for j in range(PCAseries2.shape[0]):
print(j)
x_input2[j,0:PCAseries2.shape[1]]=PCAseries2[j,:]
x_input2[j,PCAseries2.shape[1]:PCAseries2.shape[1]+PCAseries2.shape[0]]=K.utils.np_utils.to_categorical(j,PCAseries2.shape[0])
x_input2[j,PCAseries2.shape[1]+PCAseries2.shape[0]:PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]]=correlation_recluster_cell_final[:,j]
x_input2[j,PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]:]=K.utils.np_utils.to_categorical(recluster2[j],recluster2.max()+1)
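#each row of x_input is laid out as: [PCA features | one-hot index of the cell in its own batch | similarity to every cell of the other batch | one-hot cluster label]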
###############################################################################
#interesting, I need to make two batches have the same number of cells, so I have to copy cells again and again
if x_input1.shape[0]>=x_input2.shape[0]:
x_test1=x_input1
y_test1=recluster1
y_testreal1=choose_seriestype1
repeat_num=int(np.ceil(x_input1.shape[0]/x_input2.shape[0]))
x_test2=np.tile(x_input2,(repeat_num,1))
y_test2=np.tile(recluster2,repeat_num)
y_testreal2=np.tile(choose_seriestype2,repeat_num)
x_test2=x_test2[0:x_test1.shape[0],:]
y_test2=y_test2[0:x_test1.shape[0]]
y_testreal2=y_testreal2[0:x_test1.shape[0]]
elif x_input1.shape[0]<x_input2.shape[0]:
x_test2=x_input2
y_test2=recluster2
y_testreal2=choose_seriestype2
repeat_num=int(np.ceil(x_input2.shape[0]/x_input1.shape[0]))
x_test1=np.tile(x_input1,(repeat_num,1))
y_test1=np.tile(recluster1,repeat_num)
y_testreal1=np.tile(choose_seriestype1,repeat_num)
x_test1=x_test1[0:x_test2.shape[0],:]
y_test1=y_test1[0:x_test2.shape[0]]
y_testreal1=y_testreal1[0:x_test2.shape[0]]
###############################################################################
def choose_info(x,info_number):
    #slice out the PCA feature part of the concatenated input
    return x[:,0:info_number]
def choose_index(x,info_number,x_samplenumber):
    #slice out the one-hot cell index part
    return x[:,info_number:info_number+x_samplenumber]
def choose_corrlation(x,info_number,x_samplenumber,cor_number):
    #slice out the cell-to-cell similarity part
    return x[:,info_number+x_samplenumber:info_number+x_samplenumber+cor_number]
def choose_relabel(x,info_number,x_samplenumber,cor_number):
    #slice out the one-hot cluster label part
    return x[:,info_number+x_samplenumber+cor_number:]
def slic(input_):
    #take only the first column
    return input_[:,0]
###############################################################################
activation='relu'
info_number=PCAseries1.shape[1]
layer=PCAseries1.shape[1]
input1=K.Input(shape=(x_test1.shape[1],))#line1 species1
input2=K.Input(shape=(x_test2.shape[1],))#line1 species2
input3=K.Input(shape=(x_test1.shape[1],))#line2 species1
input4=K.Input(shape=(x_test2.shape[1],))#line2 species2
Data1=Lambda(choose_info,arguments={'info_number':info_number})(input1)
Data2=Lambda(choose_info,arguments={'info_number':info_number})(input2)
Data3=Lambda(choose_info,arguments={'info_number':info_number})(input3)
Data4=Lambda(choose_info,arguments={'info_number':info_number})(input4)
Index1=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0]})(input1)
Index2=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0]})(input2)
Index3=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0]})(input3)
Index4=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0]})(input4)
Cor1=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input1)
Cor2=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input2)
Cor3=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input3)
Cor4=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)
Relabel1=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input1)
Relabel2=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input2)
Relabel3=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input3)
Relabel4=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)
x_concat1=layers.concatenate([Data1,Data3])#batch1
x_concat2=layers.concatenate([Data2,Data4])#batch2
x1=layers.Dense(layer,activation=activation)(Data1)
x2=layers.Dense(layer,activation=activation)(Data2)
x3=layers.Dense(layer,activation=activation)(Data3)
x4=layers.Dense(layer,activation=activation)(Data4)
x1=layers.BatchNormalization()(x1)
x2=layers.BatchNormalization()(x2)
x3=layers.BatchNormalization()(x3)
x4=layers.BatchNormalization()(x4)
x1_mid1=layers.Dense(layer,activation=activation)(layers.concatenate([x1,x2]))
x2_mid1=layers.Dense(layer,activation=activation)(layers.concatenate([x1,x2]))
x1_mid2=layers.Dense(layer,activation=activation)(layers.concatenate([x3,x4]))
x2_mid2=layers.Dense(layer,activation=activation)(layers.concatenate([x3,x4]))
x1_mid1=layers.BatchNormalization()(x1_mid1)
x2_mid1=layers.BatchNormalization()(x2_mid1)
x1_mid2=layers.BatchNormalization()(x1_mid2)
x2_mid2=layers.BatchNormalization()(x2_mid2)
layer_classify=layers.Dense(recluster2.max()+1,activation='relu')
y1=layer_classify(x1_mid1)
y2=layer_classify(x2_mid1)
y3=layer_classify(x1_mid2)
y4=layer_classify(x2_mid2)
x1=layers.concatenate([x1_mid1,x1_mid2])#batch1
x2=layers.concatenate([x2_mid1,x2_mid2])#batch2
output1=layers.Dense(2*layer,activation=activation)(x1)
output2=layers.Dense(2*layer,activation=activation)(x2)
output1=layers.BatchNormalization()(output1)
output2=layers.BatchNormalization()(output2)
def loss_weight(input_):
return tf.reduce_sum(tf.multiply(input_[0],input_[1]),axis=-1)
def MSE(input_):
return tf.reduce_mean(tf.square(input_[0]-input_[1]),axis=-1)
def multi_classification_loss(input_):
return tf.keras.losses.categorical_crossentropy(input_[0],input_[1])
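#loss1: autoencoder reconstruction error between each decoded output and its concatenated input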
AE_loss_1=Lambda(MSE)([output1,x_concat1])
AE_loss_2=Lambda(MSE)([output2,x_concat2])
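#loss2: prediction of the (re)cluster pseudo-labels from the shared embeddings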
cls_loss_1=Lambda(MSE)([y1,Relabel1])
cls_loss_2=Lambda(MSE)([y2,Relabel2])
cls_loss_3=Lambda(MSE)([y3,Relabel3])
cls_loss_4=Lambda(MSE)([y4,Relabel4])
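#loss3: inter-batch alignment, weighting the distance between the two batches' embeddings by the correlation entry of the sampled pair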
interweight1=Lambda(loss_weight)([Index1,Cor2])
interweight4=Lambda(loss_weight)([Index3,Cor4])
interloss_1=Lambda(MSE)([x1_mid1,x2_mid1])
interloss_4=Lambda(MSE)([x1_mid2,x2_mid2])
interloss_1=layers.Multiply()([interweight1,interloss_1])
interloss_4=layers.Multiply()([interweight4,interloss_4])
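#loss4: intra-batch consistency, weighting the distance between the two sampled cells of the same batch by whether their pseudo-labels agree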
intraweight1=Lambda(loss_weight)([Relabel1,Relabel3])
intraweight2=Lambda(loss_weight)([Relabel2,Relabel4])
intraloss_1=Lambda(MSE)([x1_mid1,x1_mid2])
intraloss_2=Lambda(MSE)([x2_mid1,x2_mid2])
intraloss_1=layers.Multiply()([intraweight1,intraloss_1])
intraloss_2=layers.Multiply()([intraweight2,intraloss_2])
Loss1=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss1')([AE_loss_1,AE_loss_2])
Loss2=Lambda(lambda x:(x[0]*1+x[1]*1+x[2]*1+x[3]*1)/4,name='loss2')([cls_loss_1,cls_loss_2,cls_loss_3,cls_loss_4])
Loss3=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss3')([interloss_1,interloss_4])
Loss4=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss4')([intraloss_1,intraloss_2])
###############################################################################
network_train=K.models.Model([input1,input2,input3,input4],[Loss1,Loss2,Loss3,Loss4])
network_train.summary()
###############################################################################
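#build lookup tables used for pair sampling: for every cell, the indices of cells sharing its pseudo-label (intra) and of cells with a different pseudo-label (inter)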
intra_data1={}
inter_data1={}
for i in range(x_test1.shape[0]):
label_i=y_test1[i]
intra_data1[i]=np.where(y_test1==label_i)
inter_data1[i]=np.where(y_test1!=label_i)
intra_data2={}
inter_data2={}
for i in range(x_test2.shape[0]):
label_i=y_test2[i]
intra_data2[i]=np.where(y_test2==label_i)
inter_data2[i]=np.where(y_test2!=label_i)
###############################################################################
batch_size=256
train_loss=[]
loss1=[]
loss2=[]
loss3=[]
loss4=[]
###############################################################################
iterations=10000000
lr=1e-4
optimizer=K.optimizers.Adam(lr=lr)
loss_weights=[1,1,1,1]
#these four loss terms do not converge at the same speed and no automatic balancing is used here
#the workaround adopted: if one term becomes too small, stop the training, enlarge its weight and train again
#an automatic re-weighting scheme would probably train this model better
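#illustrative sketch only (not called anywhere in this script): one possible way to automate the
#manual re-weighting described above; the window size and floor_ratio threshold are assumptions
def rebalance_loss_weights(loss_histories,loss_weights,window=50,floor_ratio=0.05):
    #loss_histories: list of per-term loss curves; loss_weights: the current weights
    #if the recent average of one term falls below floor_ratio times the mean of the other terms,
    #double its weight so that all terms keep contributing to the gradient
    new_weights=list(loss_weights)
    recent=[np.mean(h[-window:]) if len(h)>0 else 0.0 for h in loss_histories]
    for k,r in enumerate(recent):
        others=[recent[j] for j in range(len(recent)) if j!=k]
        if len(others)>0 and r<floor_ratio*np.mean(others):
            new_weights[k]=new_weights[k]*2
    return new_weights
#example (not executed here): loss_weights=rebalance_loss_weights([loss1,loss2,loss3,loss4],loss_weights)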
network_train.compile(optimizer=optimizer,
loss=[lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred],
loss_weights=loss_weights)
for i in range(iterations):
x_input1_series1_train=np.zeros(x_test1.shape)
index0=np.zeros(x_input1_series1_train.shape[0])
x_input1_series2_train=np.zeros(x_test2.shape)
index1=np.zeros(x_input1_series2_train.shape[0])
x_input2_series1_train=np.zeros(x_test1.shape)
index2=np.zeros(x_input2_series1_train.shape[0])
x_input2_series2_train=np.zeros(x_test2.shape)
index3=np.zeros(x_input2_series2_train.shape[0])
for ii in range(x_test1.shape[0]):
index0[ii]=random.choice(range(x_test1.shape[0]))
rand1=random.random()
in_rand1=np.where(x_test1[ii,:][PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]>0)[0]
out_rand1=np.where(x_test1[ii,:][PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]<=0)[0]
if rand1>=0.5:
index1[ii]=random.choice(in_rand1)
elif rand1<0.5:
index1[ii]=random.choice(out_rand1)
rand2=random.random()
if rand2>=0.5:
index2[ii]=random.choice(intra_data1[index0[ii]][0])
elif rand2<0.5:
index2[ii]=random.choice(inter_data1[index0[ii]][0])
rand3=random.random()
if rand3>=0.5:
index3[ii]=random.choice(intra_data2[index1[ii]][0])
elif rand3<0.5:
index3[ii]=random.choice(inter_data2[index1[ii]][0])
train1=x_test1[index0.astype('int'),:]
train2=x_test2[index1.astype('int'),:]
train3=x_test1[index2.astype('int'),:]
train4=x_test2[index3.astype('int'),:]
Train=network_train.fit([train1,train2,train3,train4],
[np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1])],
batch_size=batch_size,shuffle=True)
train_loss.append(Train.history['loss'][:][0])
loss1.append(Train.history['loss1_loss'][:][0]*loss_weights[0])
loss2.append(Train.history['loss2_loss'][:][0]*loss_weights[1])
loss3.append(Train.history['loss3_loss'][:][0]*loss_weights[2])
loss4.append(Train.history['loss4_loss'][:][0]*loss_weights[3])
print(i,'loss=',
Train.history['loss'][:][0],
Train.history['loss1_loss'][:][0]*loss_weights[0],
Train.history['loss2_loss'][:][0]*loss_weights[1],
Train.history['loss3_loss'][:][0]*loss_weights[2],
Train.history['loss4_loss'][:][0]*loss_weights[3])
if i>500:
plt.plot(train_loss[:])
plt.plot(loss1[:])
plt.plot(loss2[:])
plt.plot(loss3[:])
plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
        plt.ylim(0,max(max(train_loss[i-500:]),max(loss1[i-500:]),max(loss2[i-500:]),max(loss3[i-500:]),max(loss4[i-500:])))
plt.xlim(i-500,i)
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
plt.plot(train_loss[:])
plt.plot(loss1[:])
plt.plot(loss2[:])
plt.plot(loss3[:])
plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
else:
        plt.plot(train_loss[:])
        plt.plot(loss1[:])
        plt.plot(loss2[:])
        plt.plot(loss3[:])
        plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
###############################################################################
network_predict=K.models.Model([input1,input2,input3,input4],[x1_mid1,x2_mid1,x1_mid2,x2_mid2])
[low_dim1,low_dim2,low_dim3,low_dim4]=network_predict.predict([x_test1,x_test2,x_test1,x_test2])
low_dim1=low_dim1[0:x_input1.shape[0]]
low_dim2=low_dim2[0:x_input2.shape[0]]
low_dim3=low_dim3[0:x_input1.shape[0]]
low_dim4=low_dim4[0:x_input2.shape[0]]
low_dim1=np.concatenate([low_dim1,low_dim3],axis=1)
low_dim2=np.concatenate([low_dim2,low_dim4],axis=1)
y_real_no1=y_testreal1[0:x_input1.shape[0]]
y_recluster_no1=recluster1[0:x_input1.shape[0]]
y_real_no2=y_testreal2[0:x_input2.shape[0]]
y_recluster_no2=recluster2[0:x_input2.shape[0]]
total_real_type=np.concatenate([y_real_no1,y_real_no2])
total_recluster_type=np.concatenate([y_recluster_no1,y_recluster_no2])
###############################################################################
series1=sc.AnnData(low_dim1)
series2=sc.AnnData(low_dim2)
mergedata=series1.concatenate(series2)
mergedata.obsm['NN']=mergedata.X
sc.pp.neighbors(mergedata,n_pcs=0)
sc.tl.louvain(mergedata)
sc.tl.leiden(mergedata)
sc.tl.umap(mergedata)
df=pd.DataFrame(total_real_type.astype('int'))
df=pd.Series(np.reshape(df.values,df.values.shape[0]), dtype="category")
mergedata.obs['real']=df.values
sc.pl.umap(mergedata,color='louvain',size=30)
sc.pl.umap(mergedata,color='leiden',size=30)
sc.pl.umap(mergedata,color='batch',size=30)
sc.pl.umap(mergedata,color='real',size=30)
type_louvain=mergedata.obs['louvain']
type_leiden=mergedata.obs['leiden']
type_batch=mergedata.obs['batch']
type_real=mergedata.obs['real']
###############################################################################
umapdata=pd.DataFrame(mergedata.obsm['X_umap'].T,index=['tSNE1','tSNE2'])
umapdata1=pd.DataFrame(mergedata.obsm['X_umap'][0:PCAseries1.shape[0],:].T,index=['tSNE1','tSNE2'])
umapdata2=pd.DataFrame(mergedata.obsm['X_umap'][PCAseries1.shape[0]:,:].T,index=['tSNE1','tSNE2'])
###############################################################################
plot_tSNE_batchclusters(umapdata1,umapdata2,choose_seriestype1,choose_seriestype2,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'batch1')
plot_tSNE_batchclusters(umapdata2,umapdata1,choose_seriestype2,choose_seriestype1,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'batch2')
plot_tSNE_clusters(umapdata,list(map(int,type_batch)), cluster_colors=cluster_colors,save=False,name=fromname+'batch')
plot_tSNE_sepclusters(umapdata1,umapdata2,choose_seriestype1,choose_seriestype2,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'label1')
plot_tSNE_sepclusters(umapdata2,umapdata1,choose_seriestype2,choose_seriestype1,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'label2')
plot_tSNE_clusters(umapdata,list(map(int,type_real)), cluster_colors=cluster_colors,save=False, name=fromname+'label')
#sio.savemat('pancreas_ourdata.mat',{'mergedata':mergedata.X,'umapdata':umapdata.values})#note: the two batches may have been swapped earlier in this script, so check the batch order in the saved data and swap back manually if needed | 30,327 | 46.76063 | 185 | py
CBA | CBA-main/evaluation/evaluation_pancreas.py | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 27 18:58:59 2020
@author: 17b90
"""
import keras as K
import pandas as pd
from keras import layers
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.decomposition import PCA
import scanpy as sc
import scipy
import pickle
from sklearn.manifold import TSNE
from keras.layers.core import Lambda
import scipy.io as sio
import seaborn as sns
import umap
import numpy as np
import metrics
from ywb_function import *
import scanorama
import sklearn.metrics as sm
import kBET
we_use=[1,2]#we try to integrate pancreas1 and pancreas2
#input the data
RAWseries1=pd.read_csv('RAWseries_'+str(we_use[0])+'.csv',header=None)[1:].values.astype('single')
RAWseries2=pd.read_csv('RAWseries_'+str(we_use[1])+'.csv',header=None)[1:].values.astype('single')
#input the label
choose_seriestype1=pd.read_csv('realseries_'+str(we_use[0])+'.csv',header=None)[1:].values
choose_seriestype2=pd.read_csv('realseries_'+str(we_use[1])+'.csv',header=None)[1:].values
Alldata=np.concatenate([RAWseries1.T,RAWseries2.T])
Alllabel=np.concatenate([choose_seriestype1,choose_seriestype2])
Allbatch=np.concatenate([np.zeros(choose_seriestype1.shape[0]),np.zeros(choose_seriestype2.shape[0])+1])
###############################################################################
chosen_cluster=['alpha','beta','ductal','acinar','delta','gamma','endothelial','epsilon']
chosen_index=np.arange(Alllabel.shape[0])
for i in range(Alllabel.shape[0]):
if Alllabel[i] in chosen_cluster:
chosen_index[i]=1
else:
chosen_index[i]=0
Alldata=Alldata[chosen_index==1,:]
Allbatch=Allbatch[chosen_index==1]
Alllabel=Alllabel[chosen_index==1]
###############################################################################
Numlabel=np.zeros(Alllabel.shape[0])
cluster_index2={'alpha':0,'beta':1,'ductal':2,'acinar':3,'delta':4,'gamma':5,'endothelial':6,'epsilon':7}
for i in range(Alllabel.shape[0]):
Numlabel[i]=cluster_index2[Alllabel[i][0]]
###############################################################################
choose_seriestype1=Numlabel[Allbatch==0][Numlabel[Allbatch==0].argsort()].astype('int')
choose_seriestype2=Numlabel[Allbatch==1][Numlabel[Allbatch==1].argsort()].astype('int')
Numlabel[Allbatch==0]=choose_seriestype1
Numlabel[Allbatch==1]=choose_seriestype2
total_given_type=Numlabel
merge=sio.loadmat('pancreas_ourdata')['mergedata']
#tricky step: you need to check which block of the saved data is batch1 and which is batch2; here this was done manually
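#minimal sanity check (an illustrative addition, not part of the original pipeline): the saved embedding
#should contain exactly the cells of both batches, stacked one batch after the other
if merge.shape[0]!=choose_seriestype1.shape[0]+choose_seriestype2.shape[0]:
    print('warning: merged embedding size does not match the two batches, check the batch order manually')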
mergedata=sc.AnnData(merge)
total_batch_type=np.concatenate([choose_seriestype1*0,choose_seriestype2*0+1])
total_batch_type=np.reshape(total_batch_type,total_batch_type.shape[0])
mergedata.obs['batch']=total_batch_type
zero_type=np.concatenate([choose_seriestype1*0,choose_seriestype2*0])
zero_type=np.reshape(zero_type,zero_type.shape[0])
mergedata.obs['zero']=zero_type
total_given_type=np.concatenate([choose_seriestype1,choose_seriestype2])
total_given_type=np.reshape(total_given_type,total_given_type.shape[0])
mergedata.obs['real']=total_given_type
mergedata.obsm["embedding"]=mergedata.X
sc.pp.neighbors(mergedata,n_pcs=0)
mergedata.obsm['NN']=mergedata.X
sc.tl.louvain(mergedata,resolution=0.5)
sc.tl.umap(mergedata)
sc.pl.umap(mergedata,color=['batch','louvain','real'])
type_louvain=mergedata.obs['louvain']
type_batch=mergedata.obs['batch']
type_real=mergedata.obs['real']
type_batch=type_batch.replace('ref',0)
type_batch=type_batch.replace('new',1)
###############################################################################
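#evaluation metrics computed below: kBET acceptance rate per cell type and over the whole dataset,
#silhouette score on the true labels, and NMI/ARI/FMI between the Louvain clusters and the true labels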
kBET_score=kBET.kbet(mergedata,'batch','real',embed='embedding')
for i in range(8):
print(kBET_score['kBET'][i])
print(kBET_score.mean()[1])
kBET_score_whole=kBET.kbet(mergedata,'batch','zero',embed='embedding')
print(kBET_score_whole.mean()[1])
S_score=kBET.silhouette(mergedata,'real',metric='euclidean',embed='embedding')
print(S_score)
NMI_louvain=kBET.nmi(mergedata,'louvain','real')
print(NMI_louvain)
ARI_louvain=kBET.ari(mergedata,'louvain','real')
print(ARI_louvain)
FMI_louvain=sm.fowlkes_mallows_score(type_real,type_louvain)
print(FMI_louvain)
###############################################################################
umapdata=pd.DataFrame(mergedata.obsm['X_umap'].T,index=['tSNE1','tSNE2'])
umapdata1=pd.DataFrame(mergedata.obsm['X_umap'][0:(Allbatch==0).sum(),:].T,index=['tSNE1','tSNE2'])
umapdata2=pd.DataFrame(mergedata.obsm['X_umap'][(Allbatch==0).sum():,:].T,index=['tSNE1','tSNE2'])
##############################################################################
fromname='do'
plot_tSNE_clusters(umapdata,list(map(int,type_real)), cluster_colors=cluster_colors,save=False, name=fromname+'label') | 4,698 | 38.487395 | 118 | py |
CBA | CBA-main/lung/ywb_function.py | import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
from collections import Counter
import matplotlib.pyplot as plt
from keras.regularizers import l2
from sklearn import preprocessing
from keras.layers.core import Lambda
from keras.callbacks import TensorBoard
from keras.callbacks import LearningRateScheduler
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from mpl_toolkits.axes_grid1 import make_axes_locatable
def color(value):
digit = list(map(str, range(10))) + list("ABCDEF")
if isinstance(value, tuple):
string = '#'
for i in value:
a1 = i // 16
a2 = i % 16
string += digit[a1] + digit[a2]
return string
elif isinstance(value, str):
a1 = digit.index(value[1]) * 16 + digit.index(value[2])
a2 = digit.index(value[3]) * 16 + digit.index(value[4])
a3 = digit.index(value[5]) * 16 + digit.index(value[6])
return (a1, a2, a3)
cluster_colors=[
color((213,94,0)),
color((0,114,178)),
color((204,121,167)),
color((0,158,115)),
color((86,180,233)),
color((230,159,0)),
color((240,228,66)),
color((0,0,0)),
'#D3D3D3',
'#FF00FF',
'#aec470',
'#b3ee3d',
'#de4726',
'#f69149',
'#f81919',
'#ff49b0',
'#f05556',
'#fadf0b',
'#f8c495',
'#ffc1c1',
'#ffc125',
'#ffc0cb',
'#ffbbff',
'#ffb90f',
'#ffb6c1',
'#ffb5c5',
'#ff83fa',
'#ff8c00',
'#ff4040',
'#ff3030',
'#ff34b3',
'#00fa9a',
'#ca4479',
'#eead0e',
'#ff1493',
'#0ab4e4',
'#1e6a87',
'#800080',
'#00e5ee',
'#c71585',
'#027fd0',
'#004dba',
'#0a9fb4',
'#004b71',
'#285528',
'#2f7449',
'#21b183',
'#3e4198',
'#4e14a6',
'#5dd73d',
'#64a44e',
'#6787d6',
'#6c6b6b',
'#6c6b6b',
'#7759a4',
'#78edff',
'#762a14',
'#9805cc',
'#9b067d',
'#af7efe',
'#a7623d']
def plot_tSNE_clusters(df_tSNE,labels,cluster_colors=None,s=6,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE.loc['tSNE1'], df_tSNE.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[i] for i in labels])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_batchclusters(df_tSNE1,df_tSNE2,labels1,labels2,cluster_colors=None,s=0.8,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE2.loc['tSNE1'], df_tSNE2.loc['tSNE2'],s=s,alpha=0.8,lw=0,c='#D3D3D3')
ax.scatter(df_tSNE1.loc['tSNE1'], df_tSNE1.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[1] for i in labels1])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_sepclusters(df_tSNE1,df_tSNE2,labels1,labels2,cluster_colors=None,s=0.8,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE2.loc['tSNE1'], df_tSNE2.loc['tSNE2'],s=s,alpha=0.8,lw=0,c='#D3D3D3')
ax.scatter(df_tSNE1.loc['tSNE1'], df_tSNE1.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[i] for i in labels1])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_cluster(df_tSNE,labels,cluster_colors=None,s=6,save=False,name=None):
index=[[] for i in range(np.max(labels)+1)]
for i in range(len(labels)):
index[int(labels[i])].append(i)
index=[i for i in index if i!=[]]
for i in range(len(np.unique(labels))):
color=np.array(labels)[index[i]][0]
fig,ax=plt.subplots()
ax.scatter(df_tSNE.loc['tSNE1'], df_tSNE.loc['tSNE2'],c='#D3D3D3',s=s,lw=0)
ax.scatter(df_tSNE.loc['tSNE1'].iloc[index[i]],df_tSNE.loc['tSNE2'].iloc[index[i]],c=[cluster_colors[k] for k in np.array(labels)[index[i]]],s=s,lw=0)
ax.axis('equal')
ax.set_axis_off()
if save == True:
plt.savefig('{}.eps'.format(name+str(color)), dpi=600,format='eps')
def gen_labels(df, model):
if str(type(model)).startswith("<class 'sklearn.cluster"):
cell_labels = dict(zip(df.columns, model.labels_))
label_cells = {}
for l in np.unique(model.labels_):
label_cells[l] = []
for i, label in enumerate(model.labels_):
label_cells[label].append(df.columns[i])
cellID = list(df.columns)
labels = list(model.labels_)
labels_a = model.labels_
elif type(model) == np.ndarray:
cell_labels = dict(zip(df.columns, model))
label_cells = {}
for l in np.unique(model):
label_cells[l] = []
for i, label in enumerate(model):
label_cells[label].append(df.columns[i])
cellID = list(df.columns)
labels = list(model)
labels_a = model
else:
print('Error wrong input type')
return cell_labels, label_cells, cellID, labels, labels_a
def heatmap(correlation_recluster_cell_final,choose_seriestype1,choose_seriestype2,save=False,name=''):
df=pd.DataFrame(correlation_recluster_cell_final)
labels1=np.array(choose_seriestype1)
labels2=np.array(choose_seriestype2)
cell_labels1,label_cells1,cellID1,labels1,labels_a1=gen_labels(df.T,np.array(labels1))
cell_labels2,label_cells2,cellID2,labels2,labels_a2=gen_labels(df,np.array(labels2))
optimal_order=np.unique(np.concatenate([labels1,labels2]))
cl,lc=gen_labels(df,np.array(labels2))[:2]
optimal_sort_cells=sum([lc[i] for i in np.unique(labels2)],[])
optimal_sort_labels=[cl[i] for i in optimal_sort_cells]
fig,axHM=plt.subplots(figsize=(9,5))
df_full=df.copy()
z=df_full.values
z=pd.DataFrame(z, index=df_full.index,columns=df_full.columns)
z=z.loc[:,optimal_sort_cells].values
im=axHM.pcolormesh(z,cmap='viridis',vmax=1)
plt.gca().invert_yaxis()
plt.xlim(xmax=len(labels2))
plt.xticks([])
plt.yticks([])
divider=make_axes_locatable(axHM)
axLabel1=divider.append_axes("top",.3,pad=0,sharex=axHM)
axLabel2=divider.append_axes("left",.3,pad=0,sharex=axHM)
counter2=Counter(labels2)
counter1=Counter(labels1)
pos2=0
pos1=0
for l in optimal_order:
axLabel1.barh(y=0,left=pos2,width=counter2[l],color=cluster_colors[l],linewidth=0.5,edgecolor=cluster_colors[l])
pos2+=counter2[l]
optimal_order=np.flipud(optimal_order)
for l in optimal_order:
axLabel2.bar(x=0,bottom=pos1,height=counter1[l],color=cluster_colors[l],linewidth=50,edgecolor=cluster_colors[l])
pos1+=counter1[l]
axLabel1.set_xlim(xmax=len(labels2))
axLabel1.axis('off')
axLabel2.set_ylim(ymax=len(labels1))
axLabel2.axis('off')
cax=fig.add_axes([.91,0.13,0.01,0.22])
colorbar=fig.colorbar(im,cax=cax,ticks=[0,1])
colorbar.set_ticklabels(['0','max'])
plt.savefig('{}.jpg'.format(name),dpi=600,format='jpg') | 7,512 | 33.782407 | 158 | py |
CBA | CBA-main/lung/lung_main.py | """
Created on Fri Mar 27 18:58:59 2020
@author: 17b90
"""
import kBET
import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
import sklearn.metrics as sm
from collections import Counter
import matplotlib.pyplot as plt
from keras.regularizers import l2
from sklearn import preprocessing
from keras.layers.core import Lambda
from keras.callbacks import TensorBoard
from imblearn.over_sampling import SMOTE,ADASYN
from keras.callbacks import LearningRateScheduler
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.cluster.hierarchy import dendrogram, linkage
we_use=[1]
RAWseries1=pd.read_csv('RAWlung_'+str(we_use[0])+'.csv',header=None)[1:].values.astype('single')
choose_seriestype1=pd.read_csv('reallung_'+str(we_use[0])+'.csv',header=None)[1:].values
row1=pd.read_csv('rowgenelung_'+str(we_use[0])+'.csv',header=None)[1:].values
csv_data=pd.read_csv("Lung-countsFACS.csv",header=None)
cellname=csv_data.iloc[0][1:]
csv_data=csv_data[1:]
csv_df=pd.DataFrame(csv_data)
row2=csv_df[0].values
RAWseries2=csv_df.drop(labels=0,axis=1).values.astype('int')
batch2dict=pd.read_csv('annotations_FACS.csv',header=None)[1:]
dictbatch=pd.DataFrame(batch2dict[2].values,index=batch2dict[0].values)
choose_seriestype2=[]
for i in cellname:
if i in batch2dict[0].values:
choose_seriestype2.append(dictbatch.loc[i][0])
else:
choose_seriestype2.append('0')
choose_seriestype2=np.array(choose_seriestype2)
choose_seriestype2=np.reshape(choose_seriestype2,[choose_seriestype2.shape[0],1])
cob_gene=[]
for i in row1:
if i in row2:
cob_gene.append(i)
line1=np.zeros(len(cob_gene))
line2=np.zeros(len(cob_gene))
index=0
for i in cob_gene:
line1[index]=np.where(row1==i[0])[0][0]
line2[index]=np.where(row2==i[0])[0][0]
index+=1
RAWseries1=RAWseries1[line1.astype('int'),:]
RAWseries2=RAWseries2[line2.astype('int'),:]
fromname='lung'+str(we_use[0])
Alldata=np.concatenate([RAWseries1.T,RAWseries2.T])
Alllabel=np.concatenate([choose_seriestype1,choose_seriestype2])
Allbatch=np.concatenate([np.zeros(choose_seriestype1.shape[0]),np.zeros(choose_seriestype2.shape[0])+1])
for i in np.unique(Alllabel):
print(i,(choose_seriestype1==i).sum(),(choose_seriestype2==i).sum())
chosen_cluster=['269',
'268',
'275',
'277',
'265',
'287',
'266',
'273',
'282',
'B cell',
'T cell',
'dendritic cell',
'endothelial cell',
'stromal cell'
]
chosen_index=np.arange(Alllabel.shape[0])
for i in range(Alllabel.shape[0]):
if Alllabel[i] in chosen_cluster:
chosen_index[i]=1
else:
chosen_index[i]=0
Alldata=Alldata[chosen_index==1,:]
Allbatch=Allbatch[chosen_index==1]
Alllabel=Alllabel[chosen_index==1]
###############################################################################
Numlabel=np.zeros(Alllabel.shape[0])
cluster_index2={'269':0,
'268':1,
'275':2,
'277':3,
'265':3,
'287':3,
'266':4,
'273':4,
'282':4,
'B cell':0,
'T cell':1,
'dendritic cell':2,
'endothelial cell':3,
'stromal cell':4
}
for i in range(Alllabel.shape[0]):
Numlabel[i]=cluster_index2[Alllabel[i][0]]
###############################################################################
choose_seriestype1=Numlabel[Allbatch==0][Numlabel[Allbatch==0].argsort()].astype('int')
choose_seriestype2=Numlabel[Allbatch==1][Numlabel[Allbatch==1].argsort()].astype('int')
###############################################################################
min_cells=100
pca_dim=15
minnumberofcluster=10000000000
clusternumber=1
###############################################################################
anndata=sc.AnnData(pd.DataFrame(Alldata))
sc.pp.filter_genes(anndata,min_cells=min_cells)
sc.pp.normalize_per_cell(anndata,counts_per_cell_after=1e4)
sc.pp.log1p(anndata)
sc.pp.highly_variable_genes(anndata)
sc.pl.highly_variable_genes(anndata)
anndata=anndata[:,anndata.var['highly_variable']]
sc.pl.highest_expr_genes(anndata,n_top=20)
sc.tl.pca(anndata,n_comps=100,svd_solver='arpack')
sc.pl.pca(anndata)
sc.pl.pca_variance_ratio(anndata,log=True,n_pcs=100,save=[True,'pancreas'])
Alldata_aft=anndata.obsm['X_pca'][:,0:pca_dim]
Alldata_aft=preprocessing.StandardScaler().fit_transform(Alldata_aft)
Alldata_aft=preprocessing.MinMaxScaler().fit_transform(Alldata_aft)
PCAseries1=Alldata_aft[Allbatch==0,:][Numlabel[Allbatch==0].argsort()]
PCAseries2=Alldata_aft[Allbatch==1,:][Numlabel[Allbatch==1].argsort()]
choose_seriestype1=Numlabel[Allbatch==0][Numlabel[Allbatch==0].argsort()].astype('int')
choose_seriestype2=Numlabel[Allbatch==1][Numlabel[Allbatch==1].argsort()].astype('int')
###############################################################################
cluster_series1=sc.AnnData(PCAseries1)
cluster_series2=sc.AnnData(PCAseries2)
sc.pp.neighbors(cluster_series1,n_pcs=0)
sc.pp.neighbors(cluster_series2,n_pcs=0)
sc.tl.umap(cluster_series1)
sc.tl.umap(cluster_series2)
sc.tl.louvain(cluster_series1,resolution=0.5)
sc.pl.umap(cluster_series1,color='louvain',size=30)
sc.tl.louvain(cluster_series2,resolution=0.5)
sc.pl.umap(cluster_series2,color='louvain',size=30)
cluster1=np.array(list(map(int,cluster_series1.obs['louvain'])))
cluster2=np.array(list(map(int,cluster_series2.obs['louvain'])))
###############################################################################
recluster1=np.zeros(cluster1.shape[0])
recluster2=np.zeros(cluster2.shape[0])
palsecluster1=cluster1
count_cluster1=pd.value_counts(cluster_series1.obs['louvain'])
for i in range(1000000000000000):
if count_cluster1.max()<minnumberofcluster:
break
else:
print(count_cluster1.max())
recluster1=np.zeros(cluster1.shape[0])
recluster1_number=0
for i in np.unique(palsecluster1):
index=palsecluster1==i
if index.sum()<minnumberofcluster:
thisrecluster=np.zeros(index.sum())
recluster1[index]=thisrecluster+recluster1_number
recluster1_number=len(np.unique(recluster1))
else:
data=PCAseries1[index]
anndata=sc.AnnData(data)
sc.pp.neighbors(anndata,n_pcs=0)
sc.tl.louvain(anndata)
thisrecluster=np.array(list(map(int,anndata.obs['louvain'])))
recluster1[index]=thisrecluster+recluster1_number
recluster1_number=len(np.unique(recluster1))
palsecluster1=recluster1.astype('int')
count_cluster1=pd.value_counts(palsecluster1)
palsecluster2=cluster2
count_cluster2=pd.value_counts(cluster_series2.obs['louvain'])
for i in range(1000000000000000):
if count_cluster2.max()<minnumberofcluster:
break
else:
print(count_cluster2.max())
recluster2=np.zeros(cluster2.shape[0])
recluster2_number=0
for i in np.unique(palsecluster2):
index=palsecluster2==i
if index.sum()<minnumberofcluster:
thisrecluster=np.zeros(index.sum())
recluster2[index]=thisrecluster+recluster2_number
recluster2_number=len(np.unique(recluster2))
else:
data=PCAseries2[index]
anndata=sc.AnnData(data)
sc.pp.neighbors(anndata,n_pcs=0)
sc.tl.louvain(anndata)
thisrecluster=np.array(list(map(int,anndata.obs['louvain'])))
recluster2[index]=thisrecluster+recluster2_number
recluster2_number=len(np.unique(recluster2))
palsecluster2=recluster2.astype('int')
count_cluster2=pd.value_counts(palsecluster2)
recluster1=palsecluster1
recluster2=palsecluster2
###############################################################################
series1=sc.AnnData(PCAseries1)
series2=sc.AnnData(PCAseries2)
sc.pp.neighbors(series1,n_pcs=0)
sc.pp.neighbors(series2,n_pcs=0)
sc.tl.umap(series1)
sc.tl.umap(series2)
df1=pd.DataFrame(choose_seriestype1)
df1=pd.Series(np.reshape(df1.values,df1.values.shape[0]), dtype="category")
series1.obs['real']=df1.values
df2=pd.DataFrame(choose_seriestype2)
df2=pd.Series(np.reshape(df2.values,df2.values.shape[0]), dtype="category")
series2.obs['real']=df2.values
sc.pl.umap(series1,color='real',size=30)
sc.pl.umap(series2,color='real',size=30)
df1=pd.DataFrame(recluster1.astype('int'))
df1=pd.Series(np.reshape(df1.values,df1.values.shape[0]), dtype="category")
series1.obs['recluster']=df1.values
df2=pd.DataFrame(recluster2.astype('int'))
df2=pd.Series(np.reshape(df2.values,df2.values.shape[0]), dtype="category")
series2.obs['recluster']=df2.values
sc.pl.umap(series1,color='recluster',size=30)
sc.pl.umap(series2,color='recluster',size=30)
###############################################################################
def dis(P,Q,distance_method):
    #distance_method==0: Euclidean distance (used below for cell-to-cell distances)
    #distance_method==1: cosine distance, i.e. 1-cosine similarity (used below for cluster-to-cluster distances)
    if distance_method==0:
        return np.sqrt(np.sum(np.square(P-Q)))
    if distance_method==1:
        return 1-(np.multiply(P,Q).sum()/(np.sqrt(np.sum(np.square(P)))*np.sqrt(np.sum(np.square(Q)))))
###############################################################################
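#if batch1 has fewer (or the same number of) reclusters than batch2, swap the two batches
#(data, labels, clusters and reclusters together) so that the batch treated as "1" below
#always has at least as many reclusters as the other one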
if len(np.unique(recluster1))<=len(np.unique(recluster2)):
a=PCAseries1
PCAseries1=PCAseries2
PCAseries2=a
b=choose_seriestype1
choose_seriestype1=choose_seriestype2
choose_seriestype2=b
c=cluster1
cluster1=cluster2
cluster2=c
d=recluster1
recluster1=recluster2
recluster2=d
###############################################################################
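#measure similarity between the two batches: mean cosine distance between every pair of reclusters,
#and Euclidean distance between every pair of cells; both matrices are then -log10 transformed and min-max normalised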
correlation_recluster=np.zeros([len(np.unique(recluster1)),len(np.unique(recluster2))])
correlation_recluster_cell=np.zeros([recluster1.shape[0],recluster2.shape[0]])
for i in range(len(np.unique(recluster1))):
for j in range(len(np.unique(recluster2))):
print(i,j)
index_series1=np.where(recluster1==i)[0]
index_series2=np.where(recluster2==j)[0]
cell_series1=PCAseries1[index_series1,:]
cell_series2=PCAseries2[index_series2,:]
mean1=0
for iq in range(cell_series1.shape[0]):
for jq in range(cell_series2.shape[0]):
mean1+=dis(cell_series1[iq,:],cell_series2[jq,:],1)
correlation_recluster[i,j]=mean1/(cell_series1.shape[0]*cell_series2.shape[0])
for ii in range(cell_series1.shape[0]):
for jj in range(cell_series2.shape[0]):
mean2=dis(cell_series1[ii,:],cell_series2[jj,:],0)
correlation_recluster_cell[index_series1[ii],index_series2[jj]]=mean2
plt.imshow(correlation_recluster)
plt.imshow(correlation_recluster_cell)
correlation_recluster_div=-np.log10(correlation_recluster)
correlation_recluster_cell_div=-np.log10(correlation_recluster_cell)
correlation_recluster_norm=(correlation_recluster_div-correlation_recluster_div.min())/(correlation_recluster_div.max()-correlation_recluster_div.min())
correlation_recluster_cell_norm=(correlation_recluster_cell_div-correlation_recluster_cell_div.min())/(correlation_recluster_cell_div.max()-correlation_recluster_cell_div.min())
plt.imshow(correlation_recluster_norm)
plt.imshow(correlation_recluster_cell_norm)
###############################################################################
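#for every batch1 recluster keep only its most similar batch2 recluster(s) ($clusternumber$ of them, one here),
#zero out the rest of the similarity matrix, and relabel batch1 cells by the index of the matched batch2 recluster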
correlation_recluster_select=np.zeros(correlation_recluster_norm.shape)
recluster_mid=np.zeros(recluster1.shape)
for kk in range(correlation_recluster_norm.shape[0]):
ind=np.sort(correlation_recluster_norm[kk,:])
select=correlation_recluster_norm[kk,:]<ind[-clusternumber]
select=(select==False)
recluster_mid[recluster1==kk]+=int(np.where(select==True)[0])
correlation_recluster_select[kk,:]=correlation_recluster_norm[kk,:]*select
plt.imshow(correlation_recluster_select)
correlation_recluster_cell_final=correlation_recluster_cell*0
for i in range(correlation_recluster_cell_norm.shape[0]):
for j in range(correlation_recluster_cell_norm.shape[1]):
label1=recluster1[i]
label2=recluster2[j]
mean1=correlation_recluster_select[label1,label2]
mean2=correlation_recluster_cell_norm[i,j]
if mean1==0:
correlation_recluster_cell_final[i,j]=0
else:
correlation_recluster_cell_final[i,j]=mean2
plt.imshow(correlation_recluster_select)
plt.imshow(correlation_recluster_cell_final)
recluster1=recluster_mid.astype('int')
sort_correlation_recluster_cell_final=correlation_recluster_cell_final[recluster1.argsort(),:]
sort_correlation_recluster_cell_final=sort_correlation_recluster_cell_final[:,recluster2.argsort()]
###############################################################################
heatmap(correlation_recluster_cell_final,choose_seriestype1,choose_seriestype2,save=False,name='pancreasmatrix')
################################################################################
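#feature layout of the training vectors built below:
#[PCA features | one-hot index of the cell within its own batch | its correlation row against all cells of the other batch | one-hot recluster label]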
x_input1=np.zeros([PCAseries1.shape[0],PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]+recluster2.max()+1])
x_input2=np.zeros([PCAseries2.shape[0],PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]+recluster2.max()+1])
for i in range(PCAseries1.shape[0]):
print(i)
x_input1[i,0:PCAseries1.shape[1]]=PCAseries1[i,:]
x_input1[i,PCAseries1.shape[1]:PCAseries1.shape[1]+PCAseries1.shape[0]]=K.utils.np_utils.to_categorical(i,PCAseries1.shape[0])
x_input1[i,PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]=correlation_recluster_cell_final[i,:]
x_input1[i,PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]:]=K.utils.np_utils.to_categorical(recluster1[i],recluster2.max()+1)
for j in range(PCAseries2.shape[0]):
print(j)
x_input2[j,0:PCAseries2.shape[1]]=PCAseries2[j,:]
x_input2[j,PCAseries2.shape[1]:PCAseries2.shape[1]+PCAseries2.shape[0]]=K.utils.np_utils.to_categorical(j,PCAseries2.shape[0])
x_input2[j,PCAseries2.shape[1]+PCAseries2.shape[0]:PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]]=correlation_recluster_cell_final[:,j]
x_input2[j,PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]:]=K.utils.np_utils.to_categorical(recluster2[j],recluster2.max()+1)
###############################################################################
x_input1_new=x_input1
recluster1_new=recluster1
x_input2_new=x_input2
recluster2_new=recluster2
###############################################################################
if x_input1_new.shape[0]>=x_input2_new.shape[0]:
x_test1=x_input1_new
y_test1=recluster1_new
y_testreal1=choose_seriestype1
repeat_num=int(np.ceil(x_input1_new.shape[0]/x_input2_new.shape[0]))
x_test2=np.tile(x_input2_new,(repeat_num,1))
y_test2=np.tile(recluster2_new,repeat_num)
y_testreal2=np.tile(choose_seriestype2,repeat_num)
x_test2=x_test2[0:x_test1.shape[0],:]
y_test2=y_test2[0:x_test1.shape[0]]
y_testreal2=y_testreal2[0:x_test1.shape[0]]
elif x_input1_new.shape[0]<x_input2_new.shape[0]:
x_test2=x_input2_new
y_test2=recluster2_new
y_testreal2=choose_seriestype2
repeat_num=int(np.ceil(x_input2_new.shape[0]/x_input1_new.shape[0]))
x_test1=np.tile(x_input1_new,(repeat_num,1))
y_test1=np.tile(recluster1_new,repeat_num)
y_testreal1=np.tile(choose_seriestype1,repeat_num)
x_test1=x_test1[0:x_test2.shape[0],:]
y_test1=y_test1[0:x_test2.shape[0]]
y_testreal1=y_testreal1[0:x_test2.shape[0]]
###############################################################################
def choose_info(x,info_number):
return x[:,0:info_number]
def choose_index(x,info_number,x_samplenumber):
return x[:,info_number:info_number+x_samplenumber]
def choose_corrlation(x,info_number,x_samplenumber,cor_number):
return x[:,info_number+x_samplenumber:info_number+x_samplenumber+cor_number]
def choose_relabel(x,info_number,x_samplenumber,cor_number):
return x[:,info_number+x_samplenumber+cor_number:]
def slic(input_):
return input_[:,0]
###############################################################################
activation='relu'
info_number=PCAseries1.shape[1]
layer=PCAseries1.shape[1]
layer2=layer
input1=K.Input(shape=(x_test1.shape[1],))#line1 species1
input2=K.Input(shape=(x_test2.shape[1],))#line1 species2
input3=K.Input(shape=(x_test1.shape[1],))#line2 species1
input4=K.Input(shape=(x_test2.shape[1],))#line2 species2
Data1=Lambda(choose_info,arguments={'info_number':info_number})(input1)
Data2=Lambda(choose_info,arguments={'info_number':info_number})(input2)
Data3=Lambda(choose_info,arguments={'info_number':info_number})(input3)
Data4=Lambda(choose_info,arguments={'info_number':info_number})(input4)
Index1=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0]})(input1)
Index2=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0]})(input2)
Index3=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0]})(input3)
Index4=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0]})(input4)
Cor1=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input1)
Cor2=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input2)
Cor3=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input3)
Cor4=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)
Relabel1=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input1)
Relabel2=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input2)
Relabel3=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input3)
Relabel4=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)
x_concat1=layers.concatenate([Data1,Data3])#batch1
x_concat2=layers.concatenate([Data2,Data4])#batch2
x1=layers.Dense(layer2,activation=activation)(Data1)
x2=layers.Dense(layer2,activation=activation)(Data2)
x3=layers.Dense(layer2,activation=activation)(Data3)
x4=layers.Dense(layer2,activation=activation)(Data4)
x1=layers.BatchNormalization()(x1)
x2=layers.BatchNormalization()(x2)
x3=layers.BatchNormalization()(x3)
x4=layers.BatchNormalization()(x4)
x1_mid1=layers.Dense(layer2,activation=activation)(layers.concatenate([x1,x2]))
x2_mid1=layers.Dense(layer2,activation=activation)(layers.concatenate([x1,x2]))
x1_mid2=layers.Dense(layer2,activation=activation)(layers.concatenate([x3,x4]))
x2_mid2=layers.Dense(layer2,activation=activation)(layers.concatenate([x3,x4]))
x1_mid1=layers.BatchNormalization()(x1_mid1)
x2_mid1=layers.BatchNormalization()(x2_mid1)
x1_mid2=layers.BatchNormalization()(x1_mid2)
x2_mid2=layers.BatchNormalization()(x2_mid2)
layer_classify=layers.Dense(recluster2_new.max()+1,activation='relu')
y1=layer_classify(x1_mid1)
y2=layer_classify(x2_mid1)
y3=layer_classify(x1_mid2)
y4=layer_classify(x2_mid2)
x1=layers.concatenate([x1_mid1,x1_mid2])#batch1
x2=layers.concatenate([x2_mid1,x2_mid2])#batch2
output1=layers.Dense(2*layer,activation=activation)(x1)
output2=layers.Dense(2*layer,activation=activation)(x2)
output1=layers.BatchNormalization()(output1)
output2=layers.BatchNormalization()(output2)
def loss_weight(input_):
return tf.reduce_sum(tf.multiply(input_[0],input_[1]),axis=-1)
def MSE(input_):
return tf.reduce_mean(tf.square(input_[0]-input_[1]),axis=-1)
def multi_classification_loss(input_):
return tf.keras.losses.categorical_crossentropy(input_[0],input_[1])
#loss1
AE_loss_1=Lambda(MSE)([output1,x_concat1])
AE_loss_2=Lambda(MSE)([output2,x_concat2])
#loss2
cls_loss_1=Lambda(MSE)([y1,Relabel1])
cls_loss_2=Lambda(MSE)([y2,Relabel2])
cls_loss_3=Lambda(MSE)([y3,Relabel3])
cls_loss_4=Lambda(MSE)([y4,Relabel4])
#loss3
interweight1=Lambda(loss_weight)([Index1,Cor2])
interweight4=Lambda(loss_weight)([Index3,Cor4])
interloss_1=Lambda(MSE)([x1_mid1,x2_mid1])
interloss_4=Lambda(MSE)([x1_mid2,x2_mid2])
interloss_1=layers.Multiply()([interweight1,interloss_1])
interloss_4=layers.Multiply()([interweight4,interloss_4])
#loss4
intraweight1=Lambda(loss_weight)([Relabel1,Relabel3])
intraweight2=Lambda(loss_weight)([Relabel2,Relabel4])
intraloss_1=Lambda(MSE)([x1_mid1,x1_mid2])
intraloss_2=Lambda(MSE)([x2_mid1,x2_mid2])
intraloss_1=layers.Multiply()([intraweight1,intraloss_1])
intraloss_2=layers.Multiply()([intraweight2,intraloss_2])
Loss1=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss1')([AE_loss_1,AE_loss_2])
Loss2=Lambda(lambda x:(x[0]*1+x[1]*1+x[2]*1+x[3]*1)/4,name='loss2')([cls_loss_1,cls_loss_2,cls_loss_3,cls_loss_4])
Loss3=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss3')([interloss_1,interloss_4])
Loss4=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss4')([intraloss_1,intraloss_2])
###############################################################################
network_train=K.models.Model([input1,input2,input3,input4],
[Loss1,Loss2,Loss3,Loss4])
network_train.summary()
###############################################################################
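#build lookup tables used for pair sampling: for every cell, the indices of cells sharing its pseudo-label (intra) and of cells with a different pseudo-label (inter)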
intra_data1={}
inter_data1={}
for i in range(x_test1.shape[0]):
label_i=y_test1[i]
intra_data1[i]=np.where(y_test1==label_i)
inter_data1[i]=np.where(y_test1!=label_i)
intra_data2={}
inter_data2={}
for i in range(x_test2.shape[0]):
label_i=y_test2[i]
intra_data2[i]=np.where(y_test2==label_i)
inter_data2[i]=np.where(y_test2!=label_i)
###############################################################################
batch_size=128
train_loss=[]
loss1=[]
loss2=[]
loss3=[]
loss4=[]
###############################################################################
iterations=1000000000
lr=1e-3
optimizer=K.optimizers.Adam(lr=lr)
loss_weights=[1,1,1,1]
network_train.compile(optimizer=optimizer,
loss=[lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred],
loss_weights=loss_weights)
for i in range(iterations):
x_input1_series1_train=np.zeros(x_test1.shape)
index0=np.zeros(x_input1_series1_train.shape[0])
x_input1_series2_train=np.zeros(x_test2.shape)
index1=np.zeros(x_input1_series2_train.shape[0])
x_input2_series1_train=np.zeros(x_test1.shape)
index2=np.zeros(x_input2_series1_train.shape[0])
x_input2_series2_train=np.zeros(x_test2.shape)
index3=np.zeros(x_input2_series2_train.shape[0])
for ii in range(x_test1.shape[0]):
index0[ii]=random.choice(range(x_test1.shape[0]))
rand1=random.random()
in_rand1=np.where(x_test1[ii,:][PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]>0)[0]
out_rand1=np.where(x_test1[ii,:][PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]<=0)[0]
if rand1>=0.5:
index1[ii]=random.choice(in_rand1)
elif rand1<0.5:
index1[ii]=random.choice(out_rand1)
rand2=random.random()
if rand2>=0.5:
index2[ii]=random.choice(intra_data1[index0[ii]][0])
elif rand2<0.5:
index2[ii]=random.choice(inter_data1[index0[ii]][0])
rand3=random.random()
if rand3>=0.5:
index3[ii]=random.choice(intra_data2[index1[ii]][0])
elif rand3<0.5:
index3[ii]=random.choice(inter_data2[index1[ii]][0])
train1=x_test1[index0.astype('int'),:]
train2=x_test2[index1.astype('int'),:]
train3=x_test1[index2.astype('int'),:]
train4=x_test2[index3.astype('int'),:]
Train=network_train.fit([train1,train2,train3,train4],
[np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1])],
batch_size=batch_size,shuffle=True)
train_loss.append(Train.history['loss'][:][0])
loss1.append(Train.history['loss1_loss'][:][0]*loss_weights[0])
loss2.append(Train.history['loss2_loss'][:][0]*loss_weights[1])
loss3.append(Train.history['loss3_loss'][:][0]*loss_weights[2])
loss4.append(Train.history['loss4_loss'][:][0]*loss_weights[3])
print(i,'loss=',
Train.history['loss'][:][0],
Train.history['loss1_loss'][:][0]*loss_weights[0],
Train.history['loss2_loss'][:][0]*loss_weights[1],
Train.history['loss3_loss'][:][0]*loss_weights[2],
Train.history['loss4_loss'][:][0]*loss_weights[3])
if i>100:
plt.plot(train_loss[:])
plt.plot(loss1[:])
plt.plot(loss2[:])
plt.plot(loss3[:])
plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
        plt.ylim(0,max(max(train_loss[i-100:]),max(loss1[i-100:]),max(loss2[i-100:]),max(loss3[i-100:]),max(loss4[i-100:])))
plt.xlim(i-100,i)
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
plt.plot(train_loss[:])
plt.plot(loss1[:])
plt.plot(loss2[:])
plt.plot(loss3[:])
plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
else:
        plt.plot(train_loss[:])
        plt.plot(loss1[:])
        plt.plot(loss2[:])
        plt.plot(loss3[:])
        plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
###############################################################################
network_train.load_weights('lungweight.h5')
network_predict=K.models.Model([input1,input2,input3,input4],
[x1_mid1,x2_mid1,x1_mid2,x2_mid2])
[low_dim1,low_dim2,low_dim3,low_dim4]=network_predict.predict([x_test1,x_test2,x_test1,x_test2])
low_dim1=low_dim1[0:x_input1.shape[0]]
low_dim2=low_dim2[0:x_input2.shape[0]]
low_dim3=low_dim3[0:x_input1.shape[0]]
low_dim4=low_dim4[0:x_input2.shape[0]]
y_real_no1=y_testreal1[0:x_input1.shape[0]]
y_recluster_no1=recluster1[0:x_input1.shape[0]]
y_real_no2=y_testreal2[0:x_input2.shape[0]]
y_recluster_no2=recluster2[0:x_input2.shape[0]]
total_real_type=np.concatenate([y_real_no1,y_real_no2])
total_recluster_type=np.concatenate([y_recluster_no1,y_recluster_no2])
###############################################################################
series1=sc.AnnData(low_dim1)
series2=sc.AnnData(low_dim2)
mergedata=series1.concatenate(series2)
mergedata.obsm['NN']=mergedata.X
sc.pp.neighbors(mergedata,n_pcs=0)
sc.tl.louvain(mergedata)
sc.tl.leiden(mergedata)
sc.tl.umap(mergedata)
df=pd.DataFrame(total_real_type.astype('int'))
df=pd.Series(np.reshape(df.values,df.values.shape[0]), dtype="category")
mergedata.obs['real']=df.values
sc.pl.umap(mergedata,color='louvain',size=30)
sc.pl.umap(mergedata,color='leiden',size=30)
sc.pl.umap(mergedata,color='batch',size=30)
sc.pl.umap(mergedata,color='real',size=30)
type_louvain=mergedata.obs['louvain']
type_leiden=mergedata.obs['leiden']
type_batch=mergedata.obs['batch']
type_real=mergedata.obs['real']
###############################################################################
umapdata=pd.DataFrame(mergedata.obsm['X_umap'].T,index=['tSNE1','tSNE2'])
umapdata1=pd.DataFrame(mergedata.obsm['X_umap'][0:PCAseries1.shape[0],:].T,index=['tSNE1','tSNE2'])
umapdata2=pd.DataFrame(mergedata.obsm['X_umap'][PCAseries1.shape[0]:,:].T,index=['tSNE1','tSNE2'])
###############################################################################
plot_tSNE_batchclusters(umapdata1,umapdata2,choose_seriestype1,choose_seriestype2,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'batch1')
plot_tSNE_batchclusters(umapdata2,umapdata1,choose_seriestype2,choose_seriestype1,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'batch2')
plot_tSNE_clusters(umapdata,list(map(int,type_batch)), cluster_colors=cluster_colors,save=False,name=fromname+'batch')
plot_tSNE_sepclusters(umapdata1,umapdata2,choose_seriestype1,choose_seriestype2,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'label1')
plot_tSNE_sepclusters(umapdata2,umapdata1,choose_seriestype2,choose_seriestype1,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'label2')
plot_tSNE_clusters(umapdata,list(map(int,type_real)), cluster_colors=cluster_colors,save=False, name=fromname+'label')
#sio.savemat('lung_ourdata.mat',{'mergedata':mergedata.X,'umapdata':umapdata.values}) | 29,860 | 43.702096 | 177 | py |
CBA | CBA-main/pancreas/ywb_function.py | import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
from collections import Counter
import matplotlib.pyplot as plt
from keras.regularizers import l2
from sklearn import preprocessing
from keras.layers.core import Lambda
from keras.callbacks import TensorBoard
from keras.callbacks import LearningRateScheduler
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from mpl_toolkits.axes_grid1 import make_axes_locatable
def color(value):
digit = list(map(str, range(10))) + list("ABCDEF")
if isinstance(value, tuple):
string = '#'
for i in value:
a1 = i // 16
a2 = i % 16
string += digit[a1] + digit[a2]
return string
elif isinstance(value, str):
a1 = digit.index(value[1]) * 16 + digit.index(value[2])
a2 = digit.index(value[3]) * 16 + digit.index(value[4])
a3 = digit.index(value[5]) * 16 + digit.index(value[6])
return (a1, a2, a3)
cluster_colors=[
color((213,94,0)),
color((0,114,178)),
color((204,121,167)),
color((0,158,115)),
color((86,180,233)),
color((230,159,0)),
color((240,228,66)),
color((0,0,0)),
'#D3D3D3',
'#FF00FF',
'#aec470',
'#b3ee3d',
'#de4726',
'#f69149',
'#f81919',
'#ff49b0',
'#f05556',
'#fadf0b',
'#f8c495',
'#ffc1c1',
'#ffc125',
'#ffc0cb',
'#ffbbff',
'#ffb90f',
'#ffb6c1',
'#ffb5c5',
'#ff83fa',
'#ff8c00',
'#ff4040',
'#ff3030',
'#ff34b3',
'#00fa9a',
'#ca4479',
'#eead0e',
'#ff1493',
'#0ab4e4',
'#1e6a87',
'#800080',
'#00e5ee',
'#c71585',
'#027fd0',
'#004dba',
'#0a9fb4',
'#004b71',
'#285528',
'#2f7449',
'#21b183',
'#3e4198',
'#4e14a6',
'#5dd73d',
'#64a44e',
'#6787d6',
'#6c6b6b',
'#6c6b6b',
'#7759a4',
'#78edff',
'#762a14',
'#9805cc',
'#9b067d',
'#af7efe',
'#a7623d']
def plot_tSNE_clusters(df_tSNE,labels,cluster_colors=None,s=6,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE.loc['tSNE1'], df_tSNE.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[i] for i in labels])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_batchclusters(df_tSNE1,df_tSNE2,labels1,labels2,cluster_colors=None,s=0.8,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE2.loc['tSNE1'], df_tSNE2.loc['tSNE2'],s=s,alpha=0.8,lw=0,c='#D3D3D3')
ax.scatter(df_tSNE1.loc['tSNE1'], df_tSNE1.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[1] for i in labels1])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_sepclusters(df_tSNE1,df_tSNE2,labels1,labels2,cluster_colors=None,s=0.8,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE2.loc['tSNE1'], df_tSNE2.loc['tSNE2'],s=s,alpha=0.8,lw=0,c='#D3D3D3')
ax.scatter(df_tSNE1.loc['tSNE1'], df_tSNE1.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[i] for i in labels1])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_cluster(df_tSNE,labels,cluster_colors=None,s=6,save=False,name=None):
index=[[] for i in range(np.max(labels)+1)]
for i in range(len(labels)):
index[int(labels[i])].append(i)
index=[i for i in index if i!=[]]
for i in range(len(np.unique(labels))):
color=np.array(labels)[index[i]][0]
fig,ax=plt.subplots()
ax.scatter(df_tSNE.loc['tSNE1'], df_tSNE.loc['tSNE2'],c='#D3D3D3',s=s,lw=0)
ax.scatter(df_tSNE.loc['tSNE1'].iloc[index[i]],df_tSNE.loc['tSNE2'].iloc[index[i]],c=[cluster_colors[k] for k in np.array(labels)[index[i]]],s=s,lw=0)
ax.axis('equal')
ax.set_axis_off()
if save == True:
plt.savefig('{}.eps'.format(name+str(color)), dpi=600,format='eps')
def gen_labels(df, model):
if str(type(model)).startswith("<class 'sklearn.cluster"):
cell_labels = dict(zip(df.columns, model.labels_))
label_cells = {}
for l in np.unique(model.labels_):
label_cells[l] = []
for i, label in enumerate(model.labels_):
label_cells[label].append(df.columns[i])
cellID = list(df.columns)
labels = list(model.labels_)
labels_a = model.labels_
elif type(model) == np.ndarray:
cell_labels = dict(zip(df.columns, model))
label_cells = {}
for l in np.unique(model):
label_cells[l] = []
for i, label in enumerate(model):
label_cells[label].append(df.columns[i])
cellID = list(df.columns)
labels = list(model)
labels_a = model
else:
print('Error wrong input type')
return cell_labels, label_cells, cellID, labels, labels_a
def heatmap(correlation_recluster_cell_final,choose_seriestype1,choose_seriestype2,save=False,name=''):
df=pd.DataFrame(correlation_recluster_cell_final)
labels1=np.array(choose_seriestype1)
labels2=np.array(choose_seriestype2)
cell_labels1,label_cells1,cellID1,labels1,labels_a1=gen_labels(df.T,np.array(labels1))
cell_labels2,label_cells2,cellID2,labels2,labels_a2=gen_labels(df,np.array(labels2))
optimal_order=np.unique(np.concatenate([labels1,labels2]))
cl,lc=gen_labels(df,np.array(labels2))[:2]
optimal_sort_cells=sum([lc[i] for i in np.unique(labels2)],[])
optimal_sort_labels=[cl[i] for i in optimal_sort_cells]
fig,axHM=plt.subplots(figsize=(9,5))
df_full=df.copy()
z=df_full.values
z=pd.DataFrame(z, index=df_full.index,columns=df_full.columns)
z=z.loc[:,optimal_sort_cells].values
im=axHM.pcolormesh(z,cmap='viridis',vmax=1)
plt.gca().invert_yaxis()
plt.xlim(xmax=len(labels2))
plt.xticks([])
plt.yticks([])
divider=make_axes_locatable(axHM)
axLabel1=divider.append_axes("top",.3,pad=0,sharex=axHM)
axLabel2=divider.append_axes("left",.3,pad=0,sharex=axHM)
counter2=Counter(labels2)
counter1=Counter(labels1)
pos2=0
pos1=0
for l in optimal_order:
axLabel1.barh(y=0,left=pos2,width=counter2[l],color=cluster_colors[l],linewidth=0.5,edgecolor=cluster_colors[l])
pos2+=counter2[l]
optimal_order=np.flipud(optimal_order)
for l in optimal_order:
axLabel2.bar(x=0,bottom=pos1,height=counter1[l],color=cluster_colors[l],linewidth=50,edgecolor=cluster_colors[l])
pos1+=counter1[l]
axLabel1.set_xlim(xmax=len(labels2))
axLabel1.axis('off')
axLabel2.set_ylim(ymax=len(labels1))
axLabel2.axis('off')
cax=fig.add_axes([.91,0.13,0.01,0.22])
colorbar=fig.colorbar(im,cax=cax,ticks=[0,1])
colorbar.set_ticklabels(['0','max'])
plt.savefig('{}.jpg'.format(name),dpi=600,format='jpg') | 7,512 | 33.782407 | 158 | py |
CBA | CBA-main/pancreas/pancreas_main.py | """
Created on Fri Mar 27 18:58:59 2020
@author: 17b90
"""
import kBET
import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
import sklearn.metrics as sm
from collections import Counter
import matplotlib.pyplot as plt
from keras.regularizers import l2
from sklearn import preprocessing
from keras.layers.core import Lambda
from keras.callbacks import TensorBoard
from imblearn.over_sampling import SMOTE,ADASYN
from keras.callbacks import LearningRateScheduler
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.cluster.hierarchy import dendrogram, linkage
we_use=[1,2]#we try to integrate pancreas1 and pancreas2
#input the data
RAWseries1=pd.read_csv('RAWseries_'+str(we_use[0])+'.csv',header=None)[1:].values.astype('single')
RAWseries2=pd.read_csv('RAWseries_'+str(we_use[1])+'.csv',header=None)[1:].values.astype('single')
#input the label
choose_seriestype1=pd.read_csv('realseries_'+str(we_use[0])+'.csv',header=None)[1:].values
choose_seriestype2=pd.read_csv('realseries_'+str(we_use[1])+'.csv',header=None)[1:].values
#input the gene name
genename=pd.read_csv('pancreas_genename.csv',header=None)[1:][0].values
#this is our code name
fromname='pancreas'+str(we_use[0])+str(we_use[1])
#we choose some parameters
min_cells=50#remove genes expressed in fewer than 50 cells
pca_dim=50#the number of PCs, you can choose as you like
minnumberofcluster=300#threshold used when deciding whether to run Louvain clustering again
#Louvain sometimes returns quite large clusters, so each obtained cluster can be clustered again with Louvain
#there is no strict rule: re-cluster if you judge the clusters to be too big
#clusters with more than $minnumberofcluster$ cells will be clustered again to make them smaller
#this hardly influences the result, it mainly makes the clustering look nicer, so choose the value as you like
clusternumber=1#the number of neighbors used when matching clusters; we choose one neighbor, but you can choose more
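#compact illustration of the re-clustering idea described above (an added sketch, not called by this
#script; the loops further below implement the same splitting inline for both batches)
def split_large_cluster(data,threshold):
    #returns sub-cluster labels for the cells of one cluster: a single label if the cluster is already
    #small enough, otherwise the labels obtained by running Louvain again on that cluster alone
    if data.shape[0]<threshold:
        return np.zeros(data.shape[0],dtype='int')
    sub=sc.AnnData(data)
    sc.pp.neighbors(sub,n_pcs=0)
    sc.tl.louvain(sub)
    return np.array(list(map(int,sub.obs['louvain'])))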
#merge them
Alldata=np.concatenate([RAWseries1.T,RAWseries2.T])
Alllabel=np.concatenate([choose_seriestype1,choose_seriestype2])
Allbatch=np.concatenate([np.zeros(choose_seriestype1.shape[0]),np.zeros(choose_seriestype2.shape[0])+1])
###############################################################################
#ok, we select some interesting cell types
chosen_cluster=['alpha','beta','ductal','acinar','delta','gamma','endothelial','epsilon']
chosen_index=np.arange(Alllabel.shape[0])
for i in range(Alllabel.shape[0]):
if Alllabel[i] in chosen_cluster:
chosen_index[i]=1
else:
chosen_index[i]=0
Alldata=Alldata[chosen_index==1,:]
Allbatch=Allbatch[chosen_index==1]
Alllabel=Alllabel[chosen_index==1]
###############################################################################
#and then, use numbers to replace the names of the cell types
Numlabel=np.zeros(Alllabel.shape[0])
cluster_index2={'alpha':0,'beta':1,'ductal':2,'acinar':3,'delta':4,'gamma':5,'endothelial':6,'epsilon':7}
for i in range(Alllabel.shape[0]):
Numlabel[i]=cluster_index2[Alllabel[i][0]]
###############################################################################
#use Scanpy!!!
anndata=sc.AnnData(pd.DataFrame(Alldata,columns=genename))
sc.pp.filter_genes(anndata,min_cells=min_cells)
sc.pp.normalize_per_cell(anndata,counts_per_cell_after=1e4)
sc.pp.log1p(anndata)
sc.pp.highly_variable_genes(anndata)
sc.pl.highly_variable_genes(anndata)
anndata=anndata[:,anndata.var['highly_variable']]
sc.pl.highest_expr_genes(anndata,n_top=20)
sc.tl.pca(anndata,n_comps=100,svd_solver='arpack')
sc.pl.pca(anndata)
sc.pl.pca_variance_ratio(anndata,log=True,n_pcs=100,save=[True,'pancreas'])
#after preprocessing, we rename these datasets
Alldata_aft=anndata.obsm['X_pca'][:,0:pca_dim]
#this prepares the data for the deep-learning training; the training is hard if you don't do this
Alldata_aft=preprocessing.StandardScaler().fit_transform(Alldata_aft)
Alldata_aft=preprocessing.MinMaxScaler().fit_transform(Alldata_aft)
PCAseries1=Alldata_aft[Allbatch==0,:][Numlabel[Allbatch==0].argsort()]
PCAseries2=Alldata_aft[Allbatch==1,:][Numlabel[Allbatch==1].argsort()]
choose_seriestype1=Numlabel[Allbatch==0][Numlabel[Allbatch==0].argsort()].astype('int')
choose_seriestype2=Numlabel[Allbatch==1][Numlabel[Allbatch==1].argsort()].astype('int')
###############################################################################
#do Louvain clustering
cluster_series1=sc.AnnData(PCAseries1)
cluster_series2=sc.AnnData(PCAseries2)
sc.pp.neighbors(cluster_series1,n_pcs=0)
sc.pp.neighbors(cluster_series2,n_pcs=0)
sc.tl.umap(cluster_series1)
sc.tl.umap(cluster_series2)
sc.tl.louvain(cluster_series1)
sc.tl.louvain(cluster_series2)
sc.pl.umap(cluster_series1,color='louvain',size=30)
sc.pl.umap(cluster_series2,color='louvain',size=30)
cluster1=np.array(list(map(int,cluster_series1.obs['louvain'])))
cluster2=np.array(list(map(int,cluster_series2.obs['louvain'])))
###############################################################################
#ok, as you like, you can re-cluster each large Louvain cluster again, or skip this step
recluster1=np.zeros(cluster1.shape[0])
recluster2=np.zeros(cluster2.shape[0])
palsecluster1=cluster1
count_cluster1=pd.value_counts(cluster_series1.obs['louvain'])
while True:#until there are no clusters with more than $minnumberofcluster$ cells
if count_cluster1.max()<minnumberofcluster:
break
else:
print(count_cluster1.max())
recluster1=np.zeros(cluster1.shape[0])
recluster1_number=0
for i in np.unique(palsecluster1):
index=palsecluster1==i
if index.sum()<minnumberofcluster:
thisrecluster=np.zeros(index.sum())
recluster1[index]=thisrecluster+recluster1_number
recluster1_number=len(np.unique(recluster1))
else:
data=PCAseries1[index]
anndata=sc.AnnData(data)
sc.pp.neighbors(anndata,n_pcs=0)
sc.tl.louvain(anndata)
thisrecluster=np.array(list(map(int,anndata.obs['louvain'])))
recluster1[index]=thisrecluster+recluster1_number
recluster1_number=len(np.unique(recluster1))
palsecluster1=recluster1.astype('int')
count_cluster1=pd.value_counts(palsecluster1)
palsecluster2=cluster2
count_cluster2=pd.value_counts(cluster_series2.obs['louvain'])
while True:
if count_cluster2.max()<minnumberofcluster:
break
else:
print(count_cluster2.max())
recluster2=np.zeros(cluster2.shape[0])
recluster2_number=0
for i in np.unique(palsecluster2):
index=palsecluster2==i
if index.sum()<minnumberofcluster:
thisrecluster=np.zeros(index.sum())
recluster2[index]=thisrecluster+recluster2_number
recluster2_number=len(np.unique(recluster2))
else:
data=PCAseries2[index]
anndata=sc.AnnData(data)
sc.pp.neighbors(anndata,n_pcs=0)
sc.tl.louvain(anndata)
thisrecluster=np.array(list(map(int,anndata.obs['louvain'])))
recluster2[index]=thisrecluster+recluster2_number
recluster2_number=len(np.unique(recluster2))
palsecluster2=recluster2.astype('int')
count_cluster2=pd.value_counts(palsecluster2)
recluster1=palsecluster1
recluster2=palsecluster2
###############################################################################
#show the Louvain results
series1=sc.AnnData(PCAseries1)
series2=sc.AnnData(PCAseries2)
sc.pp.neighbors(series1,n_pcs=0)
sc.pp.neighbors(series2,n_pcs=0)
sc.tl.umap(series1)
sc.tl.umap(series2)
df1=pd.DataFrame(choose_seriestype1)
df1=pd.Series(np.reshape(df1.values,df1.values.shape[0]), dtype="category")
series1.obs['real']=df1.values
df2=pd.DataFrame(choose_seriestype2)
df2=pd.Series(np.reshape(df2.values,df2.values.shape[0]), dtype="category")
series2.obs['real']=df2.values
sc.pl.umap(series1,color='real',size=30)
sc.pl.umap(series2,color='real',size=30)
df1=pd.DataFrame(recluster1.astype('int'))
df1=pd.Series(np.reshape(df1.values,df1.values.shape[0]), dtype="category")
series1.obs['recluster']=df1.values
df2=pd.DataFrame(recluster2.astype('int'))
df2=pd.Series(np.reshape(df2.values,df2.values.shape[0]), dtype="category")
series2.obs['recluster']=df2.values
sc.pl.umap(series1,color='recluster',size=30)
sc.pl.umap(series2,color='recluster',size=30)
###############################################################################
#this distance function is used when selecting neighbor clusters for the matching (0: Euclidean, 1: cosine)
def dis(P,Q,distance_method):
if distance_method==0:#euclidean distance
return np.sqrt(np.sum(np.square(P-Q)))
if distance_method==1:#cos distance
return 1-(np.multiply(P,Q).sum()/(np.sqrt(np.sum(np.square(P)))*np.sqrt(np.sum(np.square(Q)))))
###############################################################################
#swap the two batches if needed, so that batch 1 has no more Louvain clusters than batch 2
if len(np.unique(recluster1))>=len(np.unique(recluster2)):
a=PCAseries1
PCAseries1=PCAseries2
PCAseries2=a
b=choose_seriestype1
choose_seriestype1=choose_seriestype2
choose_seriestype2=b
c=cluster1
cluster1=cluster2
cluster2=c
d=recluster1
recluster1=recluster2
recluster2=d
###############################################################################
#ok, let's calculate the similarity of cells/clusters
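#cluster-to-cluster similarity: the average cosine distance over all cell pairs of the two clusters
#cell-to-cell similarity: the Euclidean distance between the two cells
#both matrices are then mapped with -log10 and min-max normalized below, so larger values mean more similar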
correlation_recluster=np.zeros([len(np.unique(recluster1)),len(np.unique(recluster2))])
correlation_recluster_cell=np.zeros([recluster1.shape[0],recluster2.shape[0]])
for i in range(len(np.unique(recluster1))):
for j in range(len(np.unique(recluster2))):
print(i,j)
index_series1=np.where(recluster1==i)[0]
index_series2=np.where(recluster2==j)[0]
cell_series1=PCAseries1[index_series1,:]
cell_series2=PCAseries2[index_series2,:]
mean1=0
for iq in range(cell_series1.shape[0]):
for jq in range(cell_series2.shape[0]):
mean1+=dis(cell_series1[iq,:],cell_series2[jq,:],1)
correlation_recluster[i,j]=mean1/(cell_series1.shape[0]*cell_series2.shape[0])
for ii in range(cell_series1.shape[0]):
for jj in range(cell_series2.shape[0]):
mean2=dis(cell_series1[ii,:],cell_series2[jj,:],0)
correlation_recluster_cell[index_series1[ii],index_series2[jj]]=mean2
plt.imshow(correlation_recluster)
plt.imshow(correlation_recluster_cell)
correlation_recluster_div=-np.log10(correlation_recluster)
correlation_recluster_cell_div=-np.log10(correlation_recluster_cell)
correlation_recluster_norm=(correlation_recluster_div-correlation_recluster_div.min())/(correlation_recluster_div.max()-correlation_recluster_div.min())
correlation_recluster_cell_norm=(correlation_recluster_cell_div-correlation_recluster_cell_div.min())/(correlation_recluster_cell_div.max()-correlation_recluster_cell_div.min())
#show them
plt.imshow(correlation_recluster_norm)
plt.imshow(correlation_recluster_cell_norm)
###############################################################################
#remove bad parts, do the matching
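#for every cluster of batch 1, keep only its $clusternumber$ most similar batch-2 clusters and zero out the rest
#recluster_mid relabels each batch-1 cluster with the id of its matched batch-2 cluster
#cell-pair entries whose clusters were not matched are set to zero in correlation_recluster_cell_final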
correlation_recluster_select=np.zeros(correlation_recluster_norm.shape)
recluster_mid=np.zeros(recluster1.shape)
for kk in range(correlation_recluster_norm.shape[0]):
ind=np.sort(correlation_recluster_norm[kk,:])
select=correlation_recluster_norm[kk,:]<ind[-clusternumber]
select=(select==False)
recluster_mid[recluster1==kk]+=int(np.where(select==True)[0])
correlation_recluster_select[kk,:]=correlation_recluster_norm[kk,:]*select
plt.imshow(correlation_recluster_select)
correlation_recluster_cell_final=correlation_recluster_cell*0
for i in range(correlation_recluster_cell_norm.shape[0]):
for j in range(correlation_recluster_cell_norm.shape[1]):
label1=recluster1[i]
label2=recluster2[j]
mean1=correlation_recluster_select[label1,label2]
mean2=correlation_recluster_cell_norm[i,j]
if mean1==0:
correlation_recluster_cell_final[i,j]=0
else:
correlation_recluster_cell_final[i,j]=mean2
plt.imshow(correlation_recluster_select)
plt.imshow(correlation_recluster_cell_final)
recluster1=recluster_mid.astype('int')
sort_correlation_recluster_cell_final=correlation_recluster_cell_final[recluster1.argsort(),:]
sort_correlation_recluster_cell_final=sort_correlation_recluster_cell_final[:,recluster2.argsort()]
###############################################################################
#heatmap
heatmap(correlation_recluster_cell_final,choose_seriestype1,choose_seriestype2,save=False,name='pancreasmatrix')
heatmap(sort_correlation_recluster_cell_final,np.sort(recluster1)+9,np.sort(recluster2)+9,save=False,name='ourpancreasmatrix')
###############################################################################
#ok, I use Keras; cells in each mini-batch are selected randomly, and I did not find a direct way to pair cells with their similarity values inside Keras
#I also did not find a way to attach the cell-pair distances directly, so I design the following inputs instead
#it wastes some time and memory, and it is not the clearest construction for readers, but it works!
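#layout of each input row below: [PCA features | one-hot index of the cell within its own batch | that cell's row/column of correlation_recluster_cell_final | one-hot matched-cluster label]
#the one-hot index and the similarity vector let the loss functions recover, inside the Keras graph, which cell was sampled and how similar it is to its partner from the other batch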
x_input1=np.zeros([PCAseries1.shape[0],PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]+recluster2.max()+1])
x_input2=np.zeros([PCAseries2.shape[0],PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]+recluster2.max()+1])
for i in range(PCAseries1.shape[0]):
print(i)
x_input1[i,0:PCAseries1.shape[1]]=PCAseries1[i,:]
x_input1[i,PCAseries1.shape[1]:PCAseries1.shape[1]+PCAseries1.shape[0]]=K.utils.np_utils.to_categorical(i,PCAseries1.shape[0])
x_input1[i,PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]=correlation_recluster_cell_final[i,:]
x_input1[i,PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]:]=K.utils.np_utils.to_categorical(recluster1[i],recluster2.max()+1)
for j in range(PCAseries2.shape[0]):
print(j)
x_input2[j,0:PCAseries2.shape[1]]=PCAseries2[j,:]
x_input2[j,PCAseries2.shape[1]:PCAseries2.shape[1]+PCAseries2.shape[0]]=K.utils.np_utils.to_categorical(j,PCAseries2.shape[0])
x_input2[j,PCAseries2.shape[1]+PCAseries2.shape[0]:PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]]=correlation_recluster_cell_final[:,j]
x_input2[j,PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]:]=K.utils.np_utils.to_categorical(recluster2[j],recluster2.max()+1)
###############################################################################
#interesting: the paired training needs the two batches to contain the same number of cells, so the cells of the smaller batch are copied (tiled) again and again
if x_input1.shape[0]>=x_input2.shape[0]:
x_test1=x_input1
y_test1=recluster1
y_testreal1=choose_seriestype1
repeat_num=int(np.ceil(x_input1.shape[0]/x_input2.shape[0]))
x_test2=np.tile(x_input2,(repeat_num,1))
y_test2=np.tile(recluster2,repeat_num)
y_testreal2=np.tile(choose_seriestype2,repeat_num)
x_test2=x_test2[0:x_test1.shape[0],:]
y_test2=y_test2[0:x_test1.shape[0]]
y_testreal2=y_testreal2[0:x_test1.shape[0]]
elif x_input1.shape[0]<x_input2.shape[0]:
x_test2=x_input2
y_test2=recluster2
y_testreal2=choose_seriestype2
repeat_num=int(np.ceil(x_input2.shape[0]/x_input1.shape[0]))
x_test1=np.tile(x_input1,(repeat_num,1))
y_test1=np.tile(recluster1,repeat_num)
y_testreal1=np.tile(choose_seriestype1,repeat_num)
x_test1=x_test1[0:x_test2.shape[0],:]
y_test1=y_test1[0:x_test2.shape[0]]
y_testreal1=y_testreal1[0:x_test2.shape[0]]
###############################################################################
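#the Lambda slices below split each concatenated input row back into its four parts (PCA features, one-hot cell index, similarity vector, one-hot cluster label) inside the Keras graph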
def choose_info(x,info_number):
return x[:,0:info_number]
def choose_index(x,info_number,x_samplenumber):
return x[:,info_number:info_number+x_samplenumber]
def choose_corrlation(x,info_number,x_samplenumber,cor_number):
return x[:,info_number+x_samplenumber:info_number+x_samplenumber+cor_number]
def choose_relabel(x,info_number,x_samplenumber,cor_number):
return x[:,info_number+x_samplenumber+cor_number:]
def slic(input_):
return input_[:,0]
###############################################################################
activation='relu'
info_number=PCAseries1.shape[1]
layer=PCAseries1.shape[1]
input1=K.Input(shape=(x_test1.shape[1],))#line 1, batch 1
input2=K.Input(shape=(x_test2.shape[1],))#line 1, batch 2
input3=K.Input(shape=(x_test1.shape[1],))#line 2, batch 1
input4=K.Input(shape=(x_test2.shape[1],))#line 2, batch 2
Data1=Lambda(choose_info,arguments={'info_number':info_number})(input1)
Data2=Lambda(choose_info,arguments={'info_number':info_number})(input2)
Data3=Lambda(choose_info,arguments={'info_number':info_number})(input3)
Data4=Lambda(choose_info,arguments={'info_number':info_number})(input4)
Index1=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0]})(input1)
Index2=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0]})(input2)
Index3=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0]})(input3)
Index4=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0]})(input4)
Cor1=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input1)
Cor2=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input2)
Cor3=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input3)
Cor4=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)
Relabel1=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input1)
Relabel2=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input2)
Relabel3=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input3)
Relabel4=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)
x_concat1=layers.concatenate([Data1,Data3])#batch1
x_concat2=layers.concatenate([Data2,Data4])#batch2
x1=layers.Dense(layer,activation=activation)(Data1)
x2=layers.Dense(layer,activation=activation)(Data2)
x3=layers.Dense(layer,activation=activation)(Data3)
x4=layers.Dense(layer,activation=activation)(Data4)
x1=layers.BatchNormalization()(x1)
x2=layers.BatchNormalization()(x2)
x3=layers.BatchNormalization()(x3)
x4=layers.BatchNormalization()(x4)
x1_mid1=layers.Dense(layer,activation=activation)(layers.concatenate([x1,x2]))
x2_mid1=layers.Dense(layer,activation=activation)(layers.concatenate([x1,x2]))
x1_mid2=layers.Dense(layer,activation=activation)(layers.concatenate([x3,x4]))
x2_mid2=layers.Dense(layer,activation=activation)(layers.concatenate([x3,x4]))
x1_mid1=layers.BatchNormalization()(x1_mid1)
x2_mid1=layers.BatchNormalization()(x2_mid1)
x1_mid2=layers.BatchNormalization()(x1_mid2)
x2_mid2=layers.BatchNormalization()(x2_mid2)
layer_classify=layers.Dense(recluster2.max()+1,activation='relu')
y1=layer_classify(x1_mid1)
y2=layer_classify(x2_mid1)
y3=layer_classify(x1_mid2)
y4=layer_classify(x2_mid2)
x1=layers.concatenate([x1_mid1,x1_mid2])#batch1
x2=layers.concatenate([x2_mid1,x2_mid2])#batch2
output1=layers.Dense(2*layer,activation=activation)(x1)
output2=layers.Dense(2*layer,activation=activation)(x2)
output1=layers.BatchNormalization()(output1)
output2=layers.BatchNormalization()(output2)
def loss_weight(input_):
return tf.reduce_sum(tf.multiply(input_[0],input_[1]),axis=-1)
def MSE(input_):
return tf.reduce_mean(tf.square(input_[0]-input_[1]),axis=-1)
def multi_classification_loss(input_):
return tf.keras.losses.categorical_crossentropy(input_[0],input_[1])
AE_loss_1=Lambda(MSE)([output1,x_concat1])
AE_loss_2=Lambda(MSE)([output2,x_concat2])
cls_loss_1=Lambda(MSE)([y1,Relabel1])
cls_loss_2=Lambda(MSE)([y2,Relabel2])
cls_loss_3=Lambda(MSE)([y3,Relabel3])
cls_loss_4=Lambda(MSE)([y4,Relabel4])
interweight1=Lambda(loss_weight)([Index1,Cor2])
interweight4=Lambda(loss_weight)([Index3,Cor4])
interloss_1=Lambda(MSE)([x1_mid1,x2_mid1])
interloss_4=Lambda(MSE)([x1_mid2,x2_mid2])
interloss_1=layers.Multiply()([interweight1,interloss_1])
interloss_4=layers.Multiply()([interweight4,interloss_4])
intraweight1=Lambda(loss_weight)([Relabel1,Relabel3])
intraweight2=Lambda(loss_weight)([Relabel2,Relabel4])
intraloss_1=Lambda(MSE)([x1_mid1,x1_mid2])
intraloss_2=Lambda(MSE)([x2_mid1,x2_mid2])
intraloss_1=layers.Multiply()([intraweight1,intraloss_1])
intraloss_2=layers.Multiply()([intraweight2,intraloss_2])
Loss1=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss1')([AE_loss_1,AE_loss_2])
Loss2=Lambda(lambda x:(x[0]*1+x[1]*1+x[2]*1+x[3]*1)/4,name='loss2')([cls_loss_1,cls_loss_2,cls_loss_3,cls_loss_4])
Loss3=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss3')([interloss_1,interloss_4])
Loss4=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss4')([intraloss_1,intraloss_2])
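#the four loss terms combined above:
#Loss1: autoencoder reconstruction of the concatenated PCA features of each batch
#Loss2: prediction of the one-hot (matched) cluster label for all four inputs (measured with MSE)
#Loss3: alignment between the representations of a batch-1 cell and a batch-2 cell, weighted by their precomputed pairwise similarity
#Loss4: consistency between the representations of two cells from the same batch, weighted by whether they carry the same cluster label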
###############################################################################
network_train=K.models.Model([input1,input2,input3,input4],[Loss1,Loss2,Loss3,Loss4])
network_train.summary()
###############################################################################
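#for every cell, precompute the indexes of the cells with the same cluster label (intra) and with a different label (inter)
#these dictionaries are used to sample positive/negative partners for each mini-batch during training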
intra_data1={}
inter_data1={}
for i in range(x_test1.shape[0]):
label_i=y_test1[i]
intra_data1[i]=np.where(y_test1==label_i)
inter_data1[i]=np.where(y_test1!=label_i)
intra_data2={}
inter_data2={}
for i in range(x_test2.shape[0]):
label_i=y_test2[i]
intra_data2[i]=np.where(y_test2==label_i)
inter_data2[i]=np.where(y_test2!=label_i)
###############################################################################
batch_size=256
train_loss=[]
loss1=[]
loss2=[]
loss3=[]
loss4=[]
###############################################################################
iterations=10000000
lr=1e-4
optimizer=K.optimizers.Adam(lr=lr)
loss_weights=[1,1,1,1]
#the four loss terms do not converge at the same speed, and I don't know how to resolve that automatically
#so I use a blunt strategy: if one term becomes too small, stop the training, enlarge its weight, and train again
#I think you can train this model better... or maybe you can teach me how to auto-balance the weights, thank you!
network_train.compile(optimizer=optimizer,
loss=[lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred],
loss_weights=loss_weights)
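#each model output is already a loss tensor, so the identity lambdas above with all-zero dummy targets simply pass it through
#every iteration builds four aligned mini-batches: a random batch-1 cell, a batch-2 cell that is either matched or deliberately unmatched to it,
#and one further cell from each batch drawn from the same or a different cluster than its line-1 counterpart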
for i in range(iterations):
x_input1_series1_train=np.zeros(x_test1.shape)
index0=np.zeros(x_input1_series1_train.shape[0])
x_input1_series2_train=np.zeros(x_test2.shape)
index1=np.zeros(x_input1_series2_train.shape[0])
x_input2_series1_train=np.zeros(x_test1.shape)
index2=np.zeros(x_input2_series1_train.shape[0])
x_input2_series2_train=np.zeros(x_test2.shape)
index3=np.zeros(x_input2_series2_train.shape[0])
for ii in range(x_test1.shape[0]):
index0[ii]=random.choice(range(x_test1.shape[0]))
rand1=random.random()
in_rand1=np.where(x_test1[ii,:][PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]>0)[0]
out_rand1=np.where(x_test1[ii,:][PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]<=0)[0]
if rand1>=0.5:
index1[ii]=random.choice(in_rand1)
elif rand1<0.5:
index1[ii]=random.choice(out_rand1)
rand2=random.random()
if rand2>=0.5:
index2[ii]=random.choice(intra_data1[index0[ii]][0])
elif rand2<0.5:
index2[ii]=random.choice(inter_data1[index0[ii]][0])
rand3=random.random()
if rand3>=0.5:
index3[ii]=random.choice(intra_data2[index1[ii]][0])
elif rand3<0.5:
index3[ii]=random.choice(inter_data2[index1[ii]][0])
train1=x_test1[index0.astype('int'),:]
train2=x_test2[index1.astype('int'),:]
train3=x_test1[index2.astype('int'),:]
train4=x_test2[index3.astype('int'),:]
Train=network_train.fit([train1,train2,train3,train4],
[np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1])],
batch_size=batch_size,shuffle=True)
train_loss.append(Train.history['loss'][:][0])
loss1.append(Train.history['loss1_loss'][:][0]*loss_weights[0])
loss2.append(Train.history['loss2_loss'][:][0]*loss_weights[1])
loss3.append(Train.history['loss3_loss'][:][0]*loss_weights[2])
loss4.append(Train.history['loss4_loss'][:][0]*loss_weights[3])
print(i,'loss=',
Train.history['loss'][:][0],
Train.history['loss1_loss'][:][0]*loss_weights[0],
Train.history['loss2_loss'][:][0]*loss_weights[1],
Train.history['loss3_loss'][:][0]*loss_weights[2],
Train.history['loss4_loss'][:][0]*loss_weights[3])
if i>500:
plt.plot(train_loss[:])
plt.plot(loss1[:])
plt.plot(loss2[:])
plt.plot(loss3[:])
plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
        plt.ylim(0,max(max(train_loss[i-500:]),max(loss1[i-500:]),max(loss2[i-500:]),max(loss3[i-500:]),max(loss4[i-500:])))
plt.xlim(i-500,i)
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
plt.plot(train_loss[:])
plt.plot(loss1[:])
plt.plot(loss2[:])
plt.plot(loss3[:])
plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
else:
plt.plot(train_loss[500:])
plt.plot(loss1[500:])
plt.plot(loss2[500:])
plt.plot(loss3[500:])
plt.plot(loss4[500:])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
###############################################################################
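#load the weights of a previously trained model and keep only the encoder outputs;
#the aligned embedding of each cell is the concatenation of its two branch representations (see below)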
network_train.load_weights('pancreas42.h5')
network_predict=K.models.Model([input1,input2,input3,input4],[x1_mid1,x2_mid1,x1_mid2,x2_mid2])
[low_dim1,low_dim2,low_dim3,low_dim4]=network_predict.predict([x_test1,x_test2,x_test1,x_test2])
low_dim1=low_dim1[0:x_input1.shape[0]]
low_dim2=low_dim2[0:x_input2.shape[0]]
low_dim3=low_dim3[0:x_input1.shape[0]]
low_dim4=low_dim4[0:x_input2.shape[0]]
low_dim1=np.concatenate([low_dim1,low_dim3],axis=1)
low_dim2=np.concatenate([low_dim2,low_dim4],axis=1)
y_real_no1=y_testreal1[0:x_input1.shape[0]]
y_recluster_no1=recluster1[0:x_input1.shape[0]]
y_real_no2=y_testreal2[0:x_input2.shape[0]]
y_recluster_no2=recluster2[0:x_input2.shape[0]]
total_real_type=np.concatenate([y_real_no1,y_real_no2])
total_recluster_type=np.concatenate([y_recluster_no1,y_recluster_no2])
###############################################################################
series1=sc.AnnData(low_dim1)
series2=sc.AnnData(low_dim2)
mergedata=series1.concatenate(series2)
mergedata.obsm['NN']=mergedata.X
sc.pp.neighbors(mergedata,n_pcs=0)
sc.tl.louvain(mergedata)
sc.tl.leiden(mergedata)
sc.tl.umap(mergedata)
df=pd.DataFrame(total_real_type.astype('int'))
df=pd.Series(np.reshape(df.values,df.values.shape[0]), dtype="category")
mergedata.obs['real']=df.values
sc.pl.umap(mergedata,color='louvain',size=30)
sc.pl.umap(mergedata,color='leiden',size=30)
sc.pl.umap(mergedata,color='batch',size=30)
sc.pl.umap(mergedata,color='real',size=30)
type_louvain=mergedata.obs['louvain']
type_leiden=mergedata.obs['leiden']
type_batch=mergedata.obs['batch']
type_real=mergedata.obs['real']
###############################################################################
umapdata=pd.DataFrame(mergedata.obsm['X_umap'].T,index=['tSNE1','tSNE2'])
umapdata1=pd.DataFrame(mergedata.obsm['X_umap'][0:PCAseries1.shape[0],:].T,index=['tSNE1','tSNE2'])
umapdata2=pd.DataFrame(mergedata.obsm['X_umap'][PCAseries1.shape[0]:,:].T,index=['tSNE1','tSNE2'])
###############################################################################
plot_tSNE_batchclusters(umapdata1,umapdata2,choose_seriestype1,choose_seriestype2,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'batch1')
plot_tSNE_batchclusters(umapdata2,umapdata1,choose_seriestype2,choose_seriestype1,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'batch2')
plot_tSNE_clusters(umapdata,list(map(int,type_batch)), cluster_colors=cluster_colors,save=False,name=fromname+'batch')
plot_tSNE_sepclusters(umapdata1,umapdata2,choose_seriestype1,choose_seriestype2,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'label1')
plot_tSNE_sepclusters(umapdata2,umapdata1,choose_seriestype2,choose_seriestype1,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'label2')
plot_tSNE_clusters(umapdata,list(map(int,type_real)), cluster_colors=cluster_colors,save=False, name=fromname+'label')
#sio.savemat('pancreas_ourdata.mat',{'mergedata':mergedata.X,'umapdata':umapdata.values})#check whether the two batches were swapped earlier; if they were, swap them back yourself before saving!!! | 30362 | 46.815748 | 185 | py
CBA | CBA-main/species/ywb_function.py | import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
from collections import Counter
import matplotlib.pyplot as plt
from keras.regularizers import l2
from sklearn import preprocessing
from keras.layers.core import Lambda
from keras.callbacks import TensorBoard
from keras.callbacks import LearningRateScheduler
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from mpl_toolkits.axes_grid1 import make_axes_locatable
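#plotting helpers shared by the main scripts: a fixed color palette, several UMAP/t-SNE scatter variants, and the cluster-matching heatmap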
def color(value):
digit = list(map(str, range(10))) + list("ABCDEF")
if isinstance(value, tuple):
string = '#'
for i in value:
a1 = i // 16
a2 = i % 16
string += digit[a1] + digit[a2]
return string
elif isinstance(value, str):
a1 = digit.index(value[1]) * 16 + digit.index(value[2])
a2 = digit.index(value[3]) * 16 + digit.index(value[4])
a3 = digit.index(value[5]) * 16 + digit.index(value[6])
return (a1, a2, a3)
cluster_colors=[
color((213,94,0)),
color((0,114,178)),
color((204,121,167)),
color((0,158,115)),
color((86,180,233)),
color((230,159,0)),
color((240,228,66)),
color((0,0,0)),
'#D3D3D3',
'#FF00FF',
'#aec470',
'#b3ee3d',
'#de4726',
'#f69149',
'#f81919',
'#ff49b0',
'#f05556',
'#fadf0b',
'#f8c495',
'#ffc1c1',
'#ffc125',
'#ffc0cb',
'#ffbbff',
'#ffb90f',
'#ffb6c1',
'#ffb5c5',
'#ff83fa',
'#ff8c00',
'#ff4040',
'#ff3030',
'#ff34b3',
'#00fa9a',
'#ca4479',
'#eead0e',
'#ff1493',
'#0ab4e4',
'#1e6a87',
'#800080',
'#00e5ee',
'#c71585',
'#027fd0',
'#004dba',
'#0a9fb4',
'#004b71',
'#285528',
'#2f7449',
'#21b183',
'#3e4198',
'#4e14a6',
'#5dd73d',
'#64a44e',
'#6787d6',
'#6c6b6b',
'#6c6b6b',
'#7759a4',
'#78edff',
'#762a14',
'#9805cc',
'#9b067d',
'#af7efe',
'#a7623d']
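#fixed palette indexed by integer cluster label; the first eight entries appear to be the Okabe-Ito colorblind-friendly colors, the remaining hex codes are extras for datasets with many clusters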
def plot_tSNE_clusters(df_tSNE,labels,cluster_colors=None,s=6,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE.loc['tSNE1'], df_tSNE.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[i] for i in labels])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_batchclusters(df_tSNE1,df_tSNE2,labels1,labels2,cluster_colors=None,s=0.8,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE2.loc['tSNE1'], df_tSNE2.loc['tSNE2'],s=s,alpha=0.8,lw=0,c='#D3D3D3')
ax.scatter(df_tSNE1.loc['tSNE1'], df_tSNE1.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[1] for i in labels1])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_sepclusters(df_tSNE1,df_tSNE2,labels1,labels2,cluster_colors=None,s=0.8,save=False,name=None):
fig,ax=plt.subplots(figsize=(4, 4))
ax.scatter(df_tSNE2.loc['tSNE1'], df_tSNE2.loc['tSNE2'],s=s,alpha=0.8,lw=0,c='#D3D3D3')
ax.scatter(df_tSNE1.loc['tSNE1'], df_tSNE1.loc['tSNE2'],s=s,alpha=0.8,lw=0,c=[cluster_colors[i] for i in labels1])
ax.axis('equal')
ax.set_axis_off()
if save==True:
plt.savefig('{}.eps'.format(name),dpi=600,format='eps')
def plot_tSNE_cluster(df_tSNE,labels,cluster_colors=None,s=6,save=False,name=None):
index=[[] for i in range(np.max(labels)+1)]
for i in range(len(labels)):
index[int(labels[i])].append(i)
index=[i for i in index if i!=[]]
for i in range(len(np.unique(labels))):
color=np.array(labels)[index[i]][0]
fig,ax=plt.subplots()
ax.scatter(df_tSNE.loc['tSNE1'], df_tSNE.loc['tSNE2'],c='#D3D3D3',s=s,lw=0)
ax.scatter(df_tSNE.loc['tSNE1'].iloc[index[i]],df_tSNE.loc['tSNE2'].iloc[index[i]],c=[cluster_colors[k] for k in np.array(labels)[index[i]]],s=s,lw=0)
ax.axis('equal')
ax.set_axis_off()
if save == True:
plt.savefig('{}.eps'.format(name+str(color)), dpi=600,format='eps')
def gen_labels(df, model):
if str(type(model)).startswith("<class 'sklearn.cluster"):
cell_labels = dict(zip(df.columns, model.labels_))
label_cells = {}
for l in np.unique(model.labels_):
label_cells[l] = []
for i, label in enumerate(model.labels_):
label_cells[label].append(df.columns[i])
cellID = list(df.columns)
labels = list(model.labels_)
labels_a = model.labels_
elif type(model) == np.ndarray:
cell_labels = dict(zip(df.columns, model))
label_cells = {}
for l in np.unique(model):
label_cells[l] = []
for i, label in enumerate(model):
label_cells[label].append(df.columns[i])
cellID = list(df.columns)
labels = list(model)
labels_a = model
else:
print('Error wrong input type')
    return cell_labels, label_cells, cellID, labels, labels_a
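#heatmap: draws the cell-by-cell similarity matrix with the cells of both batches ordered by cluster label,
#plus colored side bars showing the cluster identity of every row and column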
def heatmap(correlation_recluster_cell_final,choose_seriestype1,choose_seriestype2,save=False,name=''):
df=pd.DataFrame(correlation_recluster_cell_final)
labels1=np.array(choose_seriestype1)
labels2=np.array(choose_seriestype2)
cell_labels1,label_cells1,cellID1,labels1,labels_a1=gen_labels(df.T,np.array(labels1))
cell_labels2,label_cells2,cellID2,labels2,labels_a2=gen_labels(df,np.array(labels2))
optimal_order=np.unique(np.concatenate([labels1,labels2]))
cl,lc=gen_labels(df,np.array(labels2))[:2]
optimal_sort_cells=sum([lc[i] for i in np.unique(labels2)],[])
optimal_sort_labels=[cl[i] for i in optimal_sort_cells]
fig,axHM=plt.subplots(figsize=(9,5))
df_full=df.copy()
z=df_full.values
z=pd.DataFrame(z, index=df_full.index,columns=df_full.columns)
z=z.loc[:,optimal_sort_cells].values
im=axHM.pcolormesh(z,cmap='viridis',vmax=1)
plt.gca().invert_yaxis()
plt.xlim(xmax=len(labels2))
plt.xticks([])
plt.yticks([])
divider=make_axes_locatable(axHM)
axLabel1=divider.append_axes("top",.3,pad=0,sharex=axHM)
axLabel2=divider.append_axes("left",.3,pad=0,sharex=axHM)
counter2=Counter(labels2)
counter1=Counter(labels1)
pos2=0
pos1=0
for l in optimal_order:
axLabel1.barh(y=0,left=pos2,width=counter2[l],color=cluster_colors[l],linewidth=0.5,edgecolor=cluster_colors[l])
pos2+=counter2[l]
optimal_order=np.flipud(optimal_order)
for l in optimal_order:
axLabel2.bar(x=0,bottom=pos1,height=counter1[l],color=cluster_colors[l],linewidth=50,edgecolor=cluster_colors[l])
pos1+=counter1[l]
axLabel1.set_xlim(xmax=len(labels2))
axLabel1.axis('off')
axLabel2.set_ylim(ymax=len(labels1))
axLabel2.axis('off')
cax=fig.add_axes([.91,0.13,0.01,0.22])
colorbar=fig.colorbar(im,cax=cax,ticks=[0,1])
colorbar.set_ticklabels(['0','max'])
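    #note: the figure is always written to disk as a .jpg here; the save flag is not checked by this function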
plt.savefig('{}.jpg'.format(name),dpi=600,format='jpg') | 7,512 | 33.782407 | 158 | py |
CBA | CBA-main/species/species_main.py | """
Created on Fri Mar 27 18:58:59 2020
@author: 17b90
"""
import kBET
import scipy
import random
import keras as K
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import scipy.io as sio
import tensorflow as tf
from keras import layers
from ywb_function import *
import sklearn.metrics as sm
from collections import Counter
import matplotlib.pyplot as plt
from keras.regularizers import l2
from sklearn import preprocessing
from keras.layers.core import Lambda
from keras.callbacks import TensorBoard
from imblearn.over_sampling import SMOTE,ADASYN
from keras.callbacks import LearningRateScheduler
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.cluster.hierarchy import dendrogram, linkage
#input the data
H_acc=sc.read_mtx('GSE127774_ACC_H_matrix.mtx')
C_acc=sc.read_mtx('GSE127774_ACC_C_matrix.mtx')
H_acc_data=scipy.sparse.csr_matrix(H_acc.X, dtype=np.int8).toarray()
C_acc_data=scipy.sparse.csr_matrix(C_acc.X, dtype=np.int8).toarray()
H_acc_gene=pd.read_csv('GSE127774_ACC_H_genes.csv', header=None)
H_acc_data=pd.DataFrame(data=H_acc_data, index=H_acc_gene[0].values).astype(float)
C_acc_gene=pd.read_csv('GSE127774_ACC_C_genes.csv', header=None)
C_acc_data=pd.DataFrame(data=C_acc_data, index=C_acc_gene[0].values).astype(float)
human_chimpanzee_genecouple=pd.read_csv('human_chimpanzee.csv', header=None)
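#keep only one-to-one homolog pairs: any gene that appears in more than one pair is zeroed out and the corresponding rows are dropped below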
row=[]
for i in range(human_chimpanzee_genecouple.shape[0]):
if (human_chimpanzee_genecouple.values==human_chimpanzee_genecouple.loc[i][0]).sum()>=2 or (human_chimpanzee_genecouple.values==human_chimpanzee_genecouple.loc[i][1]).sum()>=2:
human_chimpanzee_genecouple.loc[i][0]='0'
human_chimpanzee_genecouple.loc[i][1]='0'
row.append(i)
human_chimpanzee_genecouple_new=human_chimpanzee_genecouple.drop(row)
human_chimpanzee_genecouple_new=pd.DataFrame(human_chimpanzee_genecouple_new.values)
series1=H_acc_data#human ACC expression matrix (genes x cells)
series2=C_acc_data#chimpanzee ACC expression matrix (genes x cells)
gene_couple=human_chimpanzee_genecouple_new
series1_gene=gene_couple[0][1:].values
series2_gene=gene_couple[1][1:].values
#to remove genes which only exist in a single species
series1_gene='hg38____'+series1_gene
series2_gene='panTro5_'+series2_gene
series1_gene=list(series1_gene)
series2_gene=list(series2_gene)
for i in range(len(series1_gene)):
if series1_gene[i] not in list(series1.index) or series2_gene[i] not in list(series2.index):
series1_gene[i]=0
series2_gene[i]=0
series1_gene=list(filter(lambda x:x!=0,series1_gene))
series2_gene=list(filter(lambda x:x!=0,series2_gene))
#only choose these genes
series1_choose=series1.loc[series1_gene]
series2_choose=series2.loc[series2_gene]
series1_ann=sc.AnnData((series1_choose.values).T,obs=pd.DataFrame(series1_choose.columns), var=pd.DataFrame(series1_choose.index))
series2_ann=sc.AnnData((series2_choose.values).T,obs=pd.DataFrame(series2_choose.columns), var=pd.DataFrame(series2_choose.index))
RAWseries1=series1_ann.X.T
RAWseries2=series2_ann.X.T
fromname='humanchimpanzee'
pca_dim=20#the number of PCs
clusternumber=1
###############################################################################
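#preprocess both species together: concatenate the two count matrices, filter cells and genes, normalize, log-transform,
#keep the highly variable genes and compute one joint PCA so that both species share the same PC space; the PCs are then split back by batch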
anndata1=sc.AnnData(RAWseries1.T)
celluse=np.arange(0,anndata1.shape[0])
anndata1.obs['usecell']=celluse
sc.pp.filter_cells(anndata1,min_genes=20)#we want to keep only some of the human cells, because my laptop is not good: training with so many cells is hard for it, and the memory is also not enough
anndata2=sc.AnnData(RAWseries2.T)
celluse=np.arange(0,anndata2.shape[0])
anndata2.obs['usecell']=celluse
sc.pp.filter_cells(anndata2,min_genes=20)
anndata=anndata1.concatenate(anndata2)
sc.pp.filter_genes(anndata,min_cells=50)
sc.pp.normalize_per_cell(anndata,counts_per_cell_after=1e4)
sc.pp.log1p(anndata)
sc.pp.highly_variable_genes(anndata)
sc.pl.highly_variable_genes(anndata)
anndata=anndata[:,anndata.var['highly_variable']]
sc.tl.pca(anndata,n_comps=pca_dim)
Obtainseries1=(anndata.obsm['X_pca'])[anndata.obs['batch']=='0',:]
Obtainseries2=(anndata.obsm['X_pca'])[anndata.obs['batch']=='1',:]
Obtainseries1=sc.AnnData(Obtainseries1)
Obtainseries2=sc.AnnData(Obtainseries2)
sc.pp.neighbors(Obtainseries1,n_pcs=0)
sc.tl.umap(Obtainseries1)
sc.tl.louvain(Obtainseries1,resolution=1)
sc.pl.umap(Obtainseries1,color='louvain',size=30)
sc.pp.neighbors(Obtainseries2,n_pcs=0)
sc.tl.umap(Obtainseries2)
sc.tl.louvain(Obtainseries2,resolution=1)
sc.pl.umap(Obtainseries2,color='louvain',size=30)
PCAseries1=Obtainseries1.X
PCAseries2=Obtainseries2.X
###############################################################################
recluster1=np.array(list(map(int,Obtainseries1.obs['louvain'])))
recluster2=np.array(list(map(int,Obtainseries2.obs['louvain'])))
###############################################################################
#for i in range(len(np.unique(recluster1))):
# print((np.where(recluster1==i))[0].shape[0])
#for i in range(len(np.unique(recluster2))):
# print((np.where(recluster2==i))[0].shape[0])
#
##for the first batch
#number_cluster1=len(np.unique(recluster1))
#series1_data=np.zeros([0,PCAseries1.shape[1]])
#series1_index=np.zeros([0])
#recluster1plus=np.zeros([0])
#alpha=3#because of the memory limitation of my laptop, I have to retain 1/3 of the human cells, so I preserve 1/3 of the cells in each Louvain cluster; this step is also unsupervised
#for i in range(number_cluster1):
# index=np.where(recluster1==i)[0]
# random.shuffle(index)
# series1_data=np.concatenate([series1_data,(PCAseries1)[index[0::alpha]]])
# series1_index=np.concatenate([series1_index,index[0::alpha]])
# recluster1plus=np.concatenate([recluster1plus,np.zeros([index[0::alpha].shape[0]])+i])
#
##for the second batch
#number_cluster2=len(np.unique(recluster2))
#series2_data=np.zeros([0,PCAseries2.shape[1]])
#series2_index=np.zeros([0])
#recluster2plus=np.zeros([0])
#beta=1#fortunately, we could retain all chimp cells!!!!!
#for i in range(number_cluster2):
# index=np.where(recluster2==i)[0]
# random.shuffle(index)
# series2_data=np.concatenate([series2_data,(PCAseries2)[index[0::beta]]])
# series2_index=np.concatenate([series2_index,index[0::beta]])
# recluster2plus=np.concatenate([recluster2plus,np.zeros([index[0::beta].shape[0]])+i])
#
#sio.savemat('series1_index.mat',{'series1_index':series1_index})
#sio.savemat('series2_index.mat',{'series2_index':series2_index})
#these are the indexes of the cells I used (precomputed with the commented-out subsampling block above)
series1_index=sio.loadmat('series1_index.mat')['series1_index'][0].astype('int')
series2_index=sio.loadmat('series2_index.mat')['series2_index'][0].astype('int')
PCAseries1=PCAseries1[series1_index]
PCAseries2=PCAseries2[series2_index]
recluster1=recluster1[series1_index]
recluster2=recluster2[series2_index]
recluster1=recluster1.astype('int')
recluster2=recluster2.astype('int')
print(recluster1.shape[0])
print(recluster2.shape[0])
###############################################################################
def dis(P,Q,distance_method):
if distance_method==0:
return np.sqrt(np.sum(np.square(P-Q)))
if distance_method==1:
return 1-(np.multiply(P,Q).sum()/(np.sqrt(np.sum(np.square(P)))*np.sqrt(np.sum(np.square(Q)))))
###############################################################################
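#swap the two species if necessary so that the '1plus' batch is always the one with more Louvain clusters;
#the change flag records the swap so that it can be undone after the matching step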
change=0
if len(np.unique(recluster1))<=len(np.unique(recluster2)):
PCAseries1plus=PCAseries2
PCAseries2plus=PCAseries1
recluster1plus=recluster2
recluster2plus=recluster1
change=1
else:
PCAseries1plus=PCAseries1
PCAseries2plus=PCAseries2
recluster1plus=recluster1
recluster2plus=recluster2
###############################################################################
#ok, let's calculate the similarity of cells/clusters
correlation_recluster=np.zeros([len(np.unique(recluster1plus)),len(np.unique(recluster2plus))])
correlation_recluster_cell=np.zeros([recluster1plus.shape[0],recluster2plus.shape[0]])
for i in range(len(np.unique(recluster1plus))):
for j in range(len(np.unique(recluster2plus))):
print(i,j)
index_series1=np.where(recluster1plus==i)[0]
index_series2=np.where(recluster2plus==j)[0]
cell_series1=PCAseries1plus[index_series1,:]
cell_series2=PCAseries2plus[index_series2,:]
mean1=0
for iq in range(cell_series1.shape[0]):
for jq in range(cell_series2.shape[0]):
mean1+=dis(cell_series1[iq,:],cell_series2[jq,:],1)
correlation_recluster[i,j]=mean1/(cell_series1.shape[0]*cell_series2.shape[0])
for ii in range(cell_series1.shape[0]):
for jj in range(cell_series2.shape[0]):
mean2=dis(cell_series1[ii,:],cell_series2[jj,:],0)
correlation_recluster_cell[index_series1[ii],index_series2[jj]]=mean2+0.00001
plt.imshow(correlation_recluster)
plt.imshow(correlation_recluster_cell)
correlation_recluster_div=-np.log10(correlation_recluster)
correlation_recluster_cell_div=-np.log10(correlation_recluster_cell)
correlation_recluster_norm=(correlation_recluster_div-correlation_recluster_div.min())/(correlation_recluster_div.max()-correlation_recluster_div.min())
correlation_recluster_cell_norm=(correlation_recluster_cell_div-correlation_recluster_cell_div.min())/(correlation_recluster_cell_div.max()-correlation_recluster_cell_div.min())
plt.imshow(correlation_recluster_norm)
plt.imshow(correlation_recluster_cell_norm)
###############################################################################
#remove bad parts, do the matching
correlation_recluster_select=np.zeros(correlation_recluster_norm.shape)
recluster_mid=np.zeros(recluster1plus.shape)
for kk in range(correlation_recluster_norm.shape[0]):
ind=np.sort(correlation_recluster_norm[kk,:])
select=correlation_recluster_norm[kk,:]<ind[-clusternumber]
select=(select==False)
recluster_mid[recluster1plus==kk]+=int(np.where(select==True)[0])
correlation_recluster_select[kk,:]=correlation_recluster_norm[kk,:]*select
plt.imshow(correlation_recluster_select)
correlation_recluster_cell_final=correlation_recluster_cell*0
for i in range(correlation_recluster_cell_norm.shape[0]):
for j in range(correlation_recluster_cell_norm.shape[1]):
label1=recluster1plus[i]
label2=recluster2plus[j]
mean1=correlation_recluster_select[label1,label2]
mean2=correlation_recluster_cell_norm[i,j]
if mean1==0:
correlation_recluster_cell_final[i,j]=0
else:
correlation_recluster_cell_final[i,j]=mean2
plt.imshow(correlation_recluster_select)
plt.imshow(correlation_recluster_cell_final)
recluster1plus=recluster_mid.astype('int')
np.unique(recluster1plus)
np.unique(recluster2plus)
sort_correlation_recluster_cell_final=correlation_recluster_cell_final[recluster1plus.argsort(),:]
sort_correlation_recluster_cell_final=sort_correlation_recluster_cell_final[:,recluster2plus.argsort()]
heatmap(sort_correlation_recluster_cell_final,recluster1plus,recluster2plus,save=True,name='speciesmatrix')
###############################################################################
if change==1:
PCAseries1=PCAseries2plus
PCAseries2=PCAseries1plus
recluster1=recluster2plus
recluster2=recluster1plus
else:
PCAseries1=PCAseries1plus
PCAseries2=PCAseries2plus
recluster1=recluster1plus
recluster2=recluster2plus
###############################################################################
Obtainseries1plus=sc.AnnData(PCAseries1)
Obtainseries2plus=sc.AnnData(PCAseries2)
sc.pp.neighbors(Obtainseries1plus,n_pcs=0)
sc.tl.umap(Obtainseries1plus)
df=pd.DataFrame(recluster1.astype('int'))
df=pd.Series(np.reshape(df.values,df.values.shape[0]), dtype="category")
Obtainseries1plus.obs['louvain']=df.values
sc.pl.umap(Obtainseries1plus,color='louvain',size=30)
umapdata1=pd.DataFrame(Obtainseries1plus.obsm['X_umap'].T,
index=['tSNE1','tSNE2'])
plot_tSNE_clusters(umapdata1,Obtainseries1plus.obs['louvain'],cluster_colors=cluster_colors,save=False, name=fromname+'louvain')
sc.pp.neighbors(Obtainseries2plus,n_pcs=0)
sc.tl.umap(Obtainseries2plus)
df=pd.DataFrame(recluster2.astype('int'))
df=pd.Series(np.reshape(df.values,df.values.shape[0]), dtype="category")
Obtainseries2plus.obs['louvain']=df.values
sc.pl.umap(Obtainseries2plus,color='louvain',size=30)
umapdata2=pd.DataFrame(Obtainseries2plus.obsm['X_umap'].T,
index=['tSNE1','tSNE2'])
plot_tSNE_clusters(umapdata2,Obtainseries2plus.obs['louvain'],cluster_colors=cluster_colors,save=False, name=fromname+'louvain')
###############################################################################
#ok, I use Keras; cells in each mini-batch are selected randomly, and I did not find a direct way to pair cells with their similarity values inside Keras
#I also did not find a way to attach the cell-pair distances directly, so I design the following inputs instead
#it wastes some time and memory, and it is not the clearest construction for readers, but it works!
PCAseries=np.concatenate([PCAseries1,PCAseries2])
PCAseries=preprocessing.StandardScaler().fit_transform(PCAseries)
PCAseries=preprocessing.MinMaxScaler().fit_transform(PCAseries)
PCAseries1=PCAseries[0:PCAseries1.shape[0]]
PCAseries2=PCAseries[PCAseries1.shape[0]:]
x_input1=np.zeros([PCAseries1.shape[0],PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]+max(recluster1.max(),recluster2.max())+1])
x_input2=np.zeros([PCAseries2.shape[0],PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]+max(recluster1.max(),recluster2.max())+1])
for i in range(PCAseries1.shape[0]):
print(i)
x_input1[i,0:PCAseries1.shape[1]]=PCAseries1[i,:]
x_input1[i,PCAseries1.shape[1]:PCAseries1.shape[1]+PCAseries1.shape[0]]=K.utils.np_utils.to_categorical(i,PCAseries1.shape[0])
x_input1[i,PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]=correlation_recluster_cell_final[i,:]
x_input1[i,PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]:]=K.utils.np_utils.to_categorical(recluster1[i],max(recluster1.max(),recluster2.max())+1)
for j in range(PCAseries2.shape[0]):
print(j)
x_input2[j,0:PCAseries2.shape[1]]=PCAseries2[j,:]
x_input2[j,PCAseries2.shape[1]:PCAseries2.shape[1]+PCAseries2.shape[0]]=K.utils.np_utils.to_categorical(j,PCAseries2.shape[0])
x_input2[j,PCAseries2.shape[1]+PCAseries2.shape[0]:PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]]=correlation_recluster_cell_final[:,j]
x_input2[j,PCAseries2.shape[1]+PCAseries2.shape[0]+PCAseries1.shape[0]:]=K.utils.np_utils.to_categorical(recluster2[j],max(recluster1.max(),recluster2.max())+1)
###############################################################################
#interesting: the paired training needs the two batches to contain the same number of cells, so the cells of the smaller batch are copied (tiled) again and again
if x_input1.shape[0]>=x_input2.shape[0]:
x_test1=x_input1
y_test1=recluster1
    y_testreal1=recluster1#this dataset has no ground-truth cell-type labels, so the Louvain labels are used as placeholders here
repeat_num=int(np.ceil(x_input1.shape[0]/x_input2.shape[0]))
x_test2=np.tile(x_input2,(repeat_num,1))
y_test2=np.tile(recluster2,repeat_num)
    y_testreal2=np.tile(recluster2,repeat_num)
x_test2=x_test2[0:x_test1.shape[0],:]
y_test2=y_test2[0:x_test1.shape[0]]
y_testreal2=y_testreal2[0:x_test1.shape[0]]
elif x_input1.shape[0]<x_input2.shape[0]:
x_test2=x_input2
y_test2=recluster2
    y_testreal2=recluster2#placeholder labels (no ground truth available), as above
repeat_num=int(np.ceil(x_input2.shape[0]/x_input1.shape[0]))
x_test1=np.tile(x_input1,(repeat_num,1))
y_test1=np.tile(recluster1,repeat_num)
    y_testreal1=np.tile(recluster1,repeat_num)
x_test1=x_test1[0:x_test2.shape[0],:]
y_test1=y_test1[0:x_test2.shape[0]]
y_testreal1=y_testreal1[0:x_test2.shape[0]]
###############################################################################
def choose_info(x,info_number):
return x[:,0:info_number]
def choose_index(x,info_number,x_samplenumber):
return x[:,info_number:info_number+x_samplenumber]
def choose_corrlation(x,info_number,x_samplenumber,cor_number):
return x[:,info_number+x_samplenumber:info_number+x_samplenumber+cor_number]
def choose_relabel(x,info_number,x_samplenumber,cor_number):
return x[:,info_number+x_samplenumber+cor_number:]
def slic(input_):
return input_[:,0]
###############################################################################
activation='relu'
info_number=PCAseries1.shape[1]
layer=PCAseries1.shape[1]
input1=K.Input(shape=(x_test1.shape[1],))#line1 species1
input2=K.Input(shape=(x_test2.shape[1],))#line1 species2
input3=K.Input(shape=(x_test1.shape[1],))#line2 species1
input4=K.Input(shape=(x_test2.shape[1],))#line2 species2
Data1=Lambda(choose_info,arguments={'info_number':info_number})(input1)
Data2=Lambda(choose_info,arguments={'info_number':info_number})(input2)
Data3=Lambda(choose_info,arguments={'info_number':info_number})(input3)
Data4=Lambda(choose_info,arguments={'info_number':info_number})(input4)
Index1=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0]})(input1)
Index2=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0]})(input2)
Index3=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0]})(input3)
Index4=Lambda(choose_index,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0]})(input4)
Cor1=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input1)
Cor2=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input2)
Cor3=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input3)
Cor4=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)
Relabel1=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input1)
Relabel2=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input2)
Relabel3=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input3)
Relabel4=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)
x_concat1=layers.concatenate([Data1,Data3])#batch1
x_concat2=layers.concatenate([Data2,Data4])#batch2
x1=layers.Dense(layer,activation=activation)(Data1)
x2=layers.Dense(layer,activation=activation)(Data2)
x3=layers.Dense(layer,activation=activation)(Data3)
x4=layers.Dense(layer,activation=activation)(Data4)
x1=layers.BatchNormalization()(x1)
x2=layers.BatchNormalization()(x2)
x3=layers.BatchNormalization()(x3)
x4=layers.BatchNormalization()(x4)
x1_mid1=layers.Dense(layer,activation=activation)(layers.concatenate([x1,x2]))
x2_mid1=layers.Dense(layer,activation=activation)(layers.concatenate([x1,x2]))
x1_mid2=layers.Dense(layer,activation=activation)(layers.concatenate([x3,x4]))
x2_mid2=layers.Dense(layer,activation=activation)(layers.concatenate([x3,x4]))
x1_mid1=layers.BatchNormalization()(x1_mid1)
x2_mid1=layers.BatchNormalization()(x2_mid1)
x1_mid2=layers.BatchNormalization()(x1_mid2)
x2_mid2=layers.BatchNormalization()(x2_mid2)
layer_classify=layers.Dense(max(recluster1.max(),recluster2.max())+1,activation='relu')
y1=layer_classify(x1_mid1)
y2=layer_classify(x2_mid1)
y3=layer_classify(x1_mid2)
y4=layer_classify(x2_mid2)
x1=layers.concatenate([x1_mid1,x1_mid2])#batch1
x2=layers.concatenate([x2_mid1,x2_mid2])#batch2
output1=layers.Dense(2*layer,activation=activation)(x1)
output2=layers.Dense(2*layer,activation=activation)(x2)
output1=layers.BatchNormalization()(output1)
output2=layers.BatchNormalization()(output2)
def loss_weight(input_):
return tf.reduce_sum(tf.multiply(input_[0],input_[1]),axis=-1)
def MSE(input_):
return tf.reduce_mean(tf.square(input_[0]-input_[1]),axis=-1)
def multi_classification_loss(input_):
return tf.keras.losses.categorical_crossentropy(input_[0],input_[1])
AE_loss_1=Lambda(MSE)([output1,x_concat1])
AE_loss_2=Lambda(MSE)([output2,x_concat2])
cls_loss_1=Lambda(MSE)([y1,Relabel1])
cls_loss_2=Lambda(MSE)([y2,Relabel2])
cls_loss_3=Lambda(MSE)([y3,Relabel3])
cls_loss_4=Lambda(MSE)([y4,Relabel4])
interweight1=Lambda(loss_weight)([Index1,Cor2])
interweight4=Lambda(loss_weight)([Index3,Cor4])
interloss_1=Lambda(MSE)([x1_mid1,x2_mid1])
interloss_4=Lambda(MSE)([x1_mid2,x2_mid2])
interloss_1=layers.Multiply()([interweight1,interloss_1])
interloss_4=layers.Multiply()([interweight4,interloss_4])
intraweight1=Lambda(loss_weight)([Relabel1,Relabel3])
intraweight2=Lambda(loss_weight)([Relabel2,Relabel4])
intraloss_1=Lambda(MSE)([x1_mid1,x1_mid2])
intraloss_2=Lambda(MSE)([x2_mid1,x2_mid2])
intraloss_1=layers.Multiply()([intraweight1,intraloss_1])
intraloss_2=layers.Multiply()([intraweight2,intraloss_2])
Loss1=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss1')([AE_loss_1,AE_loss_2])
Loss2=Lambda(lambda x:(x[0]*1+x[1]*1+x[2]*1+x[3]*1)/4,name='loss2')([cls_loss_1,cls_loss_2,cls_loss_3,cls_loss_4])
Loss3=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss3')([interloss_1,interloss_4])
Loss4=Lambda(lambda x:(x[0]*1+x[1]*1)/2,name='loss4')([intraloss_1,intraloss_2])
###############################################################################
network_train=K.models.Model([input1,input2,input3,input4],
[Loss1,Loss2,Loss3,Loss4])
network_train.summary()
###############################################################################
intra_data1={}
inter_data1={}
for i in range(x_test1.shape[0]):
label_i=y_test1[i]
intra_data1[i]=np.where(y_test1==label_i)
inter_data1[i]=np.where(y_test1!=label_i)
intra_data2={}
inter_data2={}
for i in range(x_test2.shape[0]):
label_i=y_test2[i]
intra_data2[i]=np.where(y_test2==label_i)
inter_data2[i]=np.where(y_test2!=label_i)
###############################################################################
batch_size=512
train_loss=[]
loss1=[]
loss2=[]
loss3=[]
loss4=[]
###############################################################################
iterations=1
lr=5e-3
optimizer=K.optimizers.Adam(lr=lr)
loss_weights=[1,1,1,1]
network_train.compile(optimizer=optimizer,
loss=[lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred,
lambda y_true,y_pred: y_pred],
loss_weights=loss_weights)
for i in range(iterations):
x_input1_series1_train=np.zeros(x_test1.shape)
index0=np.zeros(x_input1_series1_train.shape[0])
x_input1_series2_train=np.zeros(x_test2.shape)
index1=np.zeros(x_input1_series2_train.shape[0])
x_input2_series1_train=np.zeros(x_test1.shape)
index2=np.zeros(x_input2_series1_train.shape[0])
x_input2_series2_train=np.zeros(x_test2.shape)
index3=np.zeros(x_input2_series2_train.shape[0])
for ii in range(x_test1.shape[0]):
index0[ii]=random.choice(range(x_test1.shape[0]))
rand1=random.random()
in_rand1=np.where(x_test1[ii,:][PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]>0)[0]
out_rand1=np.where(x_test1[ii,:][PCAseries1.shape[1]+PCAseries1.shape[0]:PCAseries1.shape[1]+PCAseries1.shape[0]+PCAseries2.shape[0]]<=0)[0]
if rand1>=0.5:
index1[ii]=random.choice(in_rand1)
elif rand1<0.5:
index1[ii]=random.choice(out_rand1)
rand2=random.random()
if rand2>=0.5:
index2[ii]=random.choice(intra_data1[index0[ii]][0])
elif rand2<0.5:
index2[ii]=random.choice(inter_data1[index0[ii]][0])
rand3=random.random()
if rand3>=0.5:
index3[ii]=random.choice(intra_data2[index1[ii]][0])
elif rand3<0.5:
index3[ii]=random.choice(inter_data2[index1[ii]][0])
train1=x_test1[index0.astype('int'),:]
train2=x_test2[index1.astype('int'),:]
train3=x_test1[index2.astype('int'),:]
train4=x_test2[index3.astype('int'),:]
Train=network_train.fit([train1,train2,train3,train4],
[np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1]),
np.zeros([train1.shape[0],1])],
batch_size=batch_size,shuffle=True)
train_loss.append(Train.history['loss'][:][0])
loss1.append(Train.history['loss1_loss'][:][0]*loss_weights[0])
loss2.append(Train.history['loss2_loss'][:][0]*loss_weights[1])
loss3.append(Train.history['loss3_loss'][:][0]*loss_weights[2])
loss4.append(Train.history['loss4_loss'][:][0]*loss_weights[3])
print(i,'loss=',
Train.history['loss'][:][0],
Train.history['loss1_loss'][:][0]*loss_weights[0],
Train.history['loss2_loss'][:][0]*loss_weights[1],
Train.history['loss3_loss'][:][0]*loss_weights[2],
Train.history['loss4_loss'][:][0]*loss_weights[3])
if i>10:
plt.plot(train_loss[:])
plt.plot(loss1[:])
plt.plot(loss2[:])
plt.plot(loss3[:])
plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
        plt.ylim(0,max(max(train_loss[len(train_loss)-10:]),max(loss1[len(train_loss)-10:]),
                       max(loss2[len(train_loss)-10:]),max(loss3[len(train_loss)-10:]),
                       max(loss4[len(train_loss)-10:])))
plt.xlim(len(train_loss)-10-10,len(train_loss))
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
plt.plot(train_loss[:])
plt.plot(loss1[:])
plt.plot(loss2[:])
plt.plot(loss3[:])
plt.plot(loss4[:])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
else:
plt.plot(train_loss[10:])
plt.plot(loss1[10:])
plt.plot(loss2[10:])
plt.plot(loss3[10:])
plt.plot(loss4[10:])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train','loss1','loss2','loss3','loss4'],loc='upper left')
plt.show()
###############################################################################
network_train.load_weights('speciesAC.h5')
network_predict=K.models.Model([input1,input2,input3,input4],
[x1_mid1,x2_mid1,x1_mid2,x2_mid2])
[low_dim1,low_dim2,low_dim3,low_dim4]=network_predict.predict([x_test1,x_test2,x_test1,x_test2])
low_dim1=low_dim1[0:x_input1.shape[0]]
low_dim2=low_dim2[0:x_input2.shape[0]]
low_dim3=low_dim3[0:x_input1.shape[0]]
low_dim4=low_dim4[0:x_input2.shape[0]]
y_recluster_no1=recluster1[0:x_input1.shape[0]]
y_recluster_no2=recluster2[0:x_input2.shape[0]]
###############################################################################
total_recluster_type=np.concatenate([y_recluster_no1,y_recluster_no2])
###############################################################################
series1=sc.AnnData(low_dim1)
series2=sc.AnnData(low_dim2)
mergedata=series1.concatenate(series2)
mergedata.obsm['NN']=mergedata.X
sc.pp.neighbors(mergedata,n_pcs=0)
sc.tl.louvain(mergedata)
sc.tl.leiden(mergedata)
sc.tl.umap(mergedata)
sc.pl.umap(mergedata,color='louvain',size=30)
sc.pl.umap(mergedata,color='leiden',size=30)
sc.pl.umap(mergedata,color='batch',size=30)
type_louvain=mergedata.obs['louvain']
type_leiden=mergedata.obs['leiden']
type_batch=mergedata.obs['batch']
###############################################################################
umapdata=pd.DataFrame(mergedata.obsm['X_umap'].T,index=['tSNE1','tSNE2'])
umapdata1=pd.DataFrame(mergedata.obsm['X_umap'][0:PCAseries1.shape[0],:].T,index=['tSNE1','tSNE2'])
umapdata2=pd.DataFrame(mergedata.obsm['X_umap'][PCAseries1.shape[0]:,:].T,index=['tSNE1','tSNE2'])
###############################################################################
fromname='一次审核之后的结果/figure/speciesCBA_'  # output figure prefix; the Chinese folder name means "results after the first round of review"
plot_tSNE_sepclusters(umapdata1,umapdata2,y_recluster_noSMOTE1*0,y_recluster_noSMOTE2*0+1,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'label1')
plot_tSNE_sepclusters(umapdata2,umapdata1,y_recluster_noSMOTE2*0+1,y_recluster_noSMOTE1*0,s=6,cluster_colors=cluster_colors,save=False,name=fromname+'label2')
plot_tSNE_clusters(umapdata,list(map(int,np.concatenate([y_recluster_noSMOTE1*0,y_recluster_noSMOTE2*0+1]))),cluster_colors=cluster_colors,save=False, name=fromname+'batch')
plot_tSNE_clusters(umapdata,list(map(int,type_louvain)), cluster_colors=cluster_colors,save=False, name=fromname+'louvain') | 29,319 | 43.969325 | 198 | py |
ColBERT | ColBERT-master/colbert/parameters.py | import torch
DEVICE = torch.device("cuda")
SAVED_CHECKPOINTS = [32*1000, 100*1000, 150*1000, 200*1000, 300*1000, 400*1000]
SAVED_CHECKPOINTS += [10*1000, 20*1000, 30*1000, 40*1000, 50*1000, 60*1000, 70*1000, 80*1000, 90*1000]
SAVED_CHECKPOINTS += [25*1000, 50*1000, 75*1000]
SAVED_CHECKPOINTS = set(SAVED_CHECKPOINTS)
| 321 | 31.2 | 102 | py |
ColBERT | ColBERT-master/colbert/train.py | import os
import random
import torch
import copy
import colbert.utils.distributed as distributed
from colbert.utils.parser import Arguments
from colbert.utils.runs import Run
from colbert.training.training import train
def main():
parser = Arguments(description='Training ColBERT with <query, positive passage, negative passage> triples.')
parser.add_model_parameters()
parser.add_model_training_parameters()
parser.add_training_input()
args = parser.parse()
assert args.bsize % args.accumsteps == 0, ((args.bsize, args.accumsteps),
"The batch size must be divisible by the number of gradient accumulation steps.")
assert args.query_maxlen <= 512
assert args.doc_maxlen <= 512
args.lazy = args.collection is not None
with Run.context(consider_failed_if_interrupted=False):
train(args)
if __name__ == "__main__":
main()
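# (Added, illustrative only; not part of the original file.) A typical launch, adapted from the
# upstream ColBERT README; the exact flag names are defined in colbert/utils/parser.py and are not
# verified here, so treat this as an approximation:
#   CUDA_VISIBLE_DEVICES="0,1,2,3" python -m torch.distributed.launch --nproc_per_node=4 -m colbert.train \
#       --amp --doc_maxlen 180 --mask-punctuation --bsize 32 --accum 1 \
#       --triples /path/to/triples.train.small.tsv \
#       --root ./experiments --experiment MSMARCO-psg --similarity l2 --run msmarco.psg.l2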
| 929 | 25.571429 | 128 | py |
ColBERT | ColBERT-master/colbert/evaluation/loaders.py | import os
import ujson
import torch
import random
from collections import defaultdict, OrderedDict
from colbert.parameters import DEVICE
from colbert.modeling.colbert import ColBERT
from colbert.utils.utils import print_message, load_checkpoint
from colbert.evaluation.load_model import load_model
from colbert.utils.runs import Run
def load_queries(queries_path):
queries = OrderedDict()
print_message("#> Loading the queries from", queries_path, "...")
with open(queries_path) as f:
for line in f:
qid, query, *_ = line.strip().split('\t')
qid = int(qid)
assert (qid not in queries), ("Query QID", qid, "is repeated!")
queries[qid] = query
print_message("#> Got", len(queries), "queries. All QIDs are unique.\n")
return queries
def load_qrels(qrels_path):
if qrels_path is None:
return None
print_message("#> Loading qrels from", qrels_path, "...")
qrels = OrderedDict()
with open(qrels_path, mode='r', encoding="utf-8") as f:
for line in f:
qid, x, pid, y = map(int, line.strip().split('\t'))
assert x == 0 and y == 1
qrels[qid] = qrels.get(qid, [])
qrels[qid].append(pid)
assert all(len(qrels[qid]) == len(set(qrels[qid])) for qid in qrels)
avg_positive = round(sum(len(qrels[qid]) for qid in qrels) / len(qrels), 2)
print_message("#> Loaded qrels for", len(qrels), "unique queries with",
avg_positive, "positives per query on average.\n")
return qrels
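# --- Illustrative sketch (not part of the original file): the TSV formats the two loaders above expect.
# The qids, pids, and query strings below are made up purely for demonstration; nothing calls this function.
def _example_queries_and_qrels():
    import tempfile
    q_file = tempfile.NamedTemporaryFile('w', suffix='.tsv', delete=False)
    q_file.write("1\twhat is late interaction\n2\thow are passages scored\n")  # qid \t query
    q_file.close()
    qrels_file = tempfile.NamedTemporaryFile('w', suffix='.tsv', delete=False)
    qrels_file.write("1\t0\t42\t1\n2\t0\t7\t1\n")  # TREC-style: qid \t 0 \t pid \t 1
    qrels_file.close()
    queries = load_queries(q_file.name)    # OrderedDict {1: 'what is late interaction', 2: 'how are passages scored'}
    qrels = load_qrels(qrels_file.name)    # OrderedDict {1: [42], 2: [7]}
    return queries, qrels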
def load_topK(topK_path):
queries = OrderedDict()
topK_docs = OrderedDict()
topK_pids = OrderedDict()
print_message("#> Loading the top-k per query from", topK_path, "...")
with open(topK_path) as f:
for line_idx, line in enumerate(f):
if line_idx and line_idx % (10*1000*1000) == 0:
print(line_idx, end=' ', flush=True)
qid, pid, query, passage = line.split('\t')
qid, pid = int(qid), int(pid)
assert (qid not in queries) or (queries[qid] == query)
queries[qid] = query
topK_docs[qid] = topK_docs.get(qid, [])
topK_docs[qid].append(passage)
topK_pids[qid] = topK_pids.get(qid, [])
topK_pids[qid].append(pid)
print()
assert all(len(topK_pids[qid]) == len(set(topK_pids[qid])) for qid in topK_pids)
Ks = [len(topK_pids[qid]) for qid in topK_pids]
print_message("#> max(Ks) =", max(Ks), ", avg(Ks) =", round(sum(Ks) / len(Ks), 2))
print_message("#> Loaded the top-k per query for", len(queries), "unique queries.\n")
return queries, topK_docs, topK_pids
def load_topK_pids(topK_path, qrels):
topK_pids = defaultdict(list)
topK_positives = defaultdict(list)
print_message("#> Loading the top-k PIDs per query from", topK_path, "...")
with open(topK_path) as f:
for line_idx, line in enumerate(f):
if line_idx and line_idx % (10*1000*1000) == 0:
print(line_idx, end=' ', flush=True)
qid, pid, *rest = line.strip().split('\t')
qid, pid = int(qid), int(pid)
topK_pids[qid].append(pid)
assert len(rest) in [1, 2, 3]
if len(rest) > 1:
*_, label = rest
label = int(label)
assert label in [0, 1]
if label >= 1:
topK_positives[qid].append(pid)
print()
assert all(len(topK_pids[qid]) == len(set(topK_pids[qid])) for qid in topK_pids)
assert all(len(topK_positives[qid]) == len(set(topK_positives[qid])) for qid in topK_positives)
# Make them sets for fast lookups later
topK_positives = {qid: set(topK_positives[qid]) for qid in topK_positives}
Ks = [len(topK_pids[qid]) for qid in topK_pids]
print_message("#> max(Ks) =", max(Ks), ", avg(Ks) =", round(sum(Ks) / len(Ks), 2))
print_message("#> Loaded the top-k per query for", len(topK_pids), "unique queries.\n")
if len(topK_positives) == 0:
topK_positives = None
else:
assert len(topK_pids) >= len(topK_positives)
for qid in set.difference(set(topK_pids.keys()), set(topK_positives.keys())):
topK_positives[qid] = []
assert len(topK_pids) == len(topK_positives)
avg_positive = round(sum(len(topK_positives[qid]) for qid in topK_positives) / len(topK_pids), 2)
print_message("#> Concurrently got annotations for", len(topK_positives), "unique queries with",
avg_positive, "positives per query on average.\n")
assert qrels is None or topK_positives is None, "Cannot have both qrels and an annotated top-K file!"
if topK_positives is None:
topK_positives = qrels
return topK_pids, topK_positives
def load_collection(collection_path):
print_message("#> Loading collection...")
collection = []
with open(collection_path) as f:
for line_idx, line in enumerate(f):
if line_idx % (1000*1000) == 0:
print(f'{line_idx // 1000 // 1000}M', end=' ', flush=True)
pid, passage, *rest = line.strip().split('\t')
assert pid == 'id' or int(pid) == line_idx
if len(rest) >= 1:
title = rest[0]
passage = title + ' | ' + passage
collection.append(passage)
print()
return collection
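# (Added note, not in the original source.) collection.tsv is expected to look like
#   0 \t <passage text> [\t <title>]
#   1 \t <passage text> [\t <title>]
# i.e. the integer pid must equal the 0-based line index (a leading "id ..." header row is tolerated),
# and when a title column is present it is prepended to the passage as "<title> | <passage>".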
def load_colbert(args, do_print=True):
colbert, checkpoint = load_model(args, do_print)
    # TODO: If the parameters below were not specified on the command line, their *checkpoint* values should be used
    # (i.e., the values saved at training time), rather than the argument parser's training-time defaults.
for k in ['query_maxlen', 'doc_maxlen', 'dim', 'similarity', 'amp']:
if 'arguments' in checkpoint and hasattr(args, k):
if k in checkpoint['arguments'] and checkpoint['arguments'][k] != getattr(args, k):
a, b = checkpoint['arguments'][k], getattr(args, k)
Run.warn(f"Got checkpoint['arguments']['{k}'] != args.{k} (i.e., {a} != {b})")
if 'arguments' in checkpoint:
if args.rank < 1:
print(ujson.dumps(checkpoint['arguments'], indent=4))
if do_print:
print('\n')
return colbert, checkpoint
| 6,329 | 31.13198 | 117 | py |
ColBERT | ColBERT-master/colbert/evaluation/load_model.py | import os
import ujson
import torch
import random
from collections import defaultdict, OrderedDict
from colbert.parameters import DEVICE
from colbert.modeling.colbert import ColBERT
from colbert.utils.utils import print_message, load_checkpoint
def load_model(args, do_print=True):
colbert = ColBERT.from_pretrained('bert-base-uncased',
query_maxlen=args.query_maxlen,
doc_maxlen=args.doc_maxlen,
dim=args.dim,
similarity_metric=args.similarity,
mask_punctuation=args.mask_punctuation)
colbert = colbert.to(DEVICE)
print_message("#> Loading model checkpoint.", condition=do_print)
checkpoint = load_checkpoint(args.checkpoint, colbert, do_print=do_print)
colbert.eval()
return colbert, checkpoint
| 919 | 30.724138 | 77 | py |
ColBERT | ColBERT-master/colbert/evaluation/ranking.py | import os
import random
import time
import torch
import torch.nn as nn
from itertools import accumulate
from math import ceil
from colbert.utils.runs import Run
from colbert.utils.utils import print_message
from colbert.evaluation.metrics import Metrics
from colbert.evaluation.ranking_logger import RankingLogger
from colbert.modeling.inference import ModelInference
from colbert.evaluation.slow import slow_rerank
def evaluate(args):
args.inference = ModelInference(args.colbert, amp=args.amp)
qrels, queries, topK_pids = args.qrels, args.queries, args.topK_pids
depth = args.depth
collection = args.collection
if collection is None:
topK_docs = args.topK_docs
def qid2passages(qid):
if collection is not None:
return [collection[pid] for pid in topK_pids[qid][:depth]]
else:
return topK_docs[qid][:depth]
metrics = Metrics(mrr_depths={10, 100}, recall_depths={50, 200, 1000},
success_depths={5, 10, 20, 50, 100, 1000},
total_queries=len(queries))
ranking_logger = RankingLogger(Run.path, qrels=qrels)
args.milliseconds = []
with ranking_logger.context('ranking.tsv', also_save_annotations=(qrels is not None)) as rlogger:
with torch.no_grad():
keys = sorted(list(queries.keys()))
random.shuffle(keys)
for query_idx, qid in enumerate(keys):
query = queries[qid]
print_message(query_idx, qid, query, '\n')
if qrels and args.shortcircuit and len(set.intersection(set(qrels[qid]), set(topK_pids[qid]))) == 0:
continue
ranking = slow_rerank(args, query, topK_pids[qid], qid2passages(qid))
rlogger.log(qid, ranking, [0, 1])
if qrels:
metrics.add(query_idx, qid, ranking, qrels[qid])
for i, (score, pid, passage) in enumerate(ranking):
if pid in qrels[qid]:
print("\n#> Found", pid, "at position", i+1, "with score", score)
print(passage)
break
metrics.print_metrics(query_idx)
metrics.log(query_idx)
print_message("#> checkpoint['batch'] =", args.checkpoint['batch'], '\n')
print("rlogger.filename =", rlogger.filename)
if len(args.milliseconds) > 1:
print('Slow-Ranking Avg Latency =', sum(args.milliseconds[1:]) / len(args.milliseconds[1:]))
print("\n\n")
print("\n\n")
# print('Avg Latency =', sum(args.milliseconds[1:]) / len(args.milliseconds[1:]))
print("\n\n")
print('\n\n')
if qrels:
assert query_idx + 1 == len(keys) == len(set(keys))
metrics.output_final_metrics(os.path.join(Run.path, 'ranking.metrics'), query_idx, len(queries))
print('\n\n')
| 2,993 | 32.640449 | 116 | py |
ColBERT | ColBERT-master/colbert/indexing/loaders.py | import os
import torch
import ujson
from math import ceil
from itertools import accumulate
from colbert.utils.utils import print_message
def get_parts(directory):
extension = '.pt'
parts = sorted([int(filename[: -1 * len(extension)]) for filename in os.listdir(directory)
if filename.endswith(extension)])
assert list(range(len(parts))) == parts, parts
# Integer-sortedness matters.
parts_paths = [os.path.join(directory, '{}{}'.format(filename, extension)) for filename in parts]
samples_paths = [os.path.join(directory, '{}.sample'.format(filename)) for filename in parts]
return parts, parts_paths, samples_paths
def load_doclens(directory, flatten=True):
parts, _, _ = get_parts(directory)
doclens_filenames = [os.path.join(directory, 'doclens.{}.json'.format(filename)) for filename in parts]
all_doclens = [ujson.load(open(filename)) for filename in doclens_filenames]
if flatten:
all_doclens = [x for sub_doclens in all_doclens for x in sub_doclens]
return all_doclens
| 1,064 | 29.428571 | 107 | py |
ColBERT | ColBERT-master/colbert/indexing/faiss.py | import os
import math
import faiss
import torch
import numpy as np
import threading
import queue
from colbert.utils.utils import print_message, grouper
from colbert.indexing.loaders import get_parts
from colbert.indexing.index_manager import load_index_part
from colbert.indexing.faiss_index import FaissIndex
def get_faiss_index_name(args, offset=None, endpos=None):
partitions_info = '' if args.partitions is None else f'.{args.partitions}'
range_info = '' if offset is None else f'.{offset}-{endpos}'
return f'ivfpq{partitions_info}{range_info}.faiss'
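# (Added note, not in the original source.) Examples of the generated names; only args.partitions is read:
#   from types import SimpleNamespace
#   get_faiss_index_name(SimpleNamespace(partitions=32768))        # -> 'ivfpq.32768.faiss'
#   get_faiss_index_name(SimpleNamespace(partitions=32768), 0, 8)  # -> 'ivfpq.32768.0-8.faiss'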
def load_sample(samples_paths, sample_fraction=None):
sample = []
for filename in samples_paths:
print_message(f"#> Loading {filename} ...")
part = load_index_part(filename)
if sample_fraction:
part = part[torch.randint(0, high=part.size(0), size=(int(part.size(0) * sample_fraction),))]
sample.append(part)
sample = torch.cat(sample).float().numpy()
print("#> Sample has shape", sample.shape)
return sample
def prepare_faiss_index(slice_samples_paths, partitions, sample_fraction=None):
training_sample = load_sample(slice_samples_paths, sample_fraction=sample_fraction)
dim = training_sample.shape[-1]
index = FaissIndex(dim, partitions)
print_message("#> Training with the vectors...")
index.train(training_sample)
print_message("Done training!\n")
return index
SPAN = 3
def index_faiss(args):
print_message("#> Starting..")
parts, parts_paths, samples_paths = get_parts(args.index_path)
if args.sample is not None:
assert args.sample, args.sample
print_message(f"#> Training with {round(args.sample * 100.0, 1)}% of *all* embeddings (provided --sample).")
samples_paths = parts_paths
num_parts_per_slice = math.ceil(len(parts) / args.slices)
for slice_idx, part_offset in enumerate(range(0, len(parts), num_parts_per_slice)):
part_endpos = min(part_offset + num_parts_per_slice, len(parts))
slice_parts_paths = parts_paths[part_offset:part_endpos]
slice_samples_paths = samples_paths[part_offset:part_endpos]
if args.slices == 1:
faiss_index_name = get_faiss_index_name(args)
else:
faiss_index_name = get_faiss_index_name(args, offset=part_offset, endpos=part_endpos)
output_path = os.path.join(args.index_path, faiss_index_name)
print_message(f"#> Processing slice #{slice_idx+1} of {args.slices} (range {part_offset}..{part_endpos}).")
print_message(f"#> Will write to {output_path}.")
assert not os.path.exists(output_path), output_path
index = prepare_faiss_index(slice_samples_paths, args.partitions, args.sample)
loaded_parts = queue.Queue(maxsize=1)
def _loader_thread(thread_parts_paths):
for filenames in grouper(thread_parts_paths, SPAN, fillvalue=None):
sub_collection = [load_index_part(filename) for filename in filenames if filename is not None]
sub_collection = torch.cat(sub_collection)
sub_collection = sub_collection.float().numpy()
loaded_parts.put(sub_collection)
thread = threading.Thread(target=_loader_thread, args=(slice_parts_paths,))
thread.start()
print_message("#> Indexing the vectors...")
for filenames in grouper(slice_parts_paths, SPAN, fillvalue=None):
print_message("#> Loading", filenames, "(from queue)...")
sub_collection = loaded_parts.get()
print_message("#> Processing a sub_collection with shape", sub_collection.shape)
index.add(sub_collection)
print_message("Done indexing!")
index.save(output_path)
print_message(f"\n\nDone! All complete (for slice #{slice_idx+1} of {args.slices})!")
thread.join()
| 3,899 | 32.333333 | 116 | py |
ColBERT | ColBERT-master/colbert/indexing/index_manager.py | import torch
import faiss
import numpy as np
from colbert.utils.utils import print_message
class IndexManager():
def __init__(self, dim):
self.dim = dim
def save(self, tensor, path_prefix):
torch.save(tensor, path_prefix)
def load_index_part(filename, verbose=True):
part = torch.load(filename)
if type(part) == list: # for backward compatibility
part = torch.cat(part)
return part
| 435 | 17.956522 | 56 | py |
ColBERT | ColBERT-master/colbert/indexing/encoder.py | import os
import time
import torch
import ujson
import numpy as np
import itertools
import threading
import queue
from colbert.modeling.inference import ModelInference
from colbert.evaluation.loaders import load_colbert
from colbert.utils.utils import print_message
from colbert.indexing.index_manager import IndexManager
class CollectionEncoder():
def __init__(self, args, process_idx, num_processes):
self.args = args
self.collection = args.collection
self.process_idx = process_idx
self.num_processes = num_processes
assert 0.5 <= args.chunksize <= 128.0
max_bytes_per_file = args.chunksize * (1024*1024*1024)
max_bytes_per_doc = (self.args.doc_maxlen * self.args.dim * 2.0)
# Determine subset sizes for output
minimum_subset_size = 10_000
maximum_subset_size = max_bytes_per_file / max_bytes_per_doc
maximum_subset_size = max(minimum_subset_size, maximum_subset_size)
self.possible_subset_sizes = [int(maximum_subset_size)]
self.print_main("#> Local args.bsize =", args.bsize)
self.print_main("#> args.index_root =", args.index_root)
self.print_main(f"#> self.possible_subset_sizes = {self.possible_subset_sizes}")
self._load_model()
self.indexmgr = IndexManager(args.dim)
self.iterator = self._initialize_iterator()
def _initialize_iterator(self):
return open(self.collection)
def _saver_thread(self):
for args in iter(self.saver_queue.get, None):
self._save_batch(*args)
def _load_model(self):
self.colbert, self.checkpoint = load_colbert(self.args, do_print=(self.process_idx == 0))
self.colbert = self.colbert.cuda()
self.colbert.eval()
self.inference = ModelInference(self.colbert, amp=self.args.amp)
def encode(self):
self.saver_queue = queue.Queue(maxsize=3)
thread = threading.Thread(target=self._saver_thread)
thread.start()
t0 = time.time()
local_docs_processed = 0
for batch_idx, (offset, lines, owner) in enumerate(self._batch_passages(self.iterator)):
if owner != self.process_idx:
continue
t1 = time.time()
batch = self._preprocess_batch(offset, lines)
embs, doclens = self._encode_batch(batch_idx, batch)
t2 = time.time()
self.saver_queue.put((batch_idx, embs, offset, doclens))
t3 = time.time()
local_docs_processed += len(lines)
overall_throughput = compute_throughput(local_docs_processed, t0, t3)
this_encoding_throughput = compute_throughput(len(lines), t1, t2)
this_saving_throughput = compute_throughput(len(lines), t2, t3)
self.print(f'#> Completed batch #{batch_idx} (starting at passage #{offset}) \t\t'
f'Passages/min: {overall_throughput} (overall), ',
f'{this_encoding_throughput} (this encoding), ',
f'{this_saving_throughput} (this saving)')
self.saver_queue.put(None)
self.print("#> Joining saver thread.")
thread.join()
def _batch_passages(self, fi):
"""
Must use the same seed across processes!
"""
np.random.seed(0)
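        # (Added note, not in the original source.) Every process seeds numpy identically and walks the
        # same owner cycle, so all ranks agree on which batch belongs to whom without communicating;
        # encode() simply skips batches whose owner is not this process index.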
offset = 0
for owner in itertools.cycle(range(self.num_processes)):
batch_size = np.random.choice(self.possible_subset_sizes)
L = [line for _, line in zip(range(batch_size), fi)]
if len(L) == 0:
break # EOF
yield (offset, L, owner)
offset += len(L)
if len(L) < batch_size:
break # EOF
self.print("[NOTE] Done with local share.")
return
def _preprocess_batch(self, offset, lines):
endpos = offset + len(lines)
batch = []
for line_idx, line in zip(range(offset, endpos), lines):
line_parts = line.strip().split('\t')
pid, passage, *other = line_parts
assert len(passage) >= 1
if len(other) >= 1:
title, *_ = other
passage = title + ' | ' + passage
batch.append(passage)
assert pid == 'id' or int(pid) == line_idx
return batch
def _encode_batch(self, batch_idx, batch):
with torch.no_grad():
embs = self.inference.docFromText(batch, bsize=self.args.bsize, keep_dims=False)
assert type(embs) is list
assert len(embs) == len(batch)
local_doclens = [d.size(0) for d in embs]
embs = torch.cat(embs)
return embs, local_doclens
def _save_batch(self, batch_idx, embs, offset, doclens):
start_time = time.time()
output_path = os.path.join(self.args.index_path, "{}.pt".format(batch_idx))
output_sample_path = os.path.join(self.args.index_path, "{}.sample".format(batch_idx))
doclens_path = os.path.join(self.args.index_path, 'doclens.{}.json'.format(batch_idx))
# Save the embeddings.
self.indexmgr.save(embs, output_path)
self.indexmgr.save(embs[torch.randint(0, high=embs.size(0), size=(embs.size(0) // 20,))], output_sample_path)
# Save the doclens.
with open(doclens_path, 'w') as output_doclens:
ujson.dump(doclens, output_doclens)
throughput = compute_throughput(len(doclens), start_time, time.time())
self.print_main("#> Saved batch #{} to {} \t\t".format(batch_idx, output_path),
"Saving Throughput =", throughput, "passages per minute.\n")
def print(self, *args):
print_message("[" + str(self.process_idx) + "]", "\t\t", *args)
def print_main(self, *args):
if self.process_idx == 0:
self.print(*args)
def compute_throughput(size, t0, t1):
throughput = size / (t1 - t0) * 60
if throughput > 1000 * 1000:
throughput = throughput / (1000*1000)
throughput = round(throughput, 1)
return '{}M'.format(throughput)
throughput = throughput / (1000)
throughput = round(throughput, 1)
return '{}k'.format(throughput)
| 6,247 | 32.234043 | 117 | py |
ColBERT | ColBERT-master/colbert/indexing/faiss_index_gpu.py | """
Heavily based on: https://github.com/facebookresearch/faiss/blob/master/benchs/bench_gpu_1bn.py
"""
import sys
import time
import math
import faiss
import torch
import numpy as np
from colbert.utils.utils import print_message
class FaissIndexGPU():
def __init__(self):
self.ngpu = faiss.get_num_gpus()
if self.ngpu == 0:
return
self.tempmem = 1 << 33
self.max_add_per_gpu = 1 << 25
self.max_add = self.max_add_per_gpu * self.ngpu
self.add_batch_size = 65536
self.gpu_resources = self._prepare_gpu_resources()
def _prepare_gpu_resources(self):
print_message(f"Preparing resources for {self.ngpu} GPUs.")
gpu_resources = []
for _ in range(self.ngpu):
res = faiss.StandardGpuResources()
if self.tempmem >= 0:
res.setTempMemory(self.tempmem)
gpu_resources.append(res)
return gpu_resources
def _make_vres_vdev(self):
"""
return vectors of device ids and resources useful for gpu_multiple
"""
assert self.ngpu > 0
vres = faiss.GpuResourcesVector()
vdev = faiss.IntVector()
for i in range(self.ngpu):
vdev.push_back(i)
vres.push_back(self.gpu_resources[i])
return vres, vdev
def training_initialize(self, index, quantizer):
"""
The index and quantizer should be owned by caller.
"""
assert self.ngpu > 0
s = time.time()
self.index_ivf = faiss.extract_index_ivf(index)
self.clustering_index = faiss.index_cpu_to_all_gpus(quantizer)
self.index_ivf.clustering_index = self.clustering_index
print(time.time() - s)
def training_finalize(self):
assert self.ngpu > 0
s = time.time()
self.index_ivf.clustering_index = faiss.index_gpu_to_cpu(self.index_ivf.clustering_index)
print(time.time() - s)
def adding_initialize(self, index):
"""
The index should be owned by caller.
"""
assert self.ngpu > 0
self.co = faiss.GpuMultipleClonerOptions()
self.co.useFloat16 = True
self.co.useFloat16CoarseQuantizer = False
self.co.usePrecomputed = False
self.co.indicesOptions = faiss.INDICES_CPU
self.co.verbose = True
self.co.reserveVecs = self.max_add
self.co.shard = True
assert self.co.shard_type in (0, 1, 2)
self.vres, self.vdev = self._make_vres_vdev()
self.gpu_index = faiss.index_cpu_to_gpu_multiple(self.vres, self.vdev, index, self.co)
def add(self, index, data, offset):
assert self.ngpu > 0
t0 = time.time()
nb = data.shape[0]
for i0 in range(0, nb, self.add_batch_size):
i1 = min(i0 + self.add_batch_size, nb)
xs = data[i0:i1]
self.gpu_index.add_with_ids(xs, np.arange(offset+i0, offset+i1))
if self.max_add > 0 and self.gpu_index.ntotal > self.max_add:
self._flush_to_cpu(index, nb, offset)
print('\r%d/%d (%.3f s) ' % (i0, nb, time.time() - t0), end=' ')
sys.stdout.flush()
if self.gpu_index.ntotal > 0:
self._flush_to_cpu(index, nb, offset)
assert index.ntotal == offset+nb, (index.ntotal, offset+nb, offset, nb)
print(f"add(.) time: %.3f s \t\t--\t\t index.ntotal = {index.ntotal}" % (time.time() - t0))
def _flush_to_cpu(self, index, nb, offset):
print("Flush indexes to CPU")
for i in range(self.ngpu):
index_src_gpu = faiss.downcast_index(self.gpu_index if self.ngpu == 1 else self.gpu_index.at(i))
index_src = faiss.index_gpu_to_cpu(index_src_gpu)
index_src.copy_subset_to(index, 0, offset, offset+nb)
index_src_gpu.reset()
index_src_gpu.reserveMemory(self.max_add)
if self.ngpu > 1:
try:
self.gpu_index.sync_with_shard_indexes()
except:
self.gpu_index.syncWithSubIndexes()
| 4,108 | 28.561151 | 108 | py |
ColBERT | ColBERT-master/colbert/indexing/faiss_index.py | import sys
import time
import math
import faiss
import torch
import numpy as np
from colbert.indexing.faiss_index_gpu import FaissIndexGPU
from colbert.utils.utils import print_message
class FaissIndex():
def __init__(self, dim, partitions):
self.dim = dim
self.partitions = partitions
self.gpu = FaissIndexGPU()
self.quantizer, self.index = self._create_index()
self.offset = 0
def _create_index(self):
quantizer = faiss.IndexFlatL2(self.dim) # faiss.IndexHNSWFlat(dim, 32)
index = faiss.IndexIVFPQ(quantizer, self.dim, self.partitions, 16, 8)
return quantizer, index
def train(self, train_data):
print_message(f"#> Training now (using {self.gpu.ngpu} GPUs)...")
if self.gpu.ngpu > 0:
self.gpu.training_initialize(self.index, self.quantizer)
s = time.time()
self.index.train(train_data)
print(time.time() - s)
if self.gpu.ngpu > 0:
self.gpu.training_finalize()
def add(self, data):
print_message(f"Add data with shape {data.shape} (offset = {self.offset})..")
if self.gpu.ngpu > 0 and self.offset == 0:
self.gpu.adding_initialize(self.index)
if self.gpu.ngpu > 0:
self.gpu.add(self.index, data, self.offset)
else:
self.index.add(data)
self.offset += data.shape[0]
def save(self, output_path):
print_message(f"Writing index to {output_path} ...")
self.index.nprobe = 10 # just a default
faiss.write_index(self.index, output_path)
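# --- Illustrative usage sketch (not part of the original file): building a tiny IVFPQ index over random
# vectors. The path and sizes are made up; when faiss reports zero GPUs, FaissIndexGPU becomes a no-op and
# the plain CPU path above is used. Nothing calls this function.
def _example_build_tiny_index(output_path='/tmp/tiny.ivfpq.faiss'):
    import numpy as np
    xb = np.random.randn(10000, 128).astype('float32')  # 10k fake 128-dim embeddings
    index = FaissIndex(dim=128, partitions=64)           # 64 IVF cells is plenty for a toy collection
    index.train(xb)                                      # coarse k-means + PQ training on the sample
    index.add(xb)                                        # encode and add all vectors
    index.save(output_path)                              # sets nprobe=10 and writes the index to disk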
| 1,605 | 26.220339 | 85 | py |
ColBERT | ColBERT-master/colbert/training/training.py | import os
import random
import time
import torch
import torch.nn as nn
import numpy as np
from transformers import AdamW
from colbert.utils.runs import Run
from colbert.utils.amp import MixedPrecisionManager
from colbert.training.lazy_batcher import LazyBatcher
from colbert.training.eager_batcher import EagerBatcher
from colbert.parameters import DEVICE
from colbert.modeling.colbert import ColBERT
from colbert.utils.utils import print_message
from colbert.training.utils import print_progress, manage_checkpoints
def train(args):
random.seed(12345)
np.random.seed(12345)
torch.manual_seed(12345)
if args.distributed:
torch.cuda.manual_seed_all(12345)
if args.distributed:
assert args.bsize % args.nranks == 0, (args.bsize, args.nranks)
assert args.accumsteps == 1
args.bsize = args.bsize // args.nranks
print("Using args.bsize =", args.bsize, "(per process) and args.accumsteps =", args.accumsteps)
if args.lazy:
reader = LazyBatcher(args, (0 if args.rank == -1 else args.rank), args.nranks)
else:
reader = EagerBatcher(args, (0 if args.rank == -1 else args.rank), args.nranks)
if args.rank not in [-1, 0]:
torch.distributed.barrier()
colbert = ColBERT.from_pretrained('bert-base-uncased',
query_maxlen=args.query_maxlen,
doc_maxlen=args.doc_maxlen,
dim=args.dim,
similarity_metric=args.similarity,
mask_punctuation=args.mask_punctuation)
if args.checkpoint is not None:
assert args.resume_optimizer is False, "TODO: This would mean reload optimizer too."
print_message(f"#> Starting from checkpoint {args.checkpoint} -- but NOT the optimizer!")
checkpoint = torch.load(args.checkpoint, map_location='cpu')
try:
colbert.load_state_dict(checkpoint['model_state_dict'])
except:
print_message("[WARNING] Loading checkpoint with strict=False")
colbert.load_state_dict(checkpoint['model_state_dict'], strict=False)
if args.rank == 0:
torch.distributed.barrier()
colbert = colbert.to(DEVICE)
colbert.train()
if args.distributed:
colbert = torch.nn.parallel.DistributedDataParallel(colbert, device_ids=[args.rank],
output_device=args.rank,
find_unused_parameters=True)
optimizer = AdamW(filter(lambda p: p.requires_grad, colbert.parameters()), lr=args.lr, eps=1e-8)
optimizer.zero_grad()
amp = MixedPrecisionManager(args.amp)
criterion = nn.CrossEntropyLoss()
labels = torch.zeros(args.bsize, dtype=torch.long, device=DEVICE)
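    # (Added note, not in the original source.) Each training example contributes a (positive, negative)
    # passage pair: the batch is laid out positives-first, so .view(2, -1).permute(1, 0) below turns the
    # flat score vector into bsize rows of [positive_score, negative_score], and CrossEntropyLoss with
    # all-zero labels maximizes the softmax probability of the positive passage in every pair.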
start_time = time.time()
train_loss = 0.0
start_batch_idx = 0
if args.resume:
assert args.checkpoint is not None
start_batch_idx = checkpoint['batch']
reader.skip_to_batch(start_batch_idx, checkpoint['arguments']['bsize'])
for batch_idx, BatchSteps in zip(range(start_batch_idx, args.maxsteps), reader):
this_batch_loss = 0.0
for queries, passages in BatchSteps:
with amp.context():
scores = colbert(queries, passages).view(2, -1).permute(1, 0)
loss = criterion(scores, labels[:scores.size(0)])
loss = loss / args.accumsteps
if args.rank < 1:
print_progress(scores)
amp.backward(loss)
train_loss += loss.item()
this_batch_loss += loss.item()
amp.step(colbert, optimizer)
if args.rank < 1:
avg_loss = train_loss / (batch_idx+1)
num_examples_seen = (batch_idx - start_batch_idx) * args.bsize * args.nranks
elapsed = float(time.time() - start_time)
log_to_mlflow = (batch_idx % 20 == 0)
Run.log_metric('train/avg_loss', avg_loss, step=batch_idx, log_to_mlflow=log_to_mlflow)
Run.log_metric('train/batch_loss', this_batch_loss, step=batch_idx, log_to_mlflow=log_to_mlflow)
Run.log_metric('train/examples', num_examples_seen, step=batch_idx, log_to_mlflow=log_to_mlflow)
Run.log_metric('train/throughput', num_examples_seen / elapsed, step=batch_idx, log_to_mlflow=log_to_mlflow)
print_message(batch_idx, avg_loss)
manage_checkpoints(args, colbert, optimizer, batch_idx+1)
| 4,585 | 35.983871 | 120 | py |
ColBERT | ColBERT-master/colbert/training/utils.py | import os
import torch
from colbert.utils.runs import Run
from colbert.utils.utils import print_message, save_checkpoint
from colbert.parameters import SAVED_CHECKPOINTS
def print_progress(scores):
positive_avg, negative_avg = round(scores[:, 0].mean().item(), 2), round(scores[:, 1].mean().item(), 2)
print("#>>> ", positive_avg, negative_avg, '\t\t|\t\t', positive_avg - negative_avg)
def manage_checkpoints(args, colbert, optimizer, batch_idx):
arguments = args.input_arguments.__dict__
path = os.path.join(Run.path, 'checkpoints')
if not os.path.exists(path):
os.mkdir(path)
if batch_idx % 2000 == 0:
name = os.path.join(path, "colbert.dnn")
save_checkpoint(name, 0, batch_idx, colbert, optimizer, arguments)
if batch_idx in SAVED_CHECKPOINTS:
name = os.path.join(path, "colbert-{}.dnn".format(batch_idx))
save_checkpoint(name, 0, batch_idx, colbert, optimizer, arguments)
| 956 | 32 | 107 | py |
ColBERT | ColBERT-master/colbert/utils/logging.py | import os
import sys
import ujson
import mlflow
import traceback
from torch.utils.tensorboard import SummaryWriter
from colbert.utils.utils import print_message, create_directory
class Logger():
def __init__(self, rank, run):
self.rank = rank
self.is_main = self.rank in [-1, 0]
self.run = run
self.logs_path = os.path.join(self.run.path, "logs/")
if self.is_main:
self._init_mlflow()
self.initialized_tensorboard = False
create_directory(self.logs_path)
def _init_mlflow(self):
mlflow.set_tracking_uri('file://' + os.path.join(self.run.experiments_root, "logs/mlruns/"))
mlflow.set_experiment('/'.join([self.run.experiment, self.run.script]))
mlflow.set_tag('experiment', self.run.experiment)
mlflow.set_tag('name', self.run.name)
mlflow.set_tag('path', self.run.path)
def _init_tensorboard(self):
root = os.path.join(self.run.experiments_root, "logs/tensorboard/")
logdir = '__'.join([self.run.experiment, self.run.script, self.run.name])
logdir = os.path.join(root, logdir)
self.writer = SummaryWriter(log_dir=logdir)
self.initialized_tensorboard = True
def _log_exception(self, etype, value, tb):
if not self.is_main:
return
output_path = os.path.join(self.logs_path, 'exception.txt')
trace = ''.join(traceback.format_exception(etype, value, tb)) + '\n'
print_message(trace, '\n\n')
self.log_new_artifact(output_path, trace)
def _log_all_artifacts(self):
if not self.is_main:
return
mlflow.log_artifacts(self.logs_path)
def _log_args(self, args):
if not self.is_main:
return
for key in vars(args):
value = getattr(args, key)
if type(value) in [int, float, str, bool]:
mlflow.log_param(key, value)
with open(os.path.join(self.logs_path, 'args.json'), 'w') as output_metadata:
ujson.dump(args.input_arguments.__dict__, output_metadata, indent=4)
output_metadata.write('\n')
with open(os.path.join(self.logs_path, 'args.txt'), 'w') as output_metadata:
output_metadata.write(' '.join(sys.argv) + '\n')
def log_metric(self, name, value, step, log_to_mlflow=True):
if not self.is_main:
return
if not self.initialized_tensorboard:
self._init_tensorboard()
if log_to_mlflow:
mlflow.log_metric(name, value, step=step)
self.writer.add_scalar(name, value, step)
def log_new_artifact(self, path, content):
with open(path, 'w') as f:
f.write(content)
mlflow.log_artifact(path)
def warn(self, *args):
msg = print_message('[WARNING]', '\t', *args)
with open(os.path.join(self.logs_path, 'warnings.txt'), 'a') as output_metadata:
output_metadata.write(msg + '\n\n\n')
def info_all(self, *args):
print_message('[' + str(self.rank) + ']', '\t', *args)
def info(self, *args):
if self.is_main:
print_message(*args)
| 3,185 | 30.86 | 100 | py |
ColBERT | ColBERT-master/colbert/utils/utils.py | import os
import tqdm
import torch
import datetime
import itertools
from multiprocessing import Pool
from collections import OrderedDict, defaultdict
def print_message(*s, condition=True):
s = ' '.join([str(x) for x in s])
msg = "[{}] {}".format(datetime.datetime.now().strftime("%b %d, %H:%M:%S"), s)
if condition:
print(msg, flush=True)
return msg
def timestamp():
format_str = "%Y-%m-%d_%H.%M.%S"
result = datetime.datetime.now().strftime(format_str)
return result
def file_tqdm(file):
print(f"#> Reading {file.name}")
with tqdm.tqdm(total=os.path.getsize(file.name) / 1024.0 / 1024.0, unit="MiB") as pbar:
for line in file:
yield line
pbar.update(len(line) / 1024.0 / 1024.0)
pbar.close()
def save_checkpoint(path, epoch_idx, mb_idx, model, optimizer, arguments=None):
print(f"#> Saving a checkpoint to {path} ..")
if hasattr(model, 'module'):
model = model.module # extract model from a distributed/data-parallel wrapper
checkpoint = {}
checkpoint['epoch'] = epoch_idx
checkpoint['batch'] = mb_idx
checkpoint['model_state_dict'] = model.state_dict()
checkpoint['optimizer_state_dict'] = optimizer.state_dict()
checkpoint['arguments'] = arguments
torch.save(checkpoint, path)
def load_checkpoint(path, model, optimizer=None, do_print=True):
if do_print:
print_message("#> Loading checkpoint", path, "..")
if path.startswith("http:") or path.startswith("https:"):
checkpoint = torch.hub.load_state_dict_from_url(path, map_location='cpu')
else:
checkpoint = torch.load(path, map_location='cpu')
state_dict = checkpoint['model_state_dict']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k
if k[:7] == 'module.':
name = k[7:]
new_state_dict[name] = v
checkpoint['model_state_dict'] = new_state_dict
try:
model.load_state_dict(checkpoint['model_state_dict'])
except:
print_message("[WARNING] Loading checkpoint with strict=False")
model.load_state_dict(checkpoint['model_state_dict'], strict=False)
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if do_print:
print_message("#> checkpoint['epoch'] =", checkpoint['epoch'])
print_message("#> checkpoint['batch'] =", checkpoint['batch'])
return checkpoint
def create_directory(path):
if os.path.exists(path):
print('\n')
print_message("#> Note: Output directory", path, 'already exists\n\n')
else:
print('\n')
print_message("#> Creating directory", path, '\n\n')
os.makedirs(path)
# def batch(file, bsize):
# while True:
# L = [ujson.loads(file.readline()) for _ in range(bsize)]
# yield L
# return
def f7(seq):
"""
Source: https://stackoverflow.com/a/480227/1493011
"""
seen = set()
return [x for x in seq if not (x in seen or seen.add(x))]
def batch(group, bsize, provide_offset=False):
offset = 0
while offset < len(group):
L = group[offset: offset + bsize]
yield ((offset, L) if provide_offset else L)
offset += len(L)
return
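# (Added note, not in the original source.) For example:
#   list(batch([0, 1, 2, 3, 4], 2))                       -> [[0, 1], [2, 3], [4]]
#   list(batch([0, 1, 2, 3, 4], 2, provide_offset=True))  -> [(0, [0, 1]), (2, [2, 3]), (4, [4])]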
class dotdict(dict):
"""
dot.notation access to dictionary attributes
Credit: derek73 @ https://stackoverflow.com/questions/2352181
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def flatten(L):
return [x for y in L for x in y]
def zipstar(L, lazy=False):
"""
A much faster A, B, C = zip(*[(a, b, c), (a, b, c), ...])
May return lists or tuples.
"""
if len(L) == 0:
return L
width = len(L[0])
if width < 100:
return [[elem[idx] for elem in L] for idx in range(width)]
L = zip(*L)
return L if lazy else list(L)
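# (Added note, not in the original source.) For example, assuming only the definition above:
#   zipstar([(1, 'a'), (2, 'b'), (3, 'c')])  ->  [[1, 2, 3], ['a', 'b', 'c']]
# For narrow tuples (width < 100) the list-comprehension path avoids zip(*...) entirely.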
def zip_first(L1, L2):
length = len(L1) if type(L1) in [tuple, list] else None
L3 = list(zip(L1, L2))
assert length in [None, len(L3)], "zip_first() failure: length differs!"
return L3
def int_or_float(val):
if '.' in val:
return float(val)
return int(val)
def load_ranking(path, types=None, lazy=False):
print_message(f"#> Loading the ranked lists from {path} ..")
try:
lists = torch.load(path)
lists = zipstar([l.tolist() for l in tqdm.tqdm(lists)], lazy=lazy)
except:
if types is None:
types = itertools.cycle([int_or_float])
with open(path) as f:
lists = [[typ(x) for typ, x in zip_first(types, line.strip().split('\t'))]
for line in file_tqdm(f)]
return lists
def save_ranking(ranking, path):
lists = zipstar(ranking)
lists = [torch.tensor(l) for l in lists]
torch.save(lists, path)
return lists
def groupby_first_item(lst):
groups = defaultdict(list)
for first, *rest in lst:
rest = rest[0] if len(rest) == 1 else rest
groups[first].append(rest)
return groups
def process_grouped_by_first_item(lst):
"""
Requires items in list to already be grouped by first item.
"""
groups = defaultdict(list)
started = False
last_group = None
for first, *rest in lst:
rest = rest[0] if len(rest) == 1 else rest
if started and first != last_group:
yield (last_group, groups[last_group])
assert first not in groups, f"{first} seen earlier --- violates precondition."
groups[first].append(rest)
last_group = first
started = True
return groups
def grouper(iterable, n, fillvalue=None):
"""
Collect data into fixed-length chunks or blocks
    Example: grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
Source: https://docs.python.org/3/library/itertools.html#itertools-recipes
"""
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue)
# see https://stackoverflow.com/a/45187287
class NullContextManager(object):
def __init__(self, dummy_resource=None):
self.dummy_resource = dummy_resource
def __enter__(self):
return self.dummy_resource
def __exit__(self, *args):
pass
def load_batch_backgrounds(args, qids):
if args.qid2backgrounds is None:
return None
qbackgrounds = []
for qid in qids:
back = args.qid2backgrounds[qid]
if len(back) and type(back[0]) == int:
x = [args.collection[pid] for pid in back]
else:
x = [args.collectionX.get(pid, '') for pid in back]
x = ' [SEP] '.join(x)
qbackgrounds.append(x)
return qbackgrounds
| 6,747 | 23.808824 | 91 | py |
ColBERT | ColBERT-master/colbert/utils/distributed.py | import os
import random
import torch
import numpy as np
def init(rank):
nranks = 'WORLD_SIZE' in os.environ and int(os.environ['WORLD_SIZE'])
nranks = max(1, nranks)
is_distributed = nranks > 1
if rank == 0:
print('nranks =', nranks, '\t num_gpus =', torch.cuda.device_count())
if is_distributed:
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(rank % num_gpus)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
return nranks, is_distributed
def barrier(rank):
if rank >= 0:
torch.distributed.barrier()
| 614 | 22.653846 | 82 | py |
ColBERT | ColBERT-master/colbert/utils/amp.py | import torch
from contextlib import contextmanager
from colbert.utils.utils import NullContextManager
from packaging import version
v = version.parse
PyTorch_over_1_6 = v(torch.__version__) >= v('1.6')
class MixedPrecisionManager():
def __init__(self, activated):
assert (not activated) or PyTorch_over_1_6, "Cannot use AMP for PyTorch version < 1.6"
self.activated = activated
if self.activated:
self.scaler = torch.cuda.amp.GradScaler()
def context(self):
return torch.cuda.amp.autocast() if self.activated else NullContextManager()
def backward(self, loss):
if self.activated:
self.scaler.scale(loss).backward()
else:
loss.backward()
def step(self, colbert, optimizer):
if self.activated:
self.scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(colbert.parameters(), 2.0)
self.scaler.step(optimizer)
self.scaler.update()
optimizer.zero_grad()
else:
torch.nn.utils.clip_grad_norm_(colbert.parameters(), 2.0)
optimizer.step()
optimizer.zero_grad()
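# --- Illustrative usage sketch (not part of the original file), mirroring how training.py drives this
# manager; `model`, `optimizer`, `loss_fn`, and `inputs` are placeholders, and nothing calls this function.
def _example_amp_step(model, optimizer, loss_fn, inputs):
    amp = MixedPrecisionManager(activated=True)  # requires PyTorch >= 1.6
    with amp.context():                          # autocast forward pass
        loss = loss_fn(model(inputs))
    amp.backward(loss)                           # scaled backward when activated
    amp.step(model, optimizer)                   # unscale, clip grads to 2.0, step, update the scaler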
| 1,178 | 28.475 | 94 | py |
ColBERT | ColBERT-master/colbert/ranking/index_part.py | import os
import torch
import ujson
from math import ceil
from itertools import accumulate
from colbert.utils.utils import print_message, dotdict, flatten
from colbert.indexing.loaders import get_parts, load_doclens
from colbert.indexing.index_manager import load_index_part
from colbert.ranking.index_ranker import IndexRanker
class IndexPart():
def __init__(self, directory, dim=128, part_range=None, verbose=True):
first_part, last_part = (0, None) if part_range is None else (part_range.start, part_range.stop)
# Load parts metadata
all_parts, all_parts_paths, _ = get_parts(directory)
self.parts = all_parts[first_part:last_part]
self.parts_paths = all_parts_paths[first_part:last_part]
# Load doclens metadata
all_doclens = load_doclens(directory, flatten=False)
self.doc_offset = sum([len(part_doclens) for part_doclens in all_doclens[:first_part]])
self.doc_endpos = sum([len(part_doclens) for part_doclens in all_doclens[:last_part]])
self.pids_range = range(self.doc_offset, self.doc_endpos)
self.parts_doclens = all_doclens[first_part:last_part]
self.doclens = flatten(self.parts_doclens)
self.num_embeddings = sum(self.doclens)
self.tensor = self._load_parts(dim, verbose)
self.ranker = IndexRanker(self.tensor, self.doclens)
def _load_parts(self, dim, verbose):
tensor = torch.zeros(self.num_embeddings + 512, dim, dtype=torch.float16)
if verbose:
print_message("tensor.size() = ", tensor.size())
offset = 0
for idx, filename in enumerate(self.parts_paths):
print_message("|> Loading", filename, "...", condition=verbose)
endpos = offset + sum(self.parts_doclens[idx])
part = load_index_part(filename, verbose=verbose)
tensor[offset:endpos] = part
offset = endpos
return tensor
def pid_in_range(self, pid):
return pid in self.pids_range
def rank(self, Q, pids):
"""
Rank a single batch of Q x pids (e.g., 1k--10k pairs).
"""
assert Q.size(0) in [1, len(pids)], (Q.size(0), len(pids))
assert all(pid in self.pids_range for pid in pids), self.pids_range
pids_ = [pid - self.doc_offset for pid in pids]
scores = self.ranker.rank(Q, pids_)
return scores
def batch_rank(self, all_query_embeddings, query_indexes, pids, sorted_pids):
"""
Rank a large, fairly dense set of query--passage pairs (e.g., 1M+ pairs).
Higher overhead, much faster for large batches.
"""
assert ((pids >= self.pids_range.start) & (pids < self.pids_range.stop)).sum() == pids.size(0)
pids_ = pids - self.doc_offset
scores = self.ranker.batch_rank(all_query_embeddings, query_indexes, pids_, sorted_pids)
return scores
| 2,912 | 34.096386 | 104 | py |
ColBERT | ColBERT-master/colbert/ranking/batch_retrieval.py | import os
import time
import faiss
import random
import torch
from colbert.utils.runs import Run
from multiprocessing import Pool
from colbert.modeling.inference import ModelInference
from colbert.evaluation.ranking_logger import RankingLogger
from colbert.utils.utils import print_message, batch
from colbert.ranking.faiss_index import FaissIndex
def batch_retrieve(args):
assert args.retrieve_only, "TODO: Combine batch (multi-query) retrieval with batch re-ranking"
faiss_index = FaissIndex(args.index_path, args.faiss_index_path, args.nprobe, args.part_range)
inference = ModelInference(args.colbert, amp=args.amp)
ranking_logger = RankingLogger(Run.path, qrels=None)
with ranking_logger.context('unordered.tsv', also_save_annotations=False) as rlogger:
queries = args.queries
qids_in_order = list(queries.keys())
for qoffset, qbatch in batch(qids_in_order, 100_000, provide_offset=True):
qbatch_text = [queries[qid] for qid in qbatch]
print_message(f"#> Embedding {len(qbatch_text)} queries in parallel...")
Q = inference.queryFromText(qbatch_text, bsize=512)
print_message("#> Starting batch retrieval...")
all_pids = faiss_index.retrieve(args.faiss_depth, Q, verbose=True)
# Log the PIDs with rank -1 for all
for query_idx, (qid, ranking) in enumerate(zip(qbatch, all_pids)):
query_idx = qoffset + query_idx
if query_idx % 1000 == 0:
print_message(f"#> Logging query #{query_idx} (qid {qid}) now...")
ranking = [(None, pid, None) for pid in ranking]
rlogger.log(qid, ranking, is_ranked=False)
print('\n\n')
print(ranking_logger.filename)
print("#> Done.")
print('\n\n')
| 1,819 | 34.686275 | 98 | py |
ColBERT | ColBERT-master/colbert/ranking/batch_reranking.py | import os
import time
import torch
import queue
import threading
from collections import defaultdict
from colbert.utils.runs import Run
from colbert.modeling.inference import ModelInference
from colbert.evaluation.ranking_logger import RankingLogger
from colbert.utils.utils import print_message, flatten, zipstar
from colbert.indexing.loaders import get_parts
from colbert.ranking.index_part import IndexPart
MAX_DEPTH_LOGGED = 1000 # TODO: Use args.depth
def prepare_ranges(index_path, dim, step, part_range):
print_message("#> Launching a separate thread to load index parts asynchronously.")
parts, _, _ = get_parts(index_path)
positions = [(offset, offset + step) for offset in range(0, len(parts), step)]
if part_range is not None:
positions = positions[part_range.start: part_range.stop]
loaded_parts = queue.Queue(maxsize=2)
def _loader_thread(index_path, dim, positions):
for offset, endpos in positions:
index = IndexPart(index_path, dim=dim, part_range=range(offset, endpos), verbose=True)
loaded_parts.put(index, block=True)
thread = threading.Thread(target=_loader_thread, args=(index_path, dim, positions,))
thread.start()
return positions, loaded_parts, thread
def score_by_range(positions, loaded_parts, all_query_embeddings, all_query_rankings, all_pids):
print_message("#> Sorting by PID..")
all_query_indexes, all_pids = zipstar(all_pids)
sorting_pids = torch.tensor(all_pids).sort()
all_query_indexes, all_pids = torch.tensor(all_query_indexes)[sorting_pids.indices], sorting_pids.values
range_start, range_end = 0, 0
for offset, endpos in positions:
print_message(f"#> Fetching parts {offset}--{endpos} from queue..")
index = loaded_parts.get()
print_message(f"#> Filtering PIDs to the range {index.pids_range}..")
range_start = range_start + (all_pids[range_start:] < index.pids_range.start).sum()
range_end = range_end + (all_pids[range_end:] < index.pids_range.stop).sum()
pids = all_pids[range_start:range_end]
query_indexes = all_query_indexes[range_start:range_end]
print_message(f"#> Got {len(pids)} query--passage pairs in this range.")
if len(pids) == 0:
continue
print_message(f"#> Ranking in batches the pairs #{range_start} through #{range_end}...")
scores = index.batch_rank(all_query_embeddings, query_indexes, pids, sorted_pids=True)
for query_index, pid, score in zip(query_indexes.tolist(), pids.tolist(), scores):
all_query_rankings[0][query_index].append(pid)
all_query_rankings[1][query_index].append(score)
def batch_rerank(args):
positions, loaded_parts, thread = prepare_ranges(args.index_path, args.dim, args.step, args.part_range)
inference = ModelInference(args.colbert, amp=args.amp)
queries, topK_pids = args.queries, args.topK_pids
with torch.no_grad():
queries_in_order = list(queries.values())
print_message(f"#> Encoding all {len(queries_in_order)} queries in batches...")
all_query_embeddings = inference.queryFromText(queries_in_order, bsize=512, to_cpu=True)
all_query_embeddings = all_query_embeddings.to(dtype=torch.float16).permute(0, 2, 1).contiguous()
for qid in queries:
"""
Since topK_pids is a defaultdict, make sure each qid *has* actual PID information (even if empty).
"""
assert qid in topK_pids, qid
all_pids = flatten([[(query_index, pid) for pid in topK_pids[qid]] for query_index, qid in enumerate(queries)])
all_query_rankings = [defaultdict(list), defaultdict(list)]
print_message(f"#> Will process {len(all_pids)} query--document pairs in total.")
with torch.no_grad():
score_by_range(positions, loaded_parts, all_query_embeddings, all_query_rankings, all_pids)
ranking_logger = RankingLogger(Run.path, qrels=None, log_scores=args.log_scores)
with ranking_logger.context('ranking.tsv', also_save_annotations=False) as rlogger:
with torch.no_grad():
for query_index, qid in enumerate(queries):
if query_index % 1000 == 0:
print_message("#> Logging query #{} (qid {}) now...".format(query_index, qid))
pids = all_query_rankings[0][query_index]
scores = all_query_rankings[1][query_index]
K = min(MAX_DEPTH_LOGGED, len(scores))
if K == 0:
continue
scores_topk = torch.tensor(scores).topk(K, largest=True, sorted=True)
pids, scores = torch.tensor(pids)[scores_topk.indices].tolist(), scores_topk.values.tolist()
ranking = [(score, pid, None) for pid, score in zip(pids, scores)]
assert len(ranking) <= MAX_DEPTH_LOGGED, (len(ranking), MAX_DEPTH_LOGGED)
rlogger.log(qid, ranking, is_ranked=True, print_positions=[1, 2] if query_index % 100 == 0 else [])
print('\n\n')
print(ranking_logger.filename)
print_message('#> Done.\n')
thread.join()
| 5,139 | 37.939394 | 115 | py |
ColBERT | ColBERT-master/colbert/ranking/index_ranker.py | import os
import math
import torch
import ujson
import traceback
from itertools import accumulate
from colbert.parameters import DEVICE
from colbert.utils.utils import print_message, dotdict, flatten
BSIZE = 1 << 14
class IndexRanker():
def __init__(self, tensor, doclens):
self.tensor = tensor
self.doclens = doclens
self.maxsim_dtype = torch.float32
self.doclens_pfxsum = [0] + list(accumulate(self.doclens))
self.doclens = torch.tensor(self.doclens)
self.doclens_pfxsum = torch.tensor(self.doclens_pfxsum)
self.dim = self.tensor.size(-1)
self.strides = [torch_percentile(self.doclens, p) for p in [90]]
self.strides.append(self.doclens.max().item())
self.strides = sorted(list(set(self.strides)))
print_message(f"#> Using strides {self.strides}..")
self.views = self._create_views(self.tensor)
self.buffers = self._create_buffers(BSIZE, self.tensor.dtype, {'cpu', 'cuda:0'})
def _create_views(self, tensor):
views = []
for stride in self.strides:
outdim = tensor.size(0) - stride + 1
view = torch.as_strided(tensor, (outdim, stride, self.dim), (self.dim, self.dim, 1))
views.append(view)
return views
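    # (Added note, not in the original source.) Each view is an overlapping sliding window over the flat
    # embedding tensor, so a document starting at row `offset` with doclen <= stride can be read out as
    # views[group][offset] without copying. The same trick in isolation:
    #   t = torch.arange(10).view(5, 2).float()        # 5 "embeddings" of dim 2
    #   w = torch.as_strided(t, (4, 2, 2), (2, 2, 1))  # windows of 2 consecutive rows
    #   assert torch.equal(w[1], t[1:3])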
def _create_buffers(self, max_bsize, dtype, devices):
buffers = {}
for device in devices:
buffers[device] = [torch.zeros(max_bsize, stride, self.dim, dtype=dtype,
device=device, pin_memory=(device == 'cpu'))
for stride in self.strides]
return buffers
def rank(self, Q, pids, views=None, shift=0):
assert len(pids) > 0
assert Q.size(0) in [1, len(pids)]
Q = Q.contiguous().to(DEVICE).to(dtype=self.maxsim_dtype)
views = self.views if views is None else views
VIEWS_DEVICE = views[0].device
D_buffers = self.buffers[str(VIEWS_DEVICE)]
raw_pids = pids if type(pids) is list else pids.tolist()
pids = torch.tensor(pids) if type(pids) is list else pids
doclens, offsets = self.doclens[pids], self.doclens_pfxsum[pids]
assignments = (doclens.unsqueeze(1) > torch.tensor(self.strides).unsqueeze(0) + 1e-6).sum(-1)
one_to_n = torch.arange(len(raw_pids))
output_pids, output_scores, output_permutation = [], [], []
for group_idx, stride in enumerate(self.strides):
locator = (assignments == group_idx)
if locator.sum() < 1e-5:
continue
group_pids, group_doclens, group_offsets = pids[locator], doclens[locator], offsets[locator]
group_Q = Q if Q.size(0) == 1 else Q[locator]
group_offsets = group_offsets.to(VIEWS_DEVICE) - shift
group_offsets_uniq, group_offsets_expand = torch.unique_consecutive(group_offsets, return_inverse=True)
D_size = group_offsets_uniq.size(0)
D = torch.index_select(views[group_idx], 0, group_offsets_uniq, out=D_buffers[group_idx][:D_size])
D = D.to(DEVICE)
D = D[group_offsets_expand.to(DEVICE)].to(dtype=self.maxsim_dtype)
mask = torch.arange(stride, device=DEVICE) + 1
mask = mask.unsqueeze(0) <= group_doclens.to(DEVICE).unsqueeze(-1)
scores = (D @ group_Q) * mask.unsqueeze(-1)
scores = scores.max(1).values.sum(-1).cpu()
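            # (Added note, not in the original source.) The two lines above are ColBERT's late-interaction
            # MaxSim: D @ group_Q yields a (num_docs x doc_tokens x query_tokens) similarity tensor, the
            # mask zeroes positions beyond each document's true length, max over dim 1 keeps every query
            # token's best-matching document token, and summing those maxima gives one score per document.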
output_pids.append(group_pids)
output_scores.append(scores)
output_permutation.append(one_to_n[locator])
output_permutation = torch.cat(output_permutation).sort().indices
output_pids = torch.cat(output_pids)[output_permutation].tolist()
output_scores = torch.cat(output_scores)[output_permutation].tolist()
assert len(raw_pids) == len(output_pids)
assert len(raw_pids) == len(output_scores)
assert raw_pids == output_pids
return output_scores
def batch_rank(self, all_query_embeddings, all_query_indexes, all_pids, sorted_pids):
assert sorted_pids is True
######
scores = []
range_start, range_end = 0, 0
for pid_offset in range(0, len(self.doclens), 50_000):
pid_endpos = min(pid_offset + 50_000, len(self.doclens))
range_start = range_start + (all_pids[range_start:] < pid_offset).sum()
range_end = range_end + (all_pids[range_end:] < pid_endpos).sum()
pids = all_pids[range_start:range_end]
query_indexes = all_query_indexes[range_start:range_end]
print_message(f"###--> Got {len(pids)} query--passage pairs in this sub-range {(pid_offset, pid_endpos)}.")
if len(pids) == 0:
continue
print_message(f"###--> Ranking in batches the pairs #{range_start} through #{range_end} in this sub-range.")
tensor_offset = self.doclens_pfxsum[pid_offset].item()
tensor_endpos = self.doclens_pfxsum[pid_endpos].item() + 512
collection = self.tensor[tensor_offset:tensor_endpos].to(DEVICE)
views = self._create_views(collection)
print_message(f"#> Ranking in batches of {BSIZE} query--passage pairs...")
for batch_idx, offset in enumerate(range(0, len(pids), BSIZE)):
if batch_idx % 100 == 0:
print_message("#> Processing batch #{}..".format(batch_idx))
endpos = offset + BSIZE
batch_query_index, batch_pids = query_indexes[offset:endpos], pids[offset:endpos]
Q = all_query_embeddings[batch_query_index]
scores.extend(self.rank(Q, batch_pids, views, shift=tensor_offset))
return scores
def torch_percentile(tensor, p):
assert p in range(1, 100+1)
assert tensor.dim() == 1
return tensor.kthvalue(int(p * tensor.size(0) / 100.0)).values.item()
| 5,952 | 35.078788 | 120 | py |
ColBERT | ColBERT-master/colbert/ranking/retrieval.py | import os
import time
import faiss
import random
import torch
import itertools

from colbert.utils.runs import Run
from multiprocessing import Pool
from colbert.modeling.inference import ModelInference
from colbert.evaluation.ranking_logger import RankingLogger

from colbert.utils.utils import print_message, batch
from colbert.ranking.rankers import Ranker


def retrieve(args):
    inference = ModelInference(args.colbert, amp=args.amp)
    ranker = Ranker(args, inference, faiss_depth=args.faiss_depth)

    ranking_logger = RankingLogger(Run.path, qrels=None)
    milliseconds = 0

    with ranking_logger.context('ranking.tsv', also_save_annotations=False) as rlogger:
        queries = args.queries
        qids_in_order = list(queries.keys())

        for qoffset, qbatch in batch(qids_in_order, 100, provide_offset=True):
            qbatch_text = [queries[qid] for qid in qbatch]

            rankings = []

            for query_idx, q in enumerate(qbatch_text):
                torch.cuda.synchronize('cuda:0')
                s = time.time()

                Q = ranker.encode([q])
                pids, scores = ranker.rank(Q)

                torch.cuda.synchronize()
                milliseconds += (time.time() - s) * 1000.0

                if len(pids):
                    print(qoffset+query_idx, q, len(scores), len(pids), scores[0], pids[0],
                          milliseconds / (qoffset+query_idx+1), 'ms')

                rankings.append(zip(pids, scores))

            for query_idx, (qid, ranking) in enumerate(zip(qbatch, rankings)):
                query_idx = qoffset + query_idx

                if query_idx % 100 == 0:
                    print_message(f"#> Logging query #{query_idx} (qid {qid}) now...")

                ranking = [(score, pid, None) for pid, score in itertools.islice(ranking, args.depth)]
                rlogger.log(qid, ranking, is_ranked=True)

    print('\n\n')
    print(ranking_logger.filename)
    print("#> Done.")
    print('\n\n')
| 2,000 | 31.274194 | 102 | py |
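retrieve() walks the queries in chunks of 100 via batch(qids_in_order, 100, provide_offset=True), which yields (offset, chunk) pairs. An equivalent helper, sketched only to illustrate the shape of that iteration (this is not the repository's implementation in colbert.utils.utils):

def chunks_with_offset(items, size):
    # Yield (offset, chunk) pairs over a list, mirroring how retrieve() consumes them.
    for offset in range(0, len(items), size):
        yield offset, items[offset:offset + size]

for qoffset, qbatch in chunks_with_offset(list(range(10)), 4):
    print(qoffset, qbatch)   # 0 [0, 1, 2, 3] / 4 [4, 5, 6, 7] / 8 [8, 9]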
ColBERT | ColBERT-master/colbert/ranking/reranking.py | import os
import time
import faiss
import random
import torch

from colbert.utils.runs import Run
from multiprocessing import Pool
from colbert.modeling.inference import ModelInference
from colbert.evaluation.ranking_logger import RankingLogger

from colbert.utils.utils import print_message, batch
from colbert.ranking.rankers import Ranker


def rerank(args):
    inference = ModelInference(args.colbert, amp=args.amp)
    ranker = Ranker(args, inference, faiss_depth=None)

    ranking_logger = RankingLogger(Run.path, qrels=None)
    milliseconds = 0

    with ranking_logger.context('ranking.tsv', also_save_annotations=False) as rlogger:
        queries = args.queries
        qids_in_order = list(queries.keys())

        for qoffset, qbatch in batch(qids_in_order, 100, provide_offset=True):
            qbatch_text = [queries[qid] for qid in qbatch]
            qbatch_pids = [args.topK_pids[qid] for qid in qbatch]

            rankings = []

            for query_idx, (q, pids) in enumerate(zip(qbatch_text, qbatch_pids)):
                torch.cuda.synchronize('cuda:0')
                s = time.time()

                Q = ranker.encode([q])
                pids, scores = ranker.rank(Q, pids=pids)

                torch.cuda.synchronize()
                milliseconds += (time.time() - s) * 1000.0

                if len(pids):
                    print(qoffset+query_idx, q, len(scores), len(pids), scores[0], pids[0],
                          milliseconds / (qoffset+query_idx+1), 'ms')

                rankings.append(zip(pids, scores))

            for query_idx, (qid, ranking) in enumerate(zip(qbatch, rankings)):
                query_idx = qoffset + query_idx

                if query_idx % 100 == 0:
                    print_message(f"#> Logging query #{query_idx} (qid {qid}) now...")

                ranking = [(score, pid, None) for pid, score in ranking]
                rlogger.log(qid, ranking, is_ranked=True)

    print('\n\n')
    print(ranking_logger.filename)
    print("#> Done.")
    print('\n\n')
| 2,042 | 31.951613 | 91 | py |
ColBERT | ColBERT-master/colbert/ranking/faiss_index.py | import os
import time
import faiss
import random
import torch

from multiprocessing import Pool
from colbert.modeling.inference import ModelInference
from colbert.utils.utils import print_message, flatten, batch
from colbert.indexing.loaders import load_doclens


class FaissIndex():
    def __init__(self, index_path, faiss_index_path, nprobe, part_range=None):
        print_message("#> Loading the FAISS index from", faiss_index_path, "..")

        faiss_part_range = os.path.basename(faiss_index_path).split('.')[-2].split('-')

        if len(faiss_part_range) == 2:
            faiss_part_range = range(*map(int, faiss_part_range))
            assert part_range[0] in faiss_part_range, (part_range, faiss_part_range)
            assert part_range[-1] in faiss_part_range, (part_range, faiss_part_range)
        else:
            faiss_part_range = None

        self.part_range = part_range
        self.faiss_part_range = faiss_part_range

        self.faiss_index = faiss.read_index(faiss_index_path)
        self.faiss_index.nprobe = nprobe

        print_message("#> Building the emb2pid mapping..")
        all_doclens = load_doclens(index_path, flatten=False)

        pid_offset = 0
        if faiss_part_range is not None:
            print(f"#> Restricting all_doclens to the range {faiss_part_range}.")
            pid_offset = len(flatten(all_doclens[:faiss_part_range.start]))
            all_doclens = all_doclens[faiss_part_range.start:faiss_part_range.stop]

        self.relative_range = None
        if self.part_range is not None:
            start = self.faiss_part_range.start if self.faiss_part_range is not None else 0
            a = len(flatten(all_doclens[:self.part_range.start - start]))
            b = len(flatten(all_doclens[:self.part_range.stop - start]))
            self.relative_range = range(a, b)
            print(f"self.relative_range = {self.relative_range}")

        all_doclens = flatten(all_doclens)

        total_num_embeddings = sum(all_doclens)
        self.emb2pid = torch.zeros(total_num_embeddings, dtype=torch.int)

        offset_doclens = 0
        for pid, dlength in enumerate(all_doclens):
            self.emb2pid[offset_doclens: offset_doclens + dlength] = pid_offset + pid
            offset_doclens += dlength

        print_message("len(self.emb2pid) =", len(self.emb2pid))

        self.parallel_pool = Pool(16)

    def retrieve(self, faiss_depth, Q, verbose=False):
        embedding_ids = self.queries_to_embedding_ids(faiss_depth, Q, verbose=verbose)
        pids = self.embedding_ids_to_pids(embedding_ids, verbose=verbose)

        if self.relative_range is not None:
            pids = [[pid for pid in pids_ if pid in self.relative_range] for pids_ in pids]

        return pids

    def queries_to_embedding_ids(self, faiss_depth, Q, verbose=True):
        # Flatten into a matrix for the faiss search.
        num_queries, embeddings_per_query, dim = Q.size()
        Q_faiss = Q.view(num_queries * embeddings_per_query, dim).cpu().contiguous()

        # Search in large batches with faiss.
        print_message("#> Search in batches with faiss. \t\t",
                      f"Q.size() = {Q.size()}, Q_faiss.size() = {Q_faiss.size()}",
                      condition=verbose)

        embeddings_ids = []
        faiss_bsize = embeddings_per_query * 5000
        for offset in range(0, Q_faiss.size(0), faiss_bsize):
            endpos = min(offset + faiss_bsize, Q_faiss.size(0))

            print_message("#> Searching from {} to {}...".format(offset, endpos), condition=verbose)

            some_Q_faiss = Q_faiss[offset:endpos].float().numpy()
            _, some_embedding_ids = self.faiss_index.search(some_Q_faiss, faiss_depth)
            embeddings_ids.append(torch.from_numpy(some_embedding_ids))

        embedding_ids = torch.cat(embeddings_ids)

        # Reshape to (number of queries, non-unique embedding IDs per query)
        embedding_ids = embedding_ids.view(num_queries, embeddings_per_query * embedding_ids.size(1))

        return embedding_ids

    def embedding_ids_to_pids(self, embedding_ids, verbose=True):
        # Find unique PIDs per query.
        print_message("#> Lookup the PIDs..", condition=verbose)
        all_pids = self.emb2pid[embedding_ids]

        print_message(f"#> Converting to a list [shape = {all_pids.size()}]..", condition=verbose)
        all_pids = all_pids.tolist()

        print_message("#> Removing duplicates (in parallel if large enough)..", condition=verbose)

        if len(all_pids) > 5000:
            all_pids = list(self.parallel_pool.map(uniq, all_pids))
        else:
            all_pids = list(map(uniq, all_pids))

        print_message("#> Done with embedding_ids_to_pids().", condition=verbose)

        return all_pids


def uniq(l):
    return list(set(l))
| 4,820 | 38.195122 | 101 | py |
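FaissIndex builds emb2pid, a flat tensor mapping every token-embedding index to the passage it came from, so the ids returned by FAISS can be converted into candidate passage ids. A standalone illustration with toy document lengths (all values are made up):

import torch

doclens = [3, 5, 2]                                   # embeddings per passage (toy values)
emb2pid = torch.zeros(sum(doclens), dtype=torch.int)

offset = 0
for pid, dlength in enumerate(doclens):
    emb2pid[offset: offset + dlength] = pid           # every embedding in this span belongs to `pid`
    offset += dlength

embedding_ids = torch.tensor([[0, 4, 9], [2, 3, 7]])  # FAISS neighbors per query (toy ids)
candidate_pids = [sorted(set(row)) for row in emb2pid[embedding_ids].tolist()]
print(candidate_pids)                                 # [[0, 1, 2], [0, 1]]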
ColBERT | ColBERT-master/colbert/ranking/rankers.py | import torch
from functools import partial

from colbert.ranking.index_part import IndexPart
from colbert.ranking.faiss_index import FaissIndex
from colbert.utils.utils import flatten, zipstar


class Ranker():
    def __init__(self, args, inference, faiss_depth=1024):
        self.inference = inference
        self.faiss_depth = faiss_depth

        if faiss_depth is not None:
            self.faiss_index = FaissIndex(args.index_path, args.faiss_index_path, args.nprobe, part_range=args.part_range)
            self.retrieve = partial(self.faiss_index.retrieve, self.faiss_depth)

        self.index = IndexPart(args.index_path, dim=inference.colbert.dim, part_range=args.part_range, verbose=True)

    def encode(self, queries):
        assert type(queries) in [list, tuple], type(queries)

        Q = self.inference.queryFromText(queries, bsize=512 if len(queries) > 512 else None)

        return Q

    def rank(self, Q, pids=None):
        pids = self.retrieve(Q, verbose=False)[0] if pids is None else pids

        assert type(pids) in [list, tuple], type(pids)
        assert Q.size(0) == 1, (len(pids), Q.size())
        assert all(type(pid) is int for pid in pids)

        scores = []
        if len(pids) > 0:
            Q = Q.permute(0, 2, 1)
            scores = self.index.rank(Q, pids)

            scores_sorter = torch.tensor(scores).sort(descending=True)
            pids, scores = torch.tensor(pids)[scores_sorter.indices].tolist(), scores_sorter.values.tolist()

        return pids, scores
| 1,520 | 33.568182 | 122 | py |
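At the end of Ranker.rank(), the candidate pids are reordered by descending MaxSim score through a torch sort. The same trick in isolation, with made-up pids and scores:

import torch

pids = [14, 3, 27, 8]
scores = [7.25, 9.0, 4.5, 8.5]

scores_sorter = torch.tensor(scores).sort(descending=True)
ranked_pids = torch.tensor(pids)[scores_sorter.indices].tolist()
ranked_scores = scores_sorter.values.tolist()

print(ranked_pids)     # [3, 8, 14, 27]
print(ranked_scores)   # [9.0, 8.5, 7.25, 4.5]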
ColBERT | ColBERT-master/colbert/modeling/inference.py | import torch
from colbert.modeling.colbert import ColBERT
from colbert.modeling.tokenization import QueryTokenizer, DocTokenizer
from colbert.utils.amp import MixedPrecisionManager
from colbert.parameters import DEVICE


class ModelInference():
    def __init__(self, colbert: ColBERT, amp=False):
        assert colbert.training is False

        self.colbert = colbert
        self.query_tokenizer = QueryTokenizer(colbert.query_maxlen)
        self.doc_tokenizer = DocTokenizer(colbert.doc_maxlen)

        self.amp_manager = MixedPrecisionManager(amp)

    def query(self, *args, to_cpu=False, **kw_args):
        with torch.no_grad():
            with self.amp_manager.context():
                Q = self.colbert.query(*args, **kw_args)
                return Q.cpu() if to_cpu else Q

    def doc(self, *args, to_cpu=False, **kw_args):
        with torch.no_grad():
            with self.amp_manager.context():
                D = self.colbert.doc(*args, **kw_args)
                return D.cpu() if to_cpu else D

    def queryFromText(self, queries, bsize=None, to_cpu=False):
        if bsize:
            batches = self.query_tokenizer.tensorize(queries, bsize=bsize)
            batches = [self.query(input_ids, attention_mask, to_cpu=to_cpu) for input_ids, attention_mask in batches]
            return torch.cat(batches)

        input_ids, attention_mask = self.query_tokenizer.tensorize(queries)
        return self.query(input_ids, attention_mask)

    def docFromText(self, docs, bsize=None, keep_dims=True, to_cpu=False):
        if bsize:
            batches, reverse_indices = self.doc_tokenizer.tensorize(docs, bsize=bsize)

            batches = [self.doc(input_ids, attention_mask, keep_dims=keep_dims, to_cpu=to_cpu)
                       for input_ids, attention_mask in batches]

            if keep_dims:
                D = _stack_3D_tensors(batches)
                return D[reverse_indices]

            D = [d for batch in batches for d in batch]
            return [D[idx] for idx in reverse_indices.tolist()]

        input_ids, attention_mask = self.doc_tokenizer.tensorize(docs)
        return self.doc(input_ids, attention_mask, keep_dims=keep_dims)

    def score(self, Q, D, mask=None, lengths=None, explain=False):
        if lengths is not None:
            assert mask is None, "don't supply both mask and lengths"

            mask = torch.arange(D.size(1), device=DEVICE) + 1
            mask = mask.unsqueeze(0) <= lengths.to(DEVICE).unsqueeze(-1)

        scores = (D @ Q)
        scores = scores if mask is None else scores * mask.unsqueeze(-1)
        scores = scores.max(1)

        if explain:
            assert False, "TODO"

        return scores.values.sum(-1).cpu()


def _stack_3D_tensors(groups):
    bsize = sum([x.size(0) for x in groups])
    maxlen = max([x.size(1) for x in groups])
    hdim = groups[0].size(2)

    output = torch.zeros(bsize, maxlen, hdim, device=groups[0].device, dtype=groups[0].dtype)

    offset = 0
    for x in groups:
        endpos = offset + x.size(0)
        output[offset:endpos, :x.size(1)] = x
        offset = endpos

    return output
| 3,132 | 34.602273 | 117 | py |
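_stack_3D_tensors pads batches of different sequence lengths into one right-zero-padded tensor so docFromText can restore the original order with a single index. A minimal standalone illustration (toy shapes):

import torch

groups = [torch.ones(2, 3, 4), 2 * torch.ones(1, 5, 4)]   # two batches with different maxlens

bsize = sum(x.size(0) for x in groups)
maxlen = max(x.size(1) for x in groups)
hdim = groups[0].size(2)

output = torch.zeros(bsize, maxlen, hdim)
offset = 0
for x in groups:
    output[offset:offset + x.size(0), :x.size(1)] = x     # shorter batches stay zero-padded on the right
    offset += x.size(0)

print(output.shape)      # torch.Size([3, 5, 4])
print(output[0, :, 0])   # tensor([1., 1., 1., 0., 0.])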
ColBERT | ColBERT-master/colbert/modeling/colbert.py | import string
import torch
import torch.nn as nn

from transformers import BertPreTrainedModel, BertModel, BertTokenizerFast
from colbert.parameters import DEVICE


class ColBERT(BertPreTrainedModel):
    def __init__(self, config, query_maxlen, doc_maxlen, mask_punctuation, dim=128, similarity_metric='cosine'):
        super(ColBERT, self).__init__(config)

        self.query_maxlen = query_maxlen
        self.doc_maxlen = doc_maxlen
        self.similarity_metric = similarity_metric
        self.dim = dim

        self.mask_punctuation = mask_punctuation
        self.skiplist = {}

        if self.mask_punctuation:
            self.tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
            self.skiplist = {w: True
                             for symbol in string.punctuation
                             for w in [symbol, self.tokenizer.encode(symbol, add_special_tokens=False)[0]]}

        self.bert = BertModel(config)
        self.linear = nn.Linear(config.hidden_size, dim, bias=False)

        self.init_weights()

    def forward(self, Q, D):
        return self.score(self.query(*Q), self.doc(*D))

    def query(self, input_ids, attention_mask):
        input_ids, attention_mask = input_ids.to(DEVICE), attention_mask.to(DEVICE)
        Q = self.bert(input_ids, attention_mask=attention_mask)[0]
        Q = self.linear(Q)

        return torch.nn.functional.normalize(Q, p=2, dim=2)

    def doc(self, input_ids, attention_mask, keep_dims=True):
        input_ids, attention_mask = input_ids.to(DEVICE), attention_mask.to(DEVICE)
        D = self.bert(input_ids, attention_mask=attention_mask)[0]
        D = self.linear(D)

        mask = torch.tensor(self.mask(input_ids), device=DEVICE).unsqueeze(2).float()
        D = D * mask

        D = torch.nn.functional.normalize(D, p=2, dim=2)

        if not keep_dims:
            D, mask = D.cpu().to(dtype=torch.float16), mask.cpu().bool().squeeze(-1)
            D = [d[mask[idx]] for idx, d in enumerate(D)]

        return D

    def score(self, Q, D):
        if self.similarity_metric == 'cosine':
            return (Q @ D.permute(0, 2, 1)).max(2).values.sum(1)

        assert self.similarity_metric == 'l2'
        return (-1.0 * ((Q.unsqueeze(2) - D.unsqueeze(1))**2).sum(-1)).max(-1).values.sum(-1)

    def mask(self, input_ids):
        mask = [[(x not in self.skiplist) and (x != 0) for x in d] for d in input_ids.cpu().tolist()]
        return mask
| 2,458 | 34.637681 | 112 | py |
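The cosine branch of ColBERT.score() is the late-interaction MaxSim operator: each query embedding picks its best-matching document embedding, and the per-token maxima are summed. Replaying that expression on random unit-normalized embeddings (toy shapes):

import torch

batch, q_len, d_len, dim = 2, 4, 7, 128

Q = torch.nn.functional.normalize(torch.randn(batch, q_len, dim), p=2, dim=2)
D = torch.nn.functional.normalize(torch.randn(batch, d_len, dim), p=2, dim=2)

# Same expression as the cosine branch of ColBERT.score().
scores = (Q @ D.permute(0, 2, 1)).max(2).values.sum(1)
print(scores.shape)   # torch.Size([2])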