repo
stringlengths 1
99
| file
stringlengths 13
215
| code
stringlengths 12
59.2M
| file_length
int64 12
59.2M
| avg_line_length
float64 3.82
1.48M
| max_line_length
int64 12
2.51M
| extension_type
stringclasses 1
value |
---|---|---|---|---|---|---|
TiKick | TiKick-main/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import os
from setuptools import setup, find_packages
import setuptools
def get_version() -> str:
    """Return the package version parsed from ``tmarl/__init__.py``.

    The file is tokenized on whitespace and the token two past
    ``__version__`` (the quoted value after ``=``) is taken, with the
    surrounding quote characters stripped.
    See https://packaging.python.org/guides/single-sourcing-package-version/
    """
    # Use a context manager so the file handle is closed deterministically
    # instead of leaking until garbage collection.
    with open(os.path.join("tmarl", "__init__.py"), "r") as f:
        init = f.read().split()
    return init[init.index("__version__") + 2][1:-1]
# Package metadata; executed at build/install time.
setup(
    name="tmarl",
    version=get_version(),
    description="marl algorithms",
    long_description=open("README.md", encoding="utf8").read(),
    long_description_content_type="text/markdown",
    author="tmarl",
    author_email="[email protected]",
    # `find_packages` is already imported at the top of the file;
    # use it directly rather than via the `setuptools` module a second way.
    packages=find_packages(),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Programming Language :: Python :: 3",
        # Fixed to the exact trove classifier recognized by PyPI;
        # "Apache License" alone is not a valid classifier name.
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    keywords="multi-agent reinforcement learning algorithms pytorch",
    python_requires='>=3.6',
)
| 1,788 | 35.510204 | 74 | py |
TiKick | TiKick-main/tmarl/networks/policy_network.py |
import torch
import torch.nn as nn
from tmarl.networks.utils.util import init, check
from tmarl.networks.utils.mlp import MLPBase, MLPLayer
from tmarl.networks.utils.rnn import RNNLayer
from tmarl.networks.utils.act import ACTLayer
from tmarl.networks.utils.popart import PopArt
from tmarl.utils.util import get_shape_from_obs_space
# networks are defined here
class PolicyNetwork(nn.Module):
def __init__(self, args, obs_space, action_space, device=torch.device("cpu")):
super(PolicyNetwork, self).__init__()
self.hidden_size = args.hidden_size
self._gain = args.gain
self._use_orthogonal = args.use_orthogonal
self._activation_id = args.activation_id
self._use_policy_active_masks = args.use_policy_active_masks
self._use_naive_recurrent_policy = args.use_naive_recurrent_policy
self._use_recurrent_policy = args.use_recurrent_policy
self._use_influence_policy = args.use_influence_policy
self._influence_layer_N = args.influence_layer_N
self._use_policy_vhead = args.use_policy_vhead
self._recurrent_N = args.recurrent_N
self.tpdv = dict(dtype=torch.float32, device=device)
obs_shape = get_shape_from_obs_space(obs_space)
self._mixed_obs = False
self.base = MLPBase(args, obs_shape, use_attn_internal=False, use_cat_self=True)
input_size = self.base.output_size
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
self.rnn = RNNLayer(input_size, self.hidden_size, self._recurrent_N, self._use_orthogonal)
input_size = self.hidden_size
if self._use_influence_policy:
self.mlp = MLPLayer(obs_shape[0], self.hidden_size,
self._influence_layer_N, self._use_orthogonal, self._activation_id)
input_size += self.hidden_size
self.act = ACTLayer(action_space, input_size, self._use_orthogonal, self._gain)
if self._use_policy_vhead:
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][self._use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0))
if self._use_popart:
self.v_out = init_(PopArt(input_size, 1, device=device))
else:
self.v_out = init_(nn.Linear(input_size, 1))
self.to(device)
    def forward(self, obs, rnn_states, masks, available_actions=None, deterministic=False):
        """Compute actions for a batch of observations.

        Args:
            obs: observations (array/tensor, or dict of them when mixed obs).
            rnn_states: recurrent hidden states of the actor RNN.
            masks: episode-continuation masks used to reset RNN states.
            available_actions: optional mask of currently legal actions.
            deterministic: if True take the distribution mode, else sample.

        Returns:
            (actions, action_log_probs, rnn_states)
        """
        # Coerce all inputs to float32 tensors on this network's device.
        if self._mixed_obs:
            for key in obs.keys():
                obs[key] = check(obs[key]).to(**self.tpdv)
        else:
            obs = check(obs).to(**self.tpdv)
        rnn_states = check(rnn_states).to(**self.tpdv)
        masks = check(masks).to(**self.tpdv)
        if available_actions is not None:
            available_actions = check(available_actions).to(**self.tpdv)

        actor_features = self.base(obs)

        if self._use_naive_recurrent_policy or self._use_recurrent_policy:
            actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)

        if self._use_influence_policy:
            # Concatenate raw-observation MLP features with recurrent features.
            mlp_obs = self.mlp(obs)
            actor_features = torch.cat([actor_features, mlp_obs], dim=1)

        actions, action_log_probs = self.act(actor_features, available_actions, deterministic)

        return actions, action_log_probs, rnn_states
    def evaluate_actions(self, obs, rnn_states, action, masks, available_actions=None, active_masks=None):
        """Score given actions under the current policy (PPO update path).

        Returns:
            (action_log_probs, dist_entropy, values) -- ``values`` is None
            unless the policy value head is enabled.
        """
        # Coerce all inputs to float32 tensors on this network's device.
        if self._mixed_obs:
            for key in obs.keys():
                obs[key] = check(obs[key]).to(**self.tpdv)
        else:
            obs = check(obs).to(**self.tpdv)
        rnn_states = check(rnn_states).to(**self.tpdv)
        action = check(action).to(**self.tpdv)
        masks = check(masks).to(**self.tpdv)
        if available_actions is not None:
            available_actions = check(available_actions).to(**self.tpdv)
        if active_masks is not None:
            active_masks = check(active_masks).to(**self.tpdv)

        actor_features = self.base(obs)

        if self._use_naive_recurrent_policy or self._use_recurrent_policy:
            actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)

        if self._use_influence_policy:
            mlp_obs = self.mlp(obs)
            actor_features = torch.cat([actor_features, mlp_obs], dim=1)

        # Entropy is weighted by per-agent aliveness only when the flag is set.
        action_log_probs, dist_entropy = self.act.evaluate_actions(actor_features, action, available_actions, active_masks = active_masks if self._use_policy_active_masks else None)

        values = self.v_out(actor_features) if self._use_policy_vhead else None

        return action_log_probs, dist_entropy, values
    def get_policy_values(self, obs, rnn_states, masks):
        """Value estimates from the policy's own value head.

        NOTE(review): reads ``self.v_out``, which only exists when
        ``use_policy_vhead`` is enabled -- confirm callers respect that.
        """
        # Coerce inputs to float32 tensors on this network's device.
        if self._mixed_obs:
            for key in obs.keys():
                obs[key] = check(obs[key]).to(**self.tpdv)
        else:
            obs = check(obs).to(**self.tpdv)
        rnn_states = check(rnn_states).to(**self.tpdv)
        masks = check(masks).to(**self.tpdv)

        actor_features = self.base(obs)

        if self._use_naive_recurrent_policy or self._use_recurrent_policy:
            actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)

        if self._use_influence_policy:
            mlp_obs = self.mlp(obs)
            actor_features = torch.cat([actor_features, mlp_obs], dim=1)

        values = self.v_out(actor_features)
return values | 5,558 | 41.113636 | 181 | py |
TiKick | TiKick-main/tmarl/networks/utils/distributions.py | import torch
import torch.nn as nn
from .util import init
"""
Modify standard PyTorch distributions so they are compatible with this code.
"""
#
# Standardize distribution interfaces
#
# Categorical
class FixedCategorical(torch.distributions.Categorical):
    """Categorical distribution whose samples and log-probs carry a trailing
    singleton dimension, matching the buffer layout used by this codebase."""

    def sample(self):
        # Append a last axis so actions are shaped (..., 1).
        drawn = super().sample()
        return drawn.unsqueeze(-1)

    def log_probs(self, actions):
        # Strip the trailing axis before scoring, sum per-sample
        # log-probabilities, then restore the trailing axis.
        scored = super().log_prob(actions.squeeze(-1))
        per_sample = scored.view(actions.size(0), -1).sum(-1)
        return per_sample.unsqueeze(-1)

    def mode(self):
        # Most probable category, keeping the trailing axis.
        return self.probs.argmax(dim=-1, keepdim=True)
# Normal
class FixedNormal(torch.distributions.Normal):
    """Diagonal Normal distribution with summed per-dimension log-probs."""

    def log_probs(self, actions):
        """Sum log-densities over the last (action) dimension, keepdim."""
        return super().log_prob(actions).sum(-1, keepdim=True)

    def entropy(self):
        """Total entropy summed over action dimensions.

        BUG FIX: the original method was misspelled ``entrop`` and called
        ``super.entropy()`` (missing parentheses on ``super``), which raised
        AttributeError when invoked.
        """
        return super().entropy().sum(-1)

    # Backward-compatible alias for the original (misspelled) name.
    entrop = entropy

    def mode(self):
        """Deterministic action: the distribution mean."""
        return self.mean
# Bernoulli
class FixedBernoulli(torch.distributions.Bernoulli):
    """Bernoulli distribution with per-sample aggregated log-probs."""

    def log_probs(self, actions):
        """Summed log-probability per sample, shape (batch, 1).

        BUG FIX: the original called ``super.log_prob`` (missing parentheses
        on ``super``), which raised AttributeError at call time.
        """
        return super().log_prob(actions).view(actions.size(0), -1).sum(-1).unsqueeze(-1)

    def entropy(self):
        """Total entropy summed over the last dimension."""
        return super().entropy().sum(-1)

    def mode(self):
        # Threshold probabilities at 0.5.
        return torch.gt(self.probs, 0.5).float()
class Categorical(nn.Module):
    """Linear head producing a FixedCategorical over discrete actions."""

    def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
        super(Categorical, self).__init__()
        chosen_init = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]

        def init_(m):
            return init(m, chosen_init, lambda x: nn.init.constant_(x, 0), gain)

        self.linear = init_(nn.Linear(num_inputs, num_outputs))

    def forward(self, x, available_actions=None):
        logits = self.linear(x)
        if available_actions is not None:
            # Mask out unavailable actions with a huge negative logit.
            logits[available_actions == 0] = -1e10
        return FixedCategorical(logits=logits)
class DiagGaussian(nn.Module):
    """Linear head producing a diagonal Gaussian (FixedNormal) over
    continuous actions, with a state-independent learned log-std."""

    def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
        super(DiagGaussian, self).__init__()
        chosen_init = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]

        def init_(m):
            return init(m, chosen_init, lambda x: nn.init.constant_(x, 0), gain)

        self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
        self.logstd = AddBias(torch.zeros(num_outputs))

    def forward(self, x):
        mean = self.fc_mean(x)
        # Feed zeros through AddBias so the log-std is a learned constant
        # independent of the input (ugly hack kept for KFAC compatibility).
        zeros = torch.zeros(mean.size())
        if x.is_cuda:
            zeros = zeros.cuda()
        logstd = self.logstd(zeros)
        return FixedNormal(mean, logstd.exp())
class Bernoulli(nn.Module):
    """Linear head producing a FixedBernoulli over multi-binary actions."""

    def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
        super(Bernoulli, self).__init__()
        chosen_init = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]

        def init_(m):
            return init(m, chosen_init, lambda x: nn.init.constant_(x, 0), gain)

        self.linear = init_(nn.Linear(num_inputs, num_outputs))

    def forward(self, x):
        logits = self.linear(x)
        return FixedBernoulli(logits=logits)
class AddBias(nn.Module):
    """Adds a learned per-channel bias to its input (used here as a
    state-independent log-std parameter holder)."""

    def __init__(self, bias):
        super(AddBias, self).__init__()
        # Stored as (num_outputs, 1) so it can be reshaped for 2-D or 4-D inputs.
        self._bias = nn.Parameter(bias.unsqueeze(1))

    def forward(self, x):
        if x.dim() == 2:
            shaped = self._bias.t().view(1, -1)
        else:
            shaped = self._bias.t().view(1, -1, 1, 1)
        return x + shaped
| 3,466 | 27.891667 | 86 | py |
TiKick | TiKick-main/tmarl/networks/utils/mlp.py |
import torch.nn as nn
from .util import init, get_clones
class MLPLayer(nn.Module):
    """Stack of Linear + activation + LayerNorm blocks: one input block
    followed by ``layer_N`` identical hidden blocks."""

    def __init__(self, input_dim, hidden_size, layer_N, use_orthogonal, activation_id):
        super(MLPLayer, self).__init__()
        self._layer_N = layer_N

        # Activation, init scheme and gain selected by integer/bool flags.
        active_func = [nn.Tanh(), nn.ReLU(), nn.LeakyReLU(), nn.ELU()][activation_id]
        chosen_init = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
        gain = nn.init.calculate_gain(['tanh', 'relu', 'leaky_relu', 'leaky_relu'][activation_id])

        def init_(m):
            return init(m, chosen_init, lambda x: nn.init.constant_(x, 0), gain=gain)

        self.fc1 = nn.Sequential(
            init_(nn.Linear(input_dim, hidden_size)), active_func, nn.LayerNorm(hidden_size))
        self.fc_h = nn.Sequential(
            init_(nn.Linear(hidden_size, hidden_size)), active_func, nn.LayerNorm(hidden_size))
        self.fc2 = get_clones(self.fc_h, self._layer_N)

    def forward(self, x):
        out = self.fc1(x)
        for hidden_block in self.fc2:
            out = hidden_block(out)
        return out
class MLPBase(nn.Module):
    def __init__(self, args, obs_shape, use_attn_internal=False, use_cat_self=True):
        """Flat-observation encoder: optional feature LayerNorm, then an MLP.

        Args:
            args: argument namespace with normalization/architecture flags.
            obs_shape: observation shape; only ``obs_shape[0]`` (flat dim) is used.
            use_attn_internal: unused here -- kept for interface compatibility.
            use_cat_self: unused here -- kept for interface compatibility.
        """
        super(MLPBase, self).__init__()

        self._use_feature_normalization = args.use_feature_normalization
        self._use_orthogonal = args.use_orthogonal
        self._activation_id = args.activation_id
        # NOTE(review): conv1d/stacked-frame flags are stored but never used
        # in this class -- presumably consumed elsewhere; verify.
        self._use_conv1d = args.use_conv1d
        self._stacked_frames = args.stacked_frames
        self._layer_N = args.layer_N
        self.hidden_size = args.hidden_size

        obs_dim = obs_shape[0]
        inputs_dim = obs_dim

        if self._use_feature_normalization:
            self.feature_norm = nn.LayerNorm(obs_dim)

        self.mlp = MLPLayer(inputs_dim, self.hidden_size,
                            self._layer_N, self._use_orthogonal, self._activation_id)
    def forward(self, x):
        """Encode a batch of flat observations into hidden features."""
        if self._use_feature_normalization:
            x = self.feature_norm(x)
        x = self.mlp(x)
        return x
@property
def output_size(self):
return self.hidden_size | 2,116 | 32.603175 | 98 | py |
TiKick | TiKick-main/tmarl/networks/utils/popart.py | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class PopArt(torch.nn.Module):
    """Linear output layer with PopArt-style running normalization of targets.

    Keeps exponentially-averaged, debiased estimates of the target mean and
    variance; ``update`` also rescales the layer's weight and bias so the
    layer's outputs are preserved across statistics updates.
    """

    def __init__(self, input_shape, output_shape, norm_axes=1, beta=0.99999, epsilon=1e-5, device=torch.device("cpu")):
        super(PopArt, self).__init__()

        self.beta = beta          # EMA decay for the running statistics
        self.epsilon = epsilon    # numerical floor used when debiasing
        self.norm_axes = norm_axes
        self.tpdv = dict(dtype=torch.float32, device=device)

        self.input_shape = input_shape
        self.output_shape = output_shape

        # NOTE(review): calling ``.to(...)`` on a freshly built nn.Parameter
        # returns a tensor, not the registered parameter itself; on a non-CPU
        # device these attributes may not be tracked as module parameters --
        # verify before training on GPU.
        self.weight = nn.Parameter(torch.Tensor(output_shape, input_shape)).to(**self.tpdv)
        self.bias = nn.Parameter(torch.Tensor(output_shape)).to(**self.tpdv)

        # Running statistics (excluded from gradient updates).
        self.stddev = nn.Parameter(torch.ones(output_shape), requires_grad=False).to(**self.tpdv)
        self.mean = nn.Parameter(torch.zeros(output_shape), requires_grad=False).to(**self.tpdv)
        self.mean_sq = nn.Parameter(torch.zeros(output_shape), requires_grad=False).to(**self.tpdv)
        self.debiasing_term = nn.Parameter(torch.tensor(0.0), requires_grad=False).to(**self.tpdv)

        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-init the linear weights and zero the running statistics."""
        torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            torch.nn.init.uniform_(self.bias, -bound, bound)
        self.mean.zero_()
        self.mean_sq.zero_()
        self.debiasing_term.zero_()

    def forward(self, input_vector):
        """Plain linear transform; no normalization is applied here."""
        if type(input_vector) == np.ndarray:
            input_vector = torch.from_numpy(input_vector)
        input_vector = input_vector.to(**self.tpdv)

        return F.linear(input_vector, self.weight, self.bias)

    @torch.no_grad()
    def update(self, input_vector):
        """Fold a batch of targets into the running mean/std, then rescale
        weight/bias so existing outputs are preserved (the "Art" step)."""
        if type(input_vector) == np.ndarray:
            input_vector = torch.from_numpy(input_vector)
        input_vector = input_vector.to(**self.tpdv)

        old_mean, old_stddev = self.mean, self.stddev

        # Statistics over the leading ``norm_axes`` axes of the batch.
        batch_mean = input_vector.mean(dim=tuple(range(self.norm_axes)))
        batch_sq_mean = (input_vector ** 2).mean(dim=tuple(range(self.norm_axes)))

        self.mean.mul_(self.beta).add_(batch_mean * (1.0 - self.beta))
        self.mean_sq.mul_(self.beta).add_(batch_sq_mean * (1.0 - self.beta))
        self.debiasing_term.mul_(self.beta).add_(1.0 * (1.0 - self.beta))

        self.stddev = (self.mean_sq - self.mean ** 2).sqrt().clamp(min=1e-4)

        # NOTE(review): these assignments replace the attributes with plain
        # tensors; any optimizer holding references to the old parameters
        # would silently stop updating them -- confirm this is intended.
        self.weight = self.weight * old_stddev / self.stddev
        self.bias = (old_stddev * self.bias + old_mean - self.mean) / self.stddev

    def debiased_mean_var(self):
        """Return EMA-debiased mean and variance of the tracked targets."""
        debiased_mean = self.mean / self.debiasing_term.clamp(min=self.epsilon)
        debiased_mean_sq = self.mean_sq / self.debiasing_term.clamp(min=self.epsilon)
        debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2)
        return debiased_mean, debiased_var

    def normalize(self, input_vector):
        """Map raw targets to normalized space: (x - mean) / std."""
        if type(input_vector) == np.ndarray:
            input_vector = torch.from_numpy(input_vector)
        input_vector = input_vector.to(**self.tpdv)

        mean, var = self.debiased_mean_var()
        out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes]

        return out

    def denormalize(self, input_vector):
        """Map normalized predictions back to raw target space.

        NOTE(review): unlike ``normalize``, this returns a numpy array.
        """
        if type(input_vector) == np.ndarray:
            input_vector = torch.from_numpy(input_vector)
        input_vector = input_vector.to(**self.tpdv)

        mean, var = self.debiased_mean_var()
        out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes]

        out = out.cpu().numpy()

        return out
| 3,796 | 38.968421 | 119 | py |
TiKick | TiKick-main/tmarl/networks/utils/util.py |
import copy
import numpy as np
import torch
import torch.nn as nn
def init(module, weight_init, bias_init, gain=1):
    """Initialize ``module``'s weight and bias in place and return it,
    so the call can be chained inline when building layers."""
    weight_tensor = module.weight.data
    bias_tensor = module.bias.data
    weight_init(weight_tensor, gain=gain)
    bias_init(bias_tensor)
    return module
def get_clones(module, N):
    """Return an nn.ModuleList holding ``N`` independent deep copies of
    ``module`` (each clone has its own parameters)."""
    return nn.ModuleList(copy.deepcopy(module) for _ in range(N))
def check(input):
    """Convert a numpy array to a torch tensor (sharing memory via
    ``torch.from_numpy``); pass tensors and anything else through unchanged.

    Uses ``isinstance`` rather than the original ``type(...) ==`` comparison,
    the idiomatic and subclass-tolerant type check. (The parameter name
    ``input`` shadows the builtin but is kept for interface compatibility.)
    """
    return torch.from_numpy(input) if isinstance(input, np.ndarray) else input
| 426 | 21.473684 | 76 | py |
TiKick | TiKick-main/tmarl/networks/utils/act.py |
from .distributions import Bernoulli, Categorical, DiagGaussian
import torch
import torch.nn as nn
class ACTLayer(nn.Module):
    def __init__(self, action_space, inputs_dim, use_orthogonal, gain):
        """Build the action head(s) matching the given gym action space.

        Discrete -> one Categorical head; Box -> DiagGaussian;
        MultiBinary -> Bernoulli; MultiDiscrete -> one Categorical per
        sub-action; anything else is treated as a mixed
        (continuous + discrete) tuple space.
        """
        super(ACTLayer, self).__init__()
        self.multidiscrete_action = False
        self.continuous_action = False
        self.mixed_action = False

        # Dispatch on the class *name* so gym/gymnasium variants both match.
        if action_space.__class__.__name__ == "Discrete":
            action_dim = action_space.n
            self.action_out = Categorical(inputs_dim, action_dim, use_orthogonal, gain)
        elif action_space.__class__.__name__ == "Box":
            self.continuous_action = True
            action_dim = action_space.shape[0]
            self.action_out = DiagGaussian(inputs_dim, action_dim, use_orthogonal, gain)
        elif action_space.__class__.__name__ == "MultiBinary":
            action_dim = action_space.shape[0]
            self.action_out = Bernoulli(inputs_dim, action_dim, use_orthogonal, gain)
        elif action_space.__class__.__name__ == "MultiDiscrete":
            self.multidiscrete_action = True
            # One categorical head per discrete sub-action dimension.
            action_dims = action_space.high - action_space.low + 1
            self.action_outs = []
            for action_dim in action_dims:
                self.action_outs.append(Categorical(inputs_dim, action_dim, use_orthogonal, gain))
            self.action_outs = nn.ModuleList(self.action_outs)
        else:  # discrete + continous
            self.mixed_action = True
            # assumes a 2-element tuple space: (Box, Discrete) -- TODO confirm
            continous_dim = action_space[0].shape[0]
            discrete_dim = action_space[1].n
            self.action_outs = nn.ModuleList([DiagGaussian(inputs_dim, continous_dim, use_orthogonal, gain), Categorical(
                inputs_dim, discrete_dim, use_orthogonal, gain)])
    def forward(self, x, available_actions=None, deterministic=False):
        """Sample (or take the mode of) actions from the head(s).

        Returns:
            (actions, action_log_probs); log-probs are summed across heads
            for mixed actions and concatenated per-head for multi-discrete.
        """
        if self.mixed_action:
            actions = []
            action_log_probs = []
            for action_out in self.action_outs:
                action_logit = action_out(x)
                action = action_logit.mode() if deterministic else action_logit.sample()
                action_log_prob = action_logit.log_probs(action)
                # Cast discrete sub-actions to float so they concatenate
                # with the continuous part.
                actions.append(action.float())
                action_log_probs.append(action_log_prob)
            actions = torch.cat(actions, -1)
            action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True)
        elif self.multidiscrete_action:
            actions = []
            action_log_probs = []
            for action_out in self.action_outs:
                action_logit = action_out(x)
                action = action_logit.mode() if deterministic else action_logit.sample()
                action_log_prob = action_logit.log_probs(action)
                actions.append(action)
                action_log_probs.append(action_log_prob)
            actions = torch.cat(actions, -1)
            action_log_probs = torch.cat(action_log_probs, -1)
        elif self.continuous_action:
            action_logits = self.action_out(x)
            actions = action_logits.mode() if deterministic else action_logits.sample()
            action_log_probs = action_logits.log_probs(actions)
        else:
            # Single discrete head; available_actions masks illegal moves.
            action_logits = self.action_out(x, available_actions)
            actions = action_logits.mode() if deterministic else action_logits.sample()
            action_log_probs = action_logits.log_probs(actions)

        return actions, action_log_probs
    def get_probs(self, x, available_actions=None):
        """Return action probabilities (concatenated across heads when there
        are several).

        NOTE(review): for continuous (Box) spaces a Normal distribution has
        no ``.probs`` attribute -- that branch would fail if exercised.
        """
        if self.mixed_action or self.multidiscrete_action:
            action_probs = []
            for action_out in self.action_outs:
                action_logit = action_out(x)
                action_prob = action_logit.probs
                action_probs.append(action_prob)
            action_probs = torch.cat(action_probs, -1)
        elif self.continuous_action:
            action_logits = self.action_out(x)
            action_probs = action_logits.probs
        else:
            action_logits = self.action_out(x, available_actions)
            action_probs = action_logits.probs
        return action_probs
    def get_log_1mp(self, x, action, available_actions=None, active_masks=None):
        """Return log(1 - p(action)) for the taken actions.

        NOTE(review): uses ``self.action_out`` so it is only meaningful for
        the single-head (plain Discrete) case; probabilities are clamped
        below 1 to keep the log finite.
        """
        action_logits = self.action_out(x, available_actions)
        action_prob = torch.gather(action_logits.probs, 1, action.long())
        action_prob = torch.clamp(action_prob, 0, 1 - 1e-6)
        action_log_1mp = torch.log(1 - action_prob)
        return action_log_1mp
    def evaluate_actions(self, x, action, available_actions=None, active_masks=None):
        """Log-probs and entropy of given actions under the current policy.

        ``active_masks`` (per-agent aliveness) weights the entropy average
        when provided.
        """
        if self.mixed_action:
            # Hard-coded split: first 2 dims continuous, last 1 discrete --
            # presumably specific to this project's action space; TODO confirm.
            a, b = action.split((2, 1), -1)
            b = b.long()
            action = [a, b]
            action_log_probs = []
            dist_entropy = []
            for action_out, act in zip(self.action_outs, action):
                action_logit = action_out(x)
                action_log_probs.append(action_logit.log_probs(act))
                if active_masks is not None:
                    # Entropy shapes differ between heads; align the mask.
                    if len(action_logit.entropy().shape) == len(active_masks.shape):
                        dist_entropy.append((action_logit.entropy() * active_masks).sum() / active_masks.sum())
                    else:
                        dist_entropy.append((action_logit.entropy() * active_masks.squeeze(-1)).sum() / active_masks.sum())
                else:
                    dist_entropy.append(action_logit.entropy().mean())
            action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True)
            # Fixed weighting of continuous vs discrete entropy terms.
            dist_entropy = dist_entropy[0] * 0.0025 + dist_entropy[1] * 0.01
        elif self.multidiscrete_action:
            # One column per sub-action; iterate heads and columns in lockstep.
            action = torch.transpose(action, 0, 1)
            action_log_probs = []
            dist_entropy = []
            for action_out, act in zip(self.action_outs, action):
                action_logit = action_out(x)
                action_log_probs.append(action_logit.log_probs(act))
                if active_masks is not None:
                    dist_entropy.append((action_logit.entropy() * active_masks.squeeze(-1)).sum() / active_masks.sum())
                else:
                    dist_entropy.append(action_logit.entropy().mean())
            action_log_probs = torch.cat(action_log_probs, -1)  # ! could be wrong
            dist_entropy = torch.tensor(dist_entropy).mean()
        elif self.continuous_action:
            action_logits = self.action_out(x)
            action_log_probs = action_logits.log_probs(action)
            if active_masks is not None:
                dist_entropy = (action_logits.entropy() * active_masks).sum() / active_masks.sum()
            else:
                dist_entropy = action_logits.entropy().mean()
        else:
            action_logits = self.action_out(x, available_actions)
            action_log_probs = action_logits.log_probs(action)
            if active_masks is not None:
                dist_entropy = (action_logits.entropy() * active_masks.squeeze(-1)).sum() / active_masks.sum()
            else:
                dist_entropy = action_logits.entropy().mean()
return action_log_probs, dist_entropy | 7,195 | 46.342105 | 121 | py |
TiKick | TiKick-main/tmarl/networks/utils/rnn.py |
import torch
import torch.nn as nn
class RNNLayer(nn.Module):
    """GRU wrapper that handles per-step done masks.

    ``masks`` zero out the hidden state at episode boundaries. The forward
    pass supports both a single step per env (x and hxs batch-aligned) and a
    flattened (T * N, -1) rollout chunk, which is segmented at steps where
    any env restarted so the GRU can run over whole segments at once.
    """

    def __init__(self, inputs_dim, outputs_dim, recurrent_N, use_orthogonal):
        super(RNNLayer, self).__init__()
        self._recurrent_N = recurrent_N
        self._use_orthogonal = use_orthogonal

        self.rnn = nn.GRU(inputs_dim, outputs_dim, num_layers=self._recurrent_N)
        # Zero biases; orthogonal or xavier weights depending on config.
        for name, param in self.rnn.named_parameters():
            if 'bias' in name:
                nn.init.constant_(param, 0)
            elif 'weight' in name:
                if self._use_orthogonal:
                    nn.init.orthogonal_(param)
                else:
                    nn.init.xavier_uniform_(param)
        self.norm = nn.LayerNorm(outputs_dim)

    def forward(self, x, hxs, masks):
        """Run the GRU; return (LayerNorm'd outputs, new hidden states)."""
        if x.size(0) == hxs.size(0):
            # Single-step path: one input row per env; reset hidden state
            # wherever masks are zero.
            x, hxs = self.rnn(x.unsqueeze(0), (hxs * masks.repeat(1, self._recurrent_N).unsqueeze(-1)).transpose(0, 1).contiguous())
            x = x.squeeze(0)
            hxs = hxs.transpose(0, 1)
        else:
            # x is a (T, N, -1) tensor that has been flatten to (T * N, -1)
            N = hxs.size(0)
            T = int(x.size(0) / N)

            # unflatten
            x = x.view(T, N, x.size(1))

            # Same deal with masks
            masks = masks.view(T, N)

            # Let's figure out which steps in the sequence have a zero for any agent
            # We will always assume t=0 has a zero in it as that makes the logic cleaner
            has_zeros = ((masks[1:] == 0.0)
                         .any(dim=-1)
                         .nonzero()
                         .squeeze()
                         .cpu())

            # +1 to correct the masks[1:]
            if has_zeros.dim() == 0:
                # Deal with scalar
                has_zeros = [has_zeros.item() + 1]
            else:
                has_zeros = (has_zeros + 1).numpy().tolist()

            # add t=0 and t=T to the list
            has_zeros = [0] + has_zeros + [T]

            hxs = hxs.transpose(0, 1)
            outputs = []
            for i in range(len(has_zeros) - 1):
                # We can now process steps that don't have any zeros in masks together!
                # This is much faster
                start_idx = has_zeros[i]
                end_idx = has_zeros[i + 1]
                # Zero the hidden state for envs that restarted at this segment start.
                temp = (hxs * masks[start_idx].view(1, -1, 1).repeat(self._recurrent_N, 1, 1)).contiguous()
                rnn_scores, hxs = self.rnn(x[start_idx:end_idx], temp)
                outputs.append(rnn_scores)

            # assert len(outputs) == T
            # x is a (T, N, -1) tensor
            x = torch.cat(outputs, dim=0)

            # flatten
            x = x.reshape(T * N, -1)
            hxs = hxs.transpose(0, 1)

        x = self.norm(x)
        return x, hxs
| 2,816 | 34.2125 | 132 | py |
TiKick | TiKick-main/tmarl/drivers/shared_distributed/base_driver.py | import numpy as np
import torch
def _t2n(x):
return x.detach().cpu().numpy()
class Driver(object):
    """Base rollout/training driver.

    Wires up environments, the algorithm module, the trainer and the shared
    replay buffer; subclasses implement the actual rollout loop via
    ``run``/``warmup``/``collect``/``insert``.
    """

    def __init__(self, config, client=None):
        """Build a driver from a config dict.

        Expected config keys: ``all_args``, ``envs``, ``eval_envs``,
        ``device``, ``num_agents`` and optionally ``signal`` (distributed
        actor id / weight ids).
        """
        self.all_args = config['all_args']
        self.envs = config['envs']
        self.eval_envs = config['eval_envs']
        self.device = config['device']
        self.num_agents = config['num_agents']

        # Distributed-actor bookkeeping; defaults cover the single-actor case.
        if 'signal' in config:
            self.actor_id = config['signal'].actor_id
            self.weight_ids = config['signal'].weight_ids
        else:
            self.actor_id = 0
            self.weight_ids = [0]

        # parameters
        self.env_name = self.all_args.env_name
        self.algorithm_name = self.all_args.algorithm_name
        self.experiment_name = self.all_args.experiment_name
        self.use_centralized_V = self.all_args.use_centralized_V
        self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state
        # Fall back to eval_num when num_env_steps is absent (eval-only runs).
        self.num_env_steps = self.all_args.num_env_steps if hasattr(self.all_args, 'num_env_steps') else self.all_args.eval_num
        self.episode_length = self.all_args.episode_length
        self.n_rollout_threads = self.all_args.n_rollout_threads
        self.learner_n_rollout_threads = self.all_args.n_rollout_threads
        self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads
        self.hidden_size = self.all_args.hidden_size
        self.recurrent_N = self.all_args.recurrent_N

        # interval
        self.save_interval = self.all_args.save_interval
        self.use_eval = self.all_args.use_eval
        self.eval_interval = self.all_args.eval_interval
        self.log_interval = self.all_args.log_interval

        # dir
        self.model_dir = self.all_args.model_dir

        # Only rmappo is wired in; anything else is rejected explicitly.
        if self.algorithm_name == "rmappo":
            from tmarl.algorithms.r_mappo_distributed.mappo_algorithm import MAPPOAlgorithm as TrainAlgo
            from tmarl.algorithms.r_mappo_distributed.mappo_module import MAPPOModule as AlgoModule
        else:
            raise NotImplementedError

        # Build the policy module from train envs when available, else eval envs.
        if self.envs:
            share_observation_space = self.envs.share_observation_space[0] \
                if self.use_centralized_V else self.envs.observation_space[0]
            # policy network
            self.algo_module = AlgoModule(self.all_args,
                                          self.envs.observation_space[0],
                                          share_observation_space,
                                          self.envs.action_space[0],
                                          device=self.device)
        else:
            share_observation_space = self.eval_envs.share_observation_space[0] \
                if self.use_centralized_V else self.eval_envs.observation_space[0]
            # policy network
            self.algo_module = AlgoModule(self.all_args,
                                          self.eval_envs.observation_space[0],
                                          share_observation_space,
                                          self.eval_envs.action_space[0],
                                          device=self.device)

        # Load pretrained weights before handing the module to the trainer.
        if self.model_dir is not None:
            self.restore()

        # algorithm
        self.trainer = TrainAlgo(self.all_args, self.algo_module, device=self.device)

        # buffer
        from tmarl.replay_buffers.normal.shared_buffer import SharedReplayBuffer
        self.buffer = SharedReplayBuffer(self.all_args,
                                         self.num_agents,
                                         self.envs.observation_space[0] if self.envs else self.eval_envs.observation_space[0],
                                         share_observation_space,
                                         self.envs.action_space[0] if self.envs else self.eval_envs.action_space[0])

    def run(self):
        """Main rollout/training loop; implemented by subclasses."""
        raise NotImplementedError

    def warmup(self):
        """Prepare initial observations/buffer state; implemented by subclasses."""
        raise NotImplementedError

    def collect(self, step):
        """Produce actions for environment step ``step``; implemented by subclasses."""
        raise NotImplementedError

    def insert(self, data):
        """Store one transition batch in the buffer; implemented by subclasses."""
        raise NotImplementedError

    def restore(self):
        """Load actor weights from ``<model_dir>/actor.pt`` onto ``self.device``."""
        policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor.pt', map_location=self.device)
        self.algo_module.actor.load_state_dict(policy_actor_state_dict)
| 4,244 | 39.04717 | 126 | py |
TiKick | TiKick-main/tmarl/algorithms/r_mappo_distributed/mappo_algorithm.py | import torch
from tmarl.utils.valuenorm import ValueNorm
# implement the loss of the MAPPO here
class MAPPOAlgorithm():
    """Holds MAPPO loss hyper-parameters and value-normalizer wiring.

    Only rollout preparation is visible here; the actual update step is
    implemented elsewhere.
    """

    def __init__(self,
                 args,
                 init_module,
                 device=torch.device("cpu")):
        """
        Args:
            args: experiment namespace with PPO hyper-parameters and flags.
            init_module: algorithm module exposing the actor network.
            device: torch device for loss tensors.
        """
        self.device = device
        self.tpdv = dict(dtype=torch.float32, device=device)
        self.algo_module = init_module

        # PPO hyper-parameters.
        self.clip_param = args.clip_param
        self.ppo_epoch = args.ppo_epoch
        self.num_mini_batch = args.num_mini_batch
        self.data_chunk_length = args.data_chunk_length
        self.policy_value_loss_coef = args.policy_value_loss_coef
        self.value_loss_coef = args.value_loss_coef
        self.entropy_coef = args.entropy_coef
        self.max_grad_norm = args.max_grad_norm
        self.huber_delta = args.huber_delta

        # Feature flags.
        self._use_recurrent_policy = args.use_recurrent_policy
        self._use_naive_recurrent = args.use_naive_recurrent_policy
        self._use_max_grad_norm = args.use_max_grad_norm
        self._use_clipped_value_loss = args.use_clipped_value_loss
        self._use_huber_loss = args.use_huber_loss
        self._use_popart = args.use_popart
        self._use_valuenorm = args.use_valuenorm
        self._use_value_active_masks = args.use_value_active_masks
        self._use_policy_active_masks = args.use_policy_active_masks
        self._use_policy_vhead = args.use_policy_vhead

        # PopArt and ValueNorm are mutually exclusive return normalizers.
        assert (self._use_popart and self._use_valuenorm) == False, ("self._use_popart and self._use_valuenorm can not be set True simultaneously")

        if self._use_popart:
            # NOTE(review): reads ``algo_module.critic``, but the module built
            # in the visible code path is actor-only -- confirm PopArt is
            # never enabled here, or this would raise AttributeError.
            self.value_normalizer = self.algo_module.critic.v_out
            if self._use_policy_vhead:
                self.policy_value_normalizer = self.algo_module.actor.v_out
        elif self._use_valuenorm:
            self.value_normalizer = ValueNorm(1, device = self.device)
            if self._use_policy_vhead:
                self.policy_value_normalizer = ValueNorm(1, device = self.device)
        else:
            self.value_normalizer = None
            if self._use_policy_vhead:
                self.policy_value_normalizer = None

    def prep_rollout(self):
        """Switch the actor to eval mode for data collection."""
        self.algo_module.actor.eval()
| 2,234 | 38.210526 | 147 | py |
TiKick | TiKick-main/tmarl/algorithms/r_mappo_distributed/mappo_module.py | import torch
from tmarl.networks.policy_network import PolicyNetwork
class MAPPOModule:
    def __init__(self, args, obs_space, share_obs_space, act_space, device=torch.device("cpu")):
        """Actor-only MAPPO module: builds the policy network and its optimizer.

        NOTE(review): ``share_obs_space`` and ``critic_lr`` are stored but no
        critic is built in the visible code -- presumably handled elsewhere.
        """
        self.device = device
        self.lr = args.lr
        self.critic_lr = args.critic_lr
        self.opti_eps = args.opti_eps
        self.weight_decay = args.weight_decay
        self.obs_space = obs_space
        self.share_obs_space = share_obs_space
        self.act_space = act_space

        self.actor = PolicyNetwork(args, self.obs_space, self.act_space, self.device)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr, eps=self.opti_eps, weight_decay=self.weight_decay)
    def get_actions(self, share_obs, obs, rnn_states_actor, rnn_states_critic, masks, available_actions=None, deterministic=False):
        """Sample actions from the actor.

        Mirrors the full actor-critic interface: the value and critic-state
        slots of the returned tuple are ``None`` since this module is
        actor-only; ``share_obs``/``rnn_states_critic`` are accepted but unused.
        """
        actions, action_log_probs, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic)
return None, actions, action_log_probs, rnn_states_actor, None | 1,050 | 41.04 | 135 | py |
TiKick | TiKick-main/tmarl/replay_buffers/normal/shared_buffer.py | import torch
import numpy as np
from collections import defaultdict
from tmarl.utils.util import check,get_shape_from_obs_space, get_shape_from_act_space
def _flatten(T, N, x):
return x.reshape(T * N, *x.shape[2:])
def _cast(x):
return x.transpose(1, 2, 0, 3).reshape(-1, *x.shape[3:])
class SharedReplayBuffer(object):
    def __init__(self, args, num_agents, obs_space, share_obs_space, act_space):
        """Allocate rollout storage shaped (episode_length [+1], n_threads, num_agents, ...).

        Args:
            args: experiment namespace (episode_length, n_rollout_threads, ...).
            num_agents: agents per environment.
            obs_space: per-agent observation space (may be a Dict space).
            share_obs_space: shared/centralized observation space.
            act_space: per-agent action space.
        """
        self.episode_length = args.episode_length
        self.n_rollout_threads = args.n_rollout_threads
        self.hidden_size = args.hidden_size
        self.recurrent_N = args.recurrent_N
        self.gamma = args.gamma
        self.gae_lambda = args.gae_lambda
        self._use_gae = args.use_gae
        self._use_popart = args.use_popart
        self._use_valuenorm = args.use_valuenorm
        self._use_proper_time_limits = args.use_proper_time_limits
        self._mixed_obs = False  # for mixed observation

        obs_shape = get_shape_from_obs_space(obs_space)
        share_obs_shape = get_shape_from_obs_space(share_obs_space)

        # for mixed observation: Dict spaces get one array per key.
        if 'Dict' in obs_shape.__class__.__name__:
            self._mixed_obs = True
            self.obs = {}
            self.share_obs = {}
            for key in obs_shape:
                self.obs[key] = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *obs_shape[key].shape), dtype=np.float32)
            for key in share_obs_shape:
                self.share_obs[key] = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *share_obs_shape[key].shape), dtype=np.float32)
        else:
            # deal with special attn format
            if type(obs_shape[-1]) == list:
                obs_shape = obs_shape[:1]
            if type(share_obs_shape[-1]) == list:
                share_obs_shape = share_obs_shape[:1]

            self.share_obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *share_obs_shape), dtype=np.float32)
            self.obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *obs_shape), dtype=np.float32)

        # Recurrent hidden states for actor and critic.
        self.rnn_states = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)
        self.rnn_states_critic = np.zeros_like(self.rnn_states)

        self.value_preds = np.zeros(
            (self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
        self.returns = np.zeros_like(self.value_preds)

        # Legal-action masks only exist for plain Discrete spaces.
        if act_space.__class__.__name__ == 'Discrete':
            self.available_actions = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, act_space.n), dtype=np.float32)
        else:
            self.available_actions = None

        act_shape = get_shape_from_act_space(act_space)

        self.actions = np.zeros(
            (self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)
        self.action_log_probs = np.zeros(
            (self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)
        self.rewards = np.zeros(
            (self.episode_length, self.n_rollout_threads, num_agents, 1), dtype=np.float32)

        # masks: 0 marks episode termination; bad_masks flags time-limit ends;
        # active_masks marks per-agent aliveness within an episode.
        self.masks = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
        self.bad_masks = np.ones_like(self.masks)
        self.active_masks = np.ones_like(self.masks)

        # Circular write cursor into the time axis.
        self.step = 0
def insert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,
           value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):
    """Store one environment transition in the rollout buffer.

    Next-step data (observations, RNN states, masks, available_actions) is
    written at index ``self.step + 1``; current-step data (actions, log-probs,
    value predictions, rewards) is written at index ``self.step``.  The write
    pointer then advances modulo ``episode_length`` (circular buffer).

    The optional ``bad_masks`` / ``active_masks`` / ``available_actions`` are
    only written when provided.
    """
    if self._mixed_obs:
        # dict-style observations: copy each modality separately
        for key in self.share_obs.keys():
            self.share_obs[key][self.step + 1] = share_obs[key].copy()
        for key in self.obs.keys():
            self.obs[key][self.step + 1] = obs[key].copy()
    else:
        self.share_obs[self.step + 1] = share_obs.copy()
        self.obs[self.step + 1] = obs.copy()
    self.rnn_states[self.step + 1] = rnn_states.copy()
    self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
    self.actions[self.step] = actions.copy()
    self.action_log_probs[self.step] = action_log_probs.copy()
    self.value_preds[self.step] = value_preds.copy()
    self.rewards[self.step] = rewards.copy()
    self.masks[self.step + 1] = masks.copy()
    if bad_masks is not None:
        self.bad_masks[self.step + 1] = bad_masks.copy()
    if active_masks is not None:
        self.active_masks[self.step + 1] = active_masks.copy()
    if available_actions is not None:
        self.available_actions[self.step + 1] = available_actions.copy()

    # advance the circular write pointer
    self.step = (self.step + 1) % self.episode_length
def init_buffer(self, share_obs, obs):
    """Seed timestep 0 of the buffer with the environments' initial observations."""
    start = 0
    self.obs[start] = obs
    self.share_obs[start] = share_obs
def chooseinsert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,
                 value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):
    """Variant of ``insert`` used by the "choose" rollout path.

    Unlike ``insert``, observations, ``active_masks`` and
    ``available_actions`` are written at the *current* index ``self.step``
    (not ``step + 1``); only the RNN states and masks shift to
    ``self.step + 1``.
    """
    self.share_obs[self.step] = share_obs.copy()
    self.obs[self.step] = obs.copy()
    self.rnn_states[self.step + 1] = rnn_states.copy()
    self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
    self.actions[self.step] = actions.copy()
    self.action_log_probs[self.step] = action_log_probs.copy()
    self.value_preds[self.step] = value_preds.copy()
    self.rewards[self.step] = rewards.copy()
    self.masks[self.step + 1] = masks.copy()
    if bad_masks is not None:
        self.bad_masks[self.step + 1] = bad_masks.copy()
    if active_masks is not None:
        self.active_masks[self.step] = active_masks.copy()
    if available_actions is not None:
        self.available_actions[self.step] = available_actions.copy()

    # advance the circular write pointer
    self.step = (self.step + 1) % self.episode_length
def after_update(self):
    """Copy the final timestep of every carried-over array to index 0.

    Called after a training update so the next rollout continues seamlessly
    from the last state of the previous rollout.
    """
    if self._mixed_obs:
        for key in self.share_obs.keys():
            self.share_obs[key][0] = self.share_obs[key][-1].copy()
        for key in self.obs.keys():
            self.obs[key][0] = self.obs[key][-1].copy()
    else:
        self.share_obs[0] = self.share_obs[-1].copy()
        self.obs[0] = self.obs[-1].copy()
    self.rnn_states[0] = self.rnn_states[-1].copy()
    self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()
    self.masks[0] = self.masks[-1].copy()
    self.bad_masks[0] = self.bad_masks[-1].copy()
    self.active_masks[0] = self.active_masks[-1].copy()
    if self.available_actions is not None:
        self.available_actions[0] = self.available_actions[-1].copy()
def chooseafter_update(self):
    """Carry the final recurrent states and masks over to slot 0 for the next rollout."""
    carried = ("rnn_states", "rnn_states_critic", "masks", "bad_masks")
    for attr_name in carried:
        array = getattr(self, attr_name)
        array[0] = array[-1].copy()
def compute_returns(self, next_value, value_normalizer=None):
    """Compute return targets for the value function, in place, backwards in time.

    Four code paths: with/without proper time limits (``bad_masks`` marks
    truncated-but-not-done steps) crossed with GAE vs. plain discounted
    returns.  When PopArt/ValueNorm is active, stored value predictions are
    denormalized before entering the TD error.

    Args:
        next_value: value estimate for the step after the last buffer step;
            seeds the backward recursion.
        value_normalizer: PopArt/ValueNorm instance; required when
            ``_use_popart`` or ``_use_valuenorm`` is set.
    """
    if self._use_proper_time_limits:
        if self._use_gae:
            self.value_preds[-1] = next_value
            gae = 0
            for step in reversed(range(self.rewards.shape[0])):
                if self._use_popart or self._use_valuenorm:
                    # step + 1
                    delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[step + 1]) * self.masks[step + 1] \
                        - value_normalizer.denormalize(self.value_preds[step])
                    gae = delta + self.gamma * self.gae_lambda * gae * self.masks[step + 1]
                    # zero the accumulated advantage at truncated (bad) steps
                    gae = gae * self.bad_masks[step + 1]
                    self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])
                else:
                    delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step]
                    gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
                    gae = gae * self.bad_masks[step + 1]
                    self.returns[step] = gae + self.value_preds[step]
        else:
            self.returns[-1] = next_value
            for step in reversed(range(self.rewards.shape[0])):
                if self._use_popart or self._use_valuenorm:
                    # at truncated steps, bootstrap from the (denormalized) value prediction
                    self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \
                        + (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(self.value_preds[step])
                else:
                    self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \
                        + (1 - self.bad_masks[step + 1]) * self.value_preds[step]
    else:
        if self._use_gae:
            self.value_preds[-1] = next_value
            gae = 0
            for step in reversed(range(self.rewards.shape[0])):
                if self._use_popart or self._use_valuenorm:
                    delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[step + 1]) * self.masks[step + 1] \
                        - value_normalizer.denormalize(self.value_preds[step])
                    gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
                    self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])
                else:
                    delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step]
                    gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
                    self.returns[step] = gae + self.value_preds[step]
        else:
            self.returns[-1] = next_value
            for step in reversed(range(self.rewards.shape[0])):
                self.returns[step] = self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]
def feed_forward_generator(self, advantages, num_mini_batch=None, mini_batch_size=None):
    """Yield shuffled mini-batches of flattened (non-recurrent) samples.

    Flattens the (T, N, M, ...) rollout arrays to (T*N*M, ...) and yields
    ``num_mini_batch`` random index batches.  ``mini_batch_size`` is derived
    from ``num_mini_batch`` when not given explicitly.
    """
    episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
    batch_size = n_rollout_threads * episode_length * num_agents

    if mini_batch_size is None:
        assert batch_size >= num_mini_batch, (
            "PPO requires the number of processes ({}) "
            "* number of steps ({}) * number of agents ({}) = {} "
            "to be greater than or equal to the number of PPO mini batches ({})."
            "".format(n_rollout_threads, episode_length, num_agents, n_rollout_threads * episode_length * num_agents,
                      num_mini_batch))
        mini_batch_size = batch_size // num_mini_batch

    rand = torch.randperm(batch_size).numpy()
    sampler = [rand[i*mini_batch_size:(i+1)*mini_batch_size] for i in range(num_mini_batch)]

    if self._mixed_obs:
        share_obs = {}
        obs = {}
        for key in self.share_obs.keys():
            share_obs[key] = self.share_obs[key][:-1].reshape(-1, *self.share_obs[key].shape[3:])
        for key in self.obs.keys():
            obs[key] = self.obs[key][:-1].reshape(-1, *self.obs[key].shape[3:])
    else:
        share_obs = self.share_obs[:-1].reshape(-1, *self.share_obs.shape[3:])
        obs = self.obs[:-1].reshape(-1, *self.obs.shape[3:])

    rnn_states = self.rnn_states[:-1].reshape(-1, *self.rnn_states.shape[3:])
    rnn_states_critic = self.rnn_states_critic[:-1].reshape(-1, *self.rnn_states_critic.shape[3:])
    actions = self.actions.reshape(-1, self.actions.shape[-1])
    if self.available_actions is not None:
        available_actions = self.available_actions[:-1].reshape(-1, self.available_actions.shape[-1])
    value_preds = self.value_preds[:-1].reshape(-1, 1)
    returns = self.returns[:-1].reshape(-1, 1)
    masks = self.masks[:-1].reshape(-1, 1)
    active_masks = self.active_masks[:-1].reshape(-1, 1)
    action_log_probs = self.action_log_probs.reshape(-1, self.action_log_probs.shape[-1])
    # NOTE(review): this reshape would raise if advantages were None, which
    # makes the `if advantages is None` check inside the loop unreachable —
    # confirm whether a None advantages input is ever expected here.
    advantages = advantages.reshape(-1, 1)

    for indices in sampler:
        # obs size [T+1 N M Dim]-->[T N M Dim]-->[T*N*M,Dim]-->[index,Dim]
        if self._mixed_obs:
            share_obs_batch = {}
            obs_batch = {}
            for key in share_obs.keys():
                share_obs_batch[key] = share_obs[key][indices]
            for key in obs.keys():
                obs_batch[key] = obs[key][indices]
        else:
            share_obs_batch = share_obs[indices]
            obs_batch = obs[indices]
        rnn_states_batch = rnn_states[indices]
        rnn_states_critic_batch = rnn_states_critic[indices]
        actions_batch = actions[indices]
        if self.available_actions is not None:
            available_actions_batch = available_actions[indices]
        else:
            available_actions_batch = None
        value_preds_batch = value_preds[indices]
        return_batch = returns[indices]
        masks_batch = masks[indices]
        active_masks_batch = active_masks[indices]
        old_action_log_probs_batch = action_log_probs[indices]
        if advantages is None:
            adv_targ = None
        else:
            adv_targ = advantages[indices]

        yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
def naive_recurrent_generator(self, advantages, num_mini_batch):
    """Yield whole-episode mini-batches for naive recurrent training.

    Each sample keeps an entire environment/agent trajectory intact
    (T = episode_length) and only the initial RNN state is included, so the
    network is unrolled over the full episode.  Yields arrays flattened to
    (T * N, ...) with N = environments per mini-batch.
    """
    episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
    batch_size = n_rollout_threads*num_agents
    assert n_rollout_threads*num_agents >= num_mini_batch, (
        "PPO requires the number of processes ({})* number of agents ({}) "
        "to be greater than or equal to the number of "
        "PPO mini batches ({}).".format(n_rollout_threads, num_agents, num_mini_batch))
    num_envs_per_batch = batch_size // num_mini_batch
    perm = torch.randperm(batch_size).numpy()

    # reshape everything to (time, env*agent, ...)
    if self._mixed_obs:
        share_obs = {}
        obs = {}
        for key in self.share_obs.keys():
            share_obs[key] = self.share_obs[key].reshape(-1, batch_size, *self.share_obs[key].shape[3:])
        for key in self.obs.keys():
            obs[key] = self.obs[key].reshape(-1, batch_size, *self.obs[key].shape[3:])
    else:
        share_obs = self.share_obs.reshape(-1, batch_size, *self.share_obs.shape[3:])
        obs = self.obs.reshape(-1, batch_size, *self.obs.shape[3:])

    rnn_states = self.rnn_states.reshape(-1, batch_size, *self.rnn_states.shape[3:])
    rnn_states_critic = self.rnn_states_critic.reshape(-1, batch_size, *self.rnn_states_critic.shape[3:])
    actions = self.actions.reshape(-1, batch_size, self.actions.shape[-1])
    if self.available_actions is not None:
        available_actions = self.available_actions.reshape(-1, batch_size, self.available_actions.shape[-1])
    value_preds = self.value_preds.reshape(-1, batch_size, 1)
    returns = self.returns.reshape(-1, batch_size, 1)
    masks = self.masks.reshape(-1, batch_size, 1)
    active_masks = self.active_masks.reshape(-1, batch_size, 1)
    action_log_probs = self.action_log_probs.reshape(-1, batch_size, self.action_log_probs.shape[-1])
    advantages = advantages.reshape(-1, batch_size, 1)

    for start_ind in range(0, batch_size, num_envs_per_batch):
        if self._mixed_obs:
            share_obs_batch = defaultdict(list)
            obs_batch = defaultdict(list)
        else:
            share_obs_batch = []
            obs_batch = []
        rnn_states_batch = []
        rnn_states_critic_batch = []
        actions_batch = []
        available_actions_batch = []
        value_preds_batch = []
        return_batch = []
        masks_batch = []
        active_masks_batch = []
        old_action_log_probs_batch = []
        adv_targ = []

        # gather num_envs_per_batch full trajectories chosen by the permutation
        for offset in range(num_envs_per_batch):
            ind = perm[start_ind + offset]
            if self._mixed_obs:
                for key in share_obs.keys():
                    share_obs_batch[key].append(share_obs[key][:-1, ind])
                for key in obs.keys():
                    obs_batch[key].append(obs[key][:-1, ind])
            else:
                share_obs_batch.append(share_obs[:-1, ind])
                obs_batch.append(obs[:-1, ind])
            rnn_states_batch.append(rnn_states[0:1, ind])
            rnn_states_critic_batch.append(rnn_states_critic[0:1, ind])
            actions_batch.append(actions[:, ind])
            if self.available_actions is not None:
                available_actions_batch.append(available_actions[:-1, ind])
            value_preds_batch.append(value_preds[:-1, ind])
            return_batch.append(returns[:-1, ind])
            masks_batch.append(masks[:-1, ind])
            active_masks_batch.append(active_masks[:-1, ind])
            old_action_log_probs_batch.append(action_log_probs[:, ind])
            adv_targ.append(advantages[:, ind])

        # [N[T, dim]]
        T, N = self.episode_length, num_envs_per_batch
        # These are all from_numpys of size (T, N, -1)
        if self._mixed_obs:
            for key in share_obs_batch.keys():
                share_obs_batch[key] = np.stack(share_obs_batch[key], 1)
            for key in obs_batch.keys():
                obs_batch[key] = np.stack(obs_batch[key], 1)
        else:
            share_obs_batch = np.stack(share_obs_batch, 1)
            obs_batch = np.stack(obs_batch, 1)
        actions_batch = np.stack(actions_batch, 1)
        if self.available_actions is not None:
            available_actions_batch = np.stack(available_actions_batch, 1)
        value_preds_batch = np.stack(value_preds_batch, 1)
        return_batch = np.stack(return_batch, 1)
        masks_batch = np.stack(masks_batch, 1)
        active_masks_batch = np.stack(active_masks_batch, 1)
        old_action_log_probs_batch = np.stack(old_action_log_probs_batch, 1)
        adv_targ = np.stack(adv_targ, 1)

        # States is just a (N, dim) from_numpy [N[1,dim]]
        rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:])
        rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:])

        # Flatten the (T, N, ...) from_numpys to (T * N, ...)
        if self._mixed_obs:
            for key in share_obs_batch.keys():
                share_obs_batch[key] = _flatten(T, N, share_obs_batch[key])
            for key in obs_batch.keys():
                obs_batch[key] = _flatten(T, N, obs_batch[key])
        else:
            share_obs_batch = _flatten(T, N, share_obs_batch)
            obs_batch = _flatten(T, N, obs_batch)
        actions_batch = _flatten(T, N, actions_batch)
        if self.available_actions is not None:
            available_actions_batch = _flatten(T, N, available_actions_batch)
        else:
            available_actions_batch = None
        value_preds_batch = _flatten(T, N, value_preds_batch)
        return_batch = _flatten(T, N, return_batch)
        masks_batch = _flatten(T, N, masks_batch)
        active_masks_batch = _flatten(T, N, active_masks_batch)
        old_action_log_probs_batch = _flatten(T, N, old_action_log_probs_batch)
        adv_targ = _flatten(T, N, adv_targ)

        yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
def recurrent_generator(self, advantages, num_mini_batch, data_chunk_length):
    """Yield mini-batches of fixed-length time chunks for recurrent PPO.

    The rollout is reordered to be time-contiguous per env/agent, cut into
    chunks of ``data_chunk_length`` steps, and shuffled at chunk granularity.
    Each chunk carries only the RNN state at its first step, so BPTT runs over
    the chunk.  Yields arrays flattened to (L * N, ...) with
    L = data_chunk_length and N = chunks per mini-batch.
    """
    episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
    batch_size = n_rollout_threads * episode_length * num_agents
    data_chunks = batch_size // data_chunk_length  # [C=r*T*M/L]
    mini_batch_size = data_chunks // num_mini_batch

    assert n_rollout_threads * episode_length * num_agents >= data_chunk_length, (
        "PPO requires the number of processes ({})* number of agents ({}) * episode length ({}) "
        "to be greater than or equal to the number of "
        "data chunk length ({}).".format(n_rollout_threads, num_agents, episode_length ,data_chunk_length))

    rand = torch.randperm(data_chunks).numpy()
    sampler = [rand[i*mini_batch_size:(i+1)*mini_batch_size] for i in range(num_mini_batch)]

    # transpose to (env, agent, time, ...) then flatten so consecutive rows
    # are consecutive timesteps of the same env/agent trajectory
    if self._mixed_obs:
        share_obs = {}
        obs = {}
        for key in self.share_obs.keys():
            if len(self.share_obs[key].shape) == 6:
                share_obs[key] = self.share_obs[key][:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.share_obs[key].shape[3:])
            elif len(self.share_obs[key].shape) == 5:
                share_obs[key] = self.share_obs[key][:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.share_obs[key].shape[3:])
            else:
                share_obs[key] = _cast(self.share_obs[key][:-1])
        for key in self.obs.keys():
            if len(self.obs[key].shape) == 6:
                obs[key] = self.obs[key][:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.obs[key].shape[3:])
            elif len(self.obs[key].shape) == 5:
                obs[key] = self.obs[key][:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.obs[key].shape[3:])
            else:
                obs[key] = _cast(self.obs[key][:-1])
    else:
        if len(self.share_obs.shape) > 4:
            share_obs = self.share_obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.share_obs.shape[3:])
            obs = self.obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.obs.shape[3:])
        else:
            share_obs = _cast(self.share_obs[:-1])
            obs = _cast(self.obs[:-1])

    actions = _cast(self.actions)
    action_log_probs = _cast(self.action_log_probs)
    advantages = _cast(advantages)
    value_preds = _cast(self.value_preds[:-1])
    returns = _cast(self.returns[:-1])
    masks = _cast(self.masks[:-1])
    active_masks = _cast(self.active_masks[:-1])
    # rnn_states = _cast(self.rnn_states[:-1])
    # rnn_states_critic = _cast(self.rnn_states_critic[:-1])
    rnn_states = self.rnn_states[:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.rnn_states.shape[3:])
    rnn_states_critic = self.rnn_states_critic[:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.rnn_states_critic.shape[3:])

    if self.available_actions is not None:
        available_actions = _cast(self.available_actions[:-1])

    for indices in sampler:
        if self._mixed_obs:
            share_obs_batch = defaultdict(list)
            obs_batch = defaultdict(list)
        else:
            share_obs_batch = []
            obs_batch = []
        rnn_states_batch = []
        rnn_states_critic_batch = []
        actions_batch = []
        available_actions_batch = []
        value_preds_batch = []
        return_batch = []
        masks_batch = []
        active_masks_batch = []
        old_action_log_probs_batch = []
        adv_targ = []

        for index in indices:
            ind = index * data_chunk_length
            # size [T+1 N M Dim]-->[T N M Dim]-->[N,M,T,Dim]-->[N*M*T,Dim]-->[L,Dim]
            if self._mixed_obs:
                for key in share_obs.keys():
                    share_obs_batch[key].append(share_obs[key][ind:ind+data_chunk_length])
                for key in obs.keys():
                    obs_batch[key].append(obs[key][ind:ind+data_chunk_length])
            else:
                share_obs_batch.append(share_obs[ind:ind+data_chunk_length])
                obs_batch.append(obs[ind:ind+data_chunk_length])
            actions_batch.append(actions[ind:ind+data_chunk_length])
            if self.available_actions is not None:
                available_actions_batch.append(available_actions[ind:ind+data_chunk_length])
            value_preds_batch.append(value_preds[ind:ind+data_chunk_length])
            return_batch.append(returns[ind:ind+data_chunk_length])
            masks_batch.append(masks[ind:ind+data_chunk_length])
            active_masks_batch.append(active_masks[ind:ind+data_chunk_length])
            old_action_log_probs_batch.append(action_log_probs[ind:ind+data_chunk_length])
            adv_targ.append(advantages[ind:ind+data_chunk_length])
            # size [T+1 N M Dim]-->[T N M Dim]-->[N M T Dim]-->[N*M*T,Dim]-->[1,Dim]
            rnn_states_batch.append(rnn_states[ind])
            rnn_states_critic_batch.append(rnn_states_critic[ind])

        L, N = data_chunk_length, mini_batch_size
        # These are all from_numpys of size (L, N, Dim)
        if self._mixed_obs:
            for key in share_obs_batch.keys():
                share_obs_batch[key] = np.stack(share_obs_batch[key], axis=1)
            for key in obs_batch.keys():
                obs_batch[key] = np.stack(obs_batch[key], axis=1)
        else:
            share_obs_batch = np.stack(share_obs_batch, axis=1)
            obs_batch = np.stack(obs_batch, axis=1)
        actions_batch = np.stack(actions_batch, axis=1)
        if self.available_actions is not None:
            available_actions_batch = np.stack(available_actions_batch, axis=1)
        value_preds_batch = np.stack(value_preds_batch, axis=1)
        return_batch = np.stack(return_batch, axis=1)
        masks_batch = np.stack(masks_batch, axis=1)
        active_masks_batch = np.stack(active_masks_batch, axis=1)
        old_action_log_probs_batch = np.stack(old_action_log_probs_batch, axis=1)
        adv_targ = np.stack(adv_targ, axis=1)

        # States is just a (N, -1) from_numpy
        rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:])
        rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:])

        # Flatten the (L, N, ...) from_numpys to (L * N, ...)
        if self._mixed_obs:
            for key in share_obs_batch.keys():
                share_obs_batch[key] = _flatten(L, N, share_obs_batch[key])
            for key in obs_batch.keys():
                obs_batch[key] = _flatten(L, N, obs_batch[key])
        else:
            share_obs_batch = _flatten(L, N, share_obs_batch)
            obs_batch = _flatten(L, N, obs_batch)
        actions_batch = _flatten(L, N, actions_batch)
        if self.available_actions is not None:
            available_actions_batch = _flatten(L, N, available_actions_batch)
        else:
            available_actions_batch = None
        value_preds_batch = _flatten(L, N, value_preds_batch)
        return_batch = _flatten(L, N, return_batch)
        masks_batch = _flatten(L, N, masks_batch)
        active_masks_batch = _flatten(L, N, active_masks_batch)
        old_action_log_probs_batch = _flatten(L, N, old_action_log_probs_batch)
        adv_targ = _flatten(L, N, adv_targ)

        yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
| 28,769 | 52.081181 | 231 | py |
TiKick | TiKick-main/tmarl/configs/config.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import argparse
def get_config():
    """Build the argument parser holding every TiKick training/eval option.

    Returns:
        argparse.ArgumentParser: parser pre-populated with algorithm, env,
        replay-buffer, network, optimizer, PPO, logging and evaluation
        arguments.  Callers typically extend it with env-specific flags
        before parsing.

    Note:
        Flags declared with ``action='store_false'`` default to True and are
        *disabled* by passing them; help texts below state the actual default
        (several previously claimed wrong defaults, e.g. 0.5 vs 1).
    """
    parser = argparse.ArgumentParser(
        description='TiKick', formatter_class=argparse.RawDescriptionHelpFormatter)

    # prepare parameters
    parser.add_argument("--algorithm_name", type=str,
                        default='rmappo', choices=["rmappo"])
    parser.add_argument("--experiment_name", type=str, default="check",
                        help="an identifier to distinguish different experiment.")
    parser.add_argument("--seed", type=int, default=1,
                        help="Random seed for numpy/torch")
    parser.add_argument("--disable_cuda", action='store_true', default=False,
                        help="by default False, will use GPU to train; or else will use CPU;")
    parser.add_argument("--cuda_deterministic",
                        action='store_false', default=True,
                        help="by default, make sure random seed effective. if set, bypass such function.")
    parser.add_argument("--n_rollout_threads", type=int, default=2,
                        help="Number of parallel envs for training rollout")
    parser.add_argument("--n_eval_rollout_threads", type=int, default=1,
                        help="Number of parallel envs for evaluating rollout")
    parser.add_argument("--n_render_rollout_threads", type=int, default=1,
                        help="Number of parallel envs for rendering rollout")
    parser.add_argument("--eval_num", type=int, default=1,
                        help='Number of environment steps to evaluate (default: 1)')

    # env parameters
    parser.add_argument("--env_name", type=str, default='StarCraft2',
                        help="specify the name of environment")
    parser.add_argument("--use_obs_instead_of_state", action='store_true',
                        default=False, help="Whether to use global state or concatenated obs")

    # replay buffer parameters
    parser.add_argument("--episode_length", type=int,
                        default=200, help="Max length for any episode")

    # network parameters
    parser.add_argument("--separate_policy", action='store_true',
                        default=False, help='Whether agent seperate the policy')
    parser.add_argument("--use_centralized_V", action='store_false',
                        default=True, help="Whether to use centralized V function")
    parser.add_argument("--use_conv1d", action='store_true',
                        default=False, help="Whether to use conv1d")
    parser.add_argument("--stacked_frames", type=int, default=1,
                        help="Number of input frames stacked together (default: 1)")
    parser.add_argument("--use_stacked_frames", action='store_true',
                        default=False, help="Whether to use stacked_frames")
    parser.add_argument("--hidden_size", type=int, default=256,
                        help="Dimension of hidden layers for actor/critic networks")
    parser.add_argument("--layer_N", type=int, default=3,
                        help="Number of layers for actor/critic networks")
    parser.add_argument("--activation_id", type=int,
                        default=1, help="choose 0 to use tanh, 1 to use relu, 2 to use leaky relu, 3 to use elu")
    parser.add_argument("--use_popart", action='store_true', default=False,
                        help="by default False, use PopArt to normalize rewards.")
    parser.add_argument("--use_valuenorm", action='store_false', default=True,
                        help="by default True, use running mean and std to normalize rewards.")
    parser.add_argument("--use_feature_normalization", action='store_false',
                        default=True, help="Whether to apply layernorm to the inputs")
    parser.add_argument("--use_orthogonal", action='store_false', default=True,
                        help="Whether to use Orthogonal initialization for weights and 0 initialization for biases")
    parser.add_argument("--gain", type=float, default=0.01,
                        help="The gain # of last action layer")
    parser.add_argument("--cnn_layers_params", type=str, default=None,
                        help="The parameters of cnn layer")
    parser.add_argument("--use_maxpool2d", action='store_true',
                        default=False, help="Whether to use maxpool2d after conv layers")

    # recurrent parameters
    parser.add_argument("--use_naive_recurrent_policy", action='store_true',
                        default=False, help='Whether to use a naive recurrent policy')
    parser.add_argument("--use_recurrent_policy", action='store_false',
                        default=True, help='by default True, use a recurrent policy. if set, do not use.')
    parser.add_argument("--recurrent_N", type=int, default=1,
                        help="The number of recurrent layers.")
    parser.add_argument("--data_chunk_length", type=int, default=25,
                        help="Time length of chunks used to train a recurrent_policy")
    parser.add_argument("--use_influence_policy", action='store_true',
                        default=False, help='by default False, use influence policy. if set, use it.')
    parser.add_argument("--influence_layer_N", type=int, default=1,
                        help="Number of layers for the influence network")

    # optimizer parameters
    parser.add_argument("--lr", type=float, default=5e-4,
                        help='learning rate (default: 5e-4)')
    parser.add_argument("--tau", type=float, default=0.995,
                        help='soft update polyak (default: 0.995)')
    parser.add_argument("--critic_lr", type=float, default=5e-4,
                        help='critic learning rate (default: 5e-4)')
    parser.add_argument("--opti_eps", type=float, default=1e-5,
                        help='RMSprop optimizer epsilon (default: 1e-5)')
    parser.add_argument("--weight_decay", type=float, default=0)

    # ppo parameters
    parser.add_argument("--ppo_epoch", type=int, default=15,
                        help='number of ppo epochs (default: 15)')
    parser.add_argument("--use_policy_vhead",
                        action='store_true', default=False,
                        help="by default, do not use policy vhead. if set, use policy vhead.")
    parser.add_argument("--use_clipped_value_loss",
                        action='store_false', default=True,
                        help="by default, clip loss value. If set, do not clip loss value.")
    parser.add_argument("--clip_param", type=float, default=0.2,
                        help='ppo clip parameter (default: 0.2)')
    parser.add_argument("--num_mini_batch", type=int, default=1,
                        help='number of batches for ppo (default: 1)')
    parser.add_argument("--policy_value_loss_coef", type=float,
                        default=1, help='policy value loss coefficient (default: 1)')
    parser.add_argument("--entropy_coef", type=float, default=0.01,
                        help='entropy term coefficient (default: 0.01)')
    parser.add_argument("--value_loss_coef", type=float,
                        default=1, help='value loss coefficient (default: 1)')
    parser.add_argument("--use_max_grad_norm",
                        action='store_false', default=True,
                        help="by default, use max norm of gradients. If set, do not use.")
    parser.add_argument("--max_grad_norm", type=float, default=10.0,
                        help='max norm of gradients (default: 10.0)')
    parser.add_argument("--use_gae", action='store_false',
                        default=True, help='use generalized advantage estimation')
    parser.add_argument("--gamma", type=float, default=0.99,
                        help='discount factor for rewards (default: 0.99)')
    parser.add_argument("--gae_lambda", type=float, default=0.95,
                        help='gae lambda parameter (default: 0.95)')
    parser.add_argument("--use_proper_time_limits", action='store_true',
                        default=False, help='compute returns taking into account time limits')
    parser.add_argument("--use_huber_loss", action='store_false', default=True,
                        help="by default, use huber loss. If set, do not use huber loss.")
    parser.add_argument("--use_value_active_masks",
                        action='store_false', default=True,
                        help="by default True, whether to mask useless data in value loss.")
    parser.add_argument("--use_policy_active_masks",
                        action='store_false', default=True,
                        help="by default True, whether to mask useless data in policy loss.")
    parser.add_argument("--huber_delta", type=float,
                        default=10.0, help=" coefficience of huber loss.")

    # save parameters
    parser.add_argument("--save_interval", type=int, default=1,
                        help="time duration between contiunous twice models saving.")

    # log parameters
    parser.add_argument("--log_interval", type=int, default=5,
                        help="time duration between contiunous twice log printing.")

    # eval parameters
    parser.add_argument("--use_eval", action='store_true', default=False,
                        help="by default, do not start evaluation. If set, start evaluation alongside with training.")
    parser.add_argument("--eval_interval", type=int, default=25,
                        help="time duration between contiunous twice evaluation progress.")
    parser.add_argument("--eval_episodes", type=int, default=64,
                        help="number of episodes of a single evaluation.")

    # pretrained parameters
    parser.add_argument("--model_dir", type=str, default=None,
                        help="by default None. set the path to pretrained model.")
    parser.add_argument("--replay_save_dir", type=str, default=None,
                        help="replay file save dir")

    return parser
| 10,665 | 55.734043 | 146 | py |
TiKick | TiKick-main/tmarl/runners/base_evaluator.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import random
import numpy as np
import torch
from tmarl.configs.config import get_config
from tmarl.runners.base_runner import Runner
def set_seed(seed):
    """Seed every RNG source in use (python random, numpy, torch CPU and CUDA)."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
class Evaluator(Runner):
    """Runner that only evaluates pretrained policies — no training envs are built.

    Subclasses provide the environment class, extra CLI arguments and the
    evaluation driver via ``get_env``, ``extra_args_func`` and ``init_driver``.
    """

    def __init__(self, argv,program_type=None, client=None):
        # NOTE(review): program_type and client are accepted but unused in this
        # base class — presumably consumed by subclasses; confirm before removing.
        super().__init__(argv)

        parser = get_config()
        all_args = self.extra_args_func(argv, parser)

        all_args.cuda = not all_args.disable_cuda

        self.algorithm_name = all_args.algorithm_name

        # cuda: use GPU 0 when available unless explicitly disabled
        if not all_args.disable_cuda and torch.cuda.is_available():
            device = torch.device("cuda:0")
            if all_args.cuda_deterministic:
                # deterministic cudnn trades speed for reproducibility
                torch.backends.cudnn.benchmark = False
                torch.backends.cudnn.deterministic = True
        else:
            print("choose to use cpu...")
            device = torch.device("cpu")

        # run dir
        run_dir = self.setup_run_dir(all_args)

        # env init: evaluation environments only (no training envs)
        Env_Class, SubprocVecEnv, DummyVecEnv = self.get_env()
        eval_envs = self.env_init(
            all_args, Env_Class, SubprocVecEnv, DummyVecEnv)
        num_agents = all_args.num_agents

        config = {
            "all_args": all_args,
            "envs": None,
            "eval_envs": eval_envs,
            "num_agents": num_agents,
            "device": device,
            "run_dir": run_dir,
        }

        self.all_args, self.envs, self.eval_envs, self.config \
            = all_args, None, eval_envs, config

        self.driver = self.init_driver()

    def run(self):
        """Run the evaluation driver to completion, then shut down."""
        # run experiments
        self.driver.run()
        self.stop()

    def stop(self):
        """Hook for subclass cleanup; no-op in the base class."""
        pass

    def extra_args_func(self, argv, parser):
        """Parse env/algorithm-specific CLI arguments; must be overridden."""
        raise NotImplementedError

    def get_env(self):
        """Return (Env_Class, SubprocVecEnv, DummyVecEnv); must be overridden."""
        raise NotImplementedError

    def init_driver(self):
        """Build and return the evaluation driver; must be overridden."""
        raise NotImplementedError

    def make_eval_env(self, all_args, Env_Class, SubprocVecEnv, DummyVecEnv):
        """Create the vectorized eval env (subprocess-based when >1 thread)."""
        def get_env_fn(rank):
            def init_env():
                env = Env_Class(all_args)
                # distinct seed per worker so rollouts differ across threads
                env.seed(all_args.seed * 50000 + rank * 10000)
                return env
            return init_env
        if all_args.n_eval_rollout_threads == 1:
            return DummyVecEnv([get_env_fn(0)])
        else:
            return SubprocVecEnv([get_env_fn(i) for i in range(all_args.n_eval_rollout_threads)])

    def env_init(self, all_args, Env_Class, SubprocVecEnv, DummyVecEnv):
        """Build eval envs only when ``--use_eval`` is set; otherwise return None."""
        eval_envs = self.make_eval_env(
            all_args, Env_Class, SubprocVecEnv, DummyVecEnv) if all_args.use_eval else None
        return eval_envs

    def setup_run_dir(self, all_args):
        """Hook for subclasses to create and return an output directory."""
        return None
| 3,402 | 28.08547 | 97 | py |
TiKick | TiKick-main/tmarl/runners/base_runner.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import os
import random
import socket
import setproctitle
import numpy as np
from pathlib import Path
import torch
from tmarl.configs.config import get_config
def set_seed(seed):
    """Seed the python, numpy and torch (CPU + all CUDA) RNGs for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
class Runner:
    """Abstract experiment runner; concrete runners implement ``run``."""

    def __init__(self, argv):
        # Raw command-line arguments, parsed later by the concrete runner.
        self.argv = argv

    def run(self):
        # main run
        raise NotImplementedError | 1,079 | 22.478261 | 74 | py |
TiKick | TiKick-main/tmarl/utils/valuenorm.py |
import numpy as np
import torch
import torch.nn as nn
class ValueNorm(nn.Module):
    """Normalize a vector of observations - across the first norm_axes dimensions.

    Keeps debiased running estimates of the mean and second moment (EMA with
    decay ``beta``, Adam-style bias correction) and uses them to normalize
    and denormalize value targets.
    """

    def __init__(self, input_shape, norm_axes=1, beta=0.99999, per_element_update=False, epsilon=1e-5, device=torch.device("cpu")):
        super(ValueNorm, self).__init__()

        self.input_shape = input_shape
        self.norm_axes = norm_axes
        self.epsilon = epsilon  # floor for the debiasing denominator
        self.beta = beta  # EMA decay rate
        self.per_element_update = per_element_update
        self.tpdv = dict(dtype=torch.float32, device=device)

        # Biased running statistics plus the debiasing term.
        self.running_mean = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv)
        self.running_mean_sq = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv)
        self.debiasing_term = nn.Parameter(torch.tensor(0.0), requires_grad=False).to(**self.tpdv)

        self.reset_parameters()

    def reset_parameters(self):
        """Zero all running statistics."""
        self.running_mean.zero_()
        self.running_mean_sq.zero_()
        self.debiasing_term.zero_()

    def _to_tensor(self, input_vector):
        """Convert numpy input to a tensor on the configured device/dtype.

        Fix: uses ``isinstance`` instead of ``type(x) == np.ndarray`` and
        factors the conversion previously duplicated in update/normalize/
        denormalize into one helper.
        """
        if isinstance(input_vector, np.ndarray):
            input_vector = torch.from_numpy(input_vector)
        return input_vector.to(**self.tpdv)

    def running_mean_var(self):
        """Return debiased (mean, var); var is clamped to at least 1e-2."""
        debiased_mean = self.running_mean / self.debiasing_term.clamp(min=self.epsilon)
        debiased_mean_sq = self.running_mean_sq / self.debiasing_term.clamp(min=self.epsilon)
        debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2)
        return debiased_mean, debiased_var

    @torch.no_grad()
    def update(self, input_vector):
        """Fold one batch of targets into the running statistics."""
        input_vector = self._to_tensor(input_vector)

        batch_mean = input_vector.mean(dim=tuple(range(self.norm_axes)))
        batch_sq_mean = (input_vector ** 2).mean(dim=tuple(range(self.norm_axes)))

        if self.per_element_update:
            # Decay once per element rather than once per batch.
            batch_size = np.prod(input_vector.size()[:self.norm_axes])
            weight = self.beta ** batch_size
        else:
            weight = self.beta

        self.running_mean.mul_(weight).add_(batch_mean * (1.0 - weight))
        self.running_mean_sq.mul_(weight).add_(batch_sq_mean * (1.0 - weight))
        self.debiasing_term.mul_(weight).add_(1.0 * (1.0 - weight))

    def normalize(self, input_vector):
        """Standardize ``input_vector`` with the debiased stats; returns a tensor."""
        input_vector = self._to_tensor(input_vector)

        mean, var = self.running_mean_var()
        out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes]
        return out

    def denormalize(self, input_vector):
        """Transform normalized data back into original distribution; returns numpy."""
        input_vector = self._to_tensor(input_vector)

        mean, var = self.running_mean_var()
        out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes]
        out = out.cpu().numpy()
        return out
| 3,110 | 37.8875 | 131 | py |
TiKick | TiKick-main/tmarl/utils/util.py |
import copy
import numpy as np
import math
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from torch.autograd import Variable
from gym.spaces import Box, Discrete, Tuple
def check(input):
    """Convert a numpy array to a torch tensor; pass anything else through.

    Bug fix: the previous version implicitly returned ``None`` for
    non-ndarray inputs, which breaks call sites such as
    ``check(x).to(**tpdv)`` when ``x`` is already a tensor.
    """
    if type(input) == np.ndarray:
        return torch.from_numpy(input)
    return input
def get_gard_norm(it):
    """Global L2 norm of the gradients in ``it``; params without grads are skipped."""
    total = 0
    for param in it:
        grad = param.grad
        if grad is None:
            continue
        total += grad.norm() ** 2
    return math.sqrt(total)
def update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr):
    """Decreases the learning rate linearly"""
    new_lr = initial_lr - (initial_lr * (epoch / float(total_num_epochs)))
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def huber_loss(e, d):
    """Huber loss: quadratic for |e| <= d, linear for e > d."""
    quadratic = (abs(e) <= d).float()
    linear = (e > d).float()
    return quadratic * e ** 2 / 2 + linear * d * (abs(e) - d / 2)
def mse_loss(e):
    """Half squared error: e**2 / 2.

    NOTE(review): a second ``mse_loss`` defined later in this module returns
    ``e**2`` (no 1/2 factor) and shadows this one at import time — confirm
    which scaling is intended.
    """
    return e**2/2
def get_shape_from_obs_space(obs_space):
    """Extract the observation shape from a Box/list/Dict observation space."""
    kind = obs_space.__class__.__name__
    if kind == 'Box':
        return obs_space.shape
    if kind == 'list':
        return obs_space
    if kind == 'Dict':
        return obs_space.spaces
    raise NotImplementedError
def get_shape_from_act_space(act_space):
    """Number of action entries stored per step for the given action space."""
    kind = act_space.__class__.__name__
    if kind == 'Discrete':
        return 1
    if kind == "MultiDiscrete":
        return act_space.shape
    if kind in ("Box", "MultiBinary"):
        return act_space.shape[0]
    # agar-style: tuple of spaces, first component plus one extra entry
    return act_space[0].shape[0] + 1
def tile_images(img_nhwc):
    """Tile N images (ndim=4: batch, height, width, channel) into one big grid.

    The grid is H x W with H = ceil(sqrt(N)) and W = ceil(N / H); missing
    cells are padded with blank (all-zero) images. Returns an ndarray of
    shape (H*h, W*w, c).
    """
    batch = np.asarray(img_nhwc)
    n, h, w, c = batch.shape
    rows = int(np.ceil(np.sqrt(n)))
    cols = int(np.ceil(float(n) / rows))
    # Pad with blank frames so the grid is completely filled.
    blanks = [batch[0] * 0 for _ in range(n, rows * cols)]
    batch = np.array(list(batch) + blanks)
    grid = batch.reshape(rows, cols, h, w, c).transpose(0, 2, 1, 3, 4)
    return grid.reshape(rows * h, cols * w, c)
def to_torch(input):
    """Convert a numpy array to a torch tensor; other inputs pass through unchanged."""
    if type(input) == np.ndarray:
        return torch.from_numpy(input)
    return input
def to_numpy(x):
    """Detach ``x`` from the autograd graph and return it as a CPU numpy array."""
    detached = x.detach().cpu()
    return detached.numpy()
class FixedCategorical(torch.distributions.Categorical):
    """Categorical distribution with batch-friendly log-prob and mode helpers."""

    def sample(self):
        # Sampling semantics are inherited unchanged from the parent class.
        return super().sample()

    def log_probs(self, actions):
        """Per-sample summed log-probabilities, shaped (batch, 1)."""
        flat = super().log_prob(actions.squeeze(-1))
        per_sample = flat.view(actions.size(0), -1).sum(-1)
        return per_sample.unsqueeze(-1)

    def mode(self):
        """Most likely action index, keeping the last dimension."""
        return self.probs.argmax(dim=-1, keepdim=True)
class MultiDiscrete(gym.Space):
    """
    A series of discrete action spaces, one per component.

    Parametrized by a list of [min, max] pairs; component i may take any
    integer in [min_i, max_i] (both inclusive). A value of 0 always needs to
    represent the NOOP action.
    e.g. Nintendo Game Controller, 3 discrete action spaces:
        1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4]
        2) Button A: Discrete 2 - NOOP[0], Pressed[1]
        3) Button B: Discrete 2 - NOOP[0], Pressed[1]
    Initialized as: MultiDiscrete([ [0,4], [0,1], [0,1] ])
    """

    def __init__(self, array_of_param_array):
        self.low = np.array([bounds[0] for bounds in array_of_param_array])
        self.high = np.array([bounds[1] for bounds in array_of_param_array])
        self.num_discrete_space = self.low.shape[0]
        self.n = np.sum(self.high) + 2

    def sample(self):
        """ Returns a array with one sample from each discrete action space """
        # For each component: floor(uniform * (max - min + 1) + min)
        random_array = np.random.rand(self.num_discrete_space)
        scaled = np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)
        return [int(value) for value in scaled]

    def contains(self, x):
        if len(x) != self.num_discrete_space:
            return False
        arr = np.array(x)
        return (arr >= self.low).all() and (arr <= self.high).all()

    @property
    def shape(self):
        return self.num_discrete_space

    def __repr__(self):
        return "MultiDiscrete" + str(self.num_discrete_space)

    def __eq__(self, other):
        return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high)
class DecayThenFlatSchedule():
    """Anneal from ``start`` to ``finish`` over ``time_length`` steps, then stay flat.

    Supports linear ("linear") and exponential ("exp") decay.
    """

    def __init__(self, start, finish, time_length, decay="exp"):
        self.start = start
        self.finish = finish
        self.time_length = time_length
        self.delta = (self.start - self.finish) / self.time_length
        self.decay = decay

        if self.decay in ["exp"]:
            # Scale chosen so that exp(-time_length / scale) == finish.
            self.exp_scaling = (-1) * self.time_length / np.log(self.finish) if self.finish > 0 else 1

    def eval(self, T):
        """Schedule value at step ``T`` (None for an unrecognized decay mode)."""
        if self.decay in ["linear"]:
            return max(self.finish, self.start - self.delta * T)
        elif self.decay in ["exp"]:
            return min(self.start, max(self.finish, np.exp(-T / self.exp_scaling)))
def huber_loss(e, d):
    """Huber loss: quadratic for |e| <= d, linear for e > d.

    NOTE(review): duplicate of the ``huber_loss`` defined earlier in this
    module (identical formula); this definition shadows the first at import.
    """
    a = (abs(e) <= d).float()
    b = (e > d).float()
    return a*e**2/2 + b*d*(abs(e)-d/2)
def mse_loss(e):
    """Squared error (no 1/2 factor).

    NOTE(review): shadows the earlier ``mse_loss`` in this module, which
    returns ``e**2/2`` — the two definitions disagree by a factor of 2;
    confirm which scaling is intended.
    """
    return e**2
def init(module, weight_init, bias_init, gain=1):
    """Apply ``weight_init`` (with ``gain``) and ``bias_init`` to ``module`` in place; returns the module."""
    weight_init(module.weight.data, gain=gain)
    bias_init(module.bias.data)
    return module
def get_clones(module, N):
    """Return a ModuleList holding ``N`` independent deep copies of ``module``."""
    return nn.ModuleList(copy.deepcopy(module) for _ in range(N))
# https://github.com/ikostrikov/pytorch-ddpg-naf/blob/master/ddpg.py#L11
def soft_update(target, source, tau):
    """
    Perform DDPG soft update: target <- (1 - tau) * target + tau * source.

    Inputs:
        target (torch.nn.Module): Net to copy parameters to
        source (torch.nn.Module): Net whose parameters to copy
        tau (float, 0 < x < 1): Weight factor for update
    """
    for t_param, s_param in zip(target.parameters(), source.parameters()):
        t_param.data.copy_(t_param.data * (1.0 - tau) + s_param.data * tau)
# https://github.com/ikostrikov/pytorch-ddpg-naf/blob/master/ddpg.py#L15
def hard_update(target, source):
    """
    Copy network parameters from source to target (exact overwrite).

    Inputs:
        target (torch.nn.Module): Net to copy parameters to
        source (torch.nn.Module): Net whose parameters to copy
    """
    for t_param, s_param in zip(target.parameters(), source.parameters()):
        t_param.data.copy_(s_param.data)
# https://github.com/seba-1511/dist_tuto.pth/blob/gh-pages/train_dist.py
def average_gradients(model):
    """ Gradient averaging. """
    # Averages each parameter's gradient across all workers in the default
    # process group; requires torch.distributed to be initialized first.
    # NOTE(review): ``dist.reduce_op`` is deprecated (removed in newer torch
    # releases in favour of ``dist.ReduceOp``) — confirm the pinned version.
    size = float(dist.get_world_size())
    for param in model.parameters():
        dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM, group=0)
        param.grad.data /= size
def onehot_from_logits(logits, avail_logits=None, eps=0.0):
    """
    Epsilon-greedy one-hot actions for a batch of logits.

    NOTE: when ``avail_logits`` is given, unavailable entries are masked by
    writing -1e10 into ``logits`` in place (the caller's tensor is mutated).
    """
    logits = to_torch(logits)
    dim = len(logits.shape) - 1
    if avail_logits is not None:
        avail_logits = to_torch(avail_logits)
        logits[avail_logits == 0] = -1e10
    # Greedy actions in one-hot form (ties produce multiple ones).
    argmax_acs = (logits == logits.max(dim, keepdim=True)[0]).float()
    if eps == 0.0:
        return argmax_acs
    # Random one-hot actions for the exploration branch.
    rand_acs = Variable(torch.eye(logits.shape[1])[[np.random.choice(
        range(logits.shape[1]), size=logits.shape[0])]], requires_grad=False)
    # Per-sample epsilon-greedy choice between greedy and random actions.
    return torch.stack([argmax_acs[i] if r > eps else rand_acs[i]
                        for i, r in enumerate(torch.rand(logits.shape[0]))])
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def sample_gumbel(shape, eps=1e-20, tens_type=torch.FloatTensor):
    """Draw a sample of Gumbel(0, 1) noise with the given shape."""
    uniform = Variable(tens_type(*shape).uniform_(), requires_grad=False)
    # Inverse-CDF transform; eps guards the logs against zero.
    return -torch.log(-torch.log(uniform + eps) + eps)
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def gumbel_softmax_sample(logits, avail_logits, temperature, device=torch.device('cpu')):
    """ Draw a sample from the Gumbel-Softmax distribution"""
    # Gumbel noise is always drawn on CPU; move the sum back to CUDA if needed.
    noise = sample_gumbel(logits.shape, tens_type=type(logits.data))
    if str(device) == 'cpu':
        y = logits + noise
    else:
        y = (logits.cpu() + noise).cuda()

    dim = len(logits.shape) - 1
    if avail_logits is not None:
        # Mask out unavailable actions before the softmax.
        avail_logits = to_torch(avail_logits).to(device)
        y[avail_logits == 0] = -1e10
    return F.softmax(y / temperature, dim=dim)
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def gumbel_softmax(logits, avail_logits=None, temperature=1.0, hard=False, device=torch.device('cpu')):
    """Sample from the Gumbel-Softmax distribution and optionally discretize.
    Args:
        logits: [batch_size, n_class] unnormalized log-probs
        temperature: non-negative scalar
        hard: if True, take argmax, but differentiate w.r.t. soft sample y
    Returns:
        [batch_size, n_class] sample from the Gumbel-Softmax distribution.
        If hard=True, then the returned sample will be one-hot, otherwise it will
        be a probabilitiy distribution that sums to 1 across classes
    """
    soft_sample = gumbel_softmax_sample(logits, avail_logits, temperature, device)
    if not hard:
        return soft_sample
    # Straight-through estimator: forward pass uses the one-hot argmax,
    # gradients flow through the soft sample.
    hard_sample = onehot_from_logits(soft_sample)
    return (hard_sample - soft_sample).detach() + soft_sample
def gaussian_noise(shape, std):
    """Return a tensor of the given shape filled with N(0, std) noise."""
    noise = torch.empty(shape)
    return noise.normal_(mean=0, std=std)
def get_obs_shape(obs_space):
    """Observation shape from a Box space or a raw list spec."""
    kind = obs_space.__class__.__name__
    if kind == "Box":
        return obs_space.shape
    if kind == "list":
        return obs_space
    raise NotImplementedError
def get_dim_from_space(space):
    """Flat dimensionality of a gym space (per-component array for MultiDiscrete)."""
    if isinstance(space, Box):
        return space.shape[0]
    if isinstance(space, Discrete):
        return space.n
    if isinstance(space, Tuple):
        # Recurse over the sub-spaces and sum their dimensions.
        return sum([get_dim_from_space(sp) for sp in space])
    if "MultiDiscrete" in space.__class__.__name__:
        return (space.high - space.low) + 1
    if isinstance(space, list):
        return space[0]
    raise Exception("Unrecognized space: ", type(space))
def get_state_dim(observation_dict, action_dict):
    """Combined (obs_dim, act_dim, obs_dim + act_dim) across all agents."""
    combined_obs_dim = sum([get_dim_from_space(space)
                            for space in observation_dict.values()])
    combined_act_dim = 0
    for space in action_dict.values():
        dim = get_dim_from_space(space)
        # MultiDiscrete spaces yield an array of per-component dims.
        combined_act_dim += int(sum(dim)) if isinstance(dim, np.ndarray) else dim
    return combined_obs_dim, combined_act_dim, combined_obs_dim + combined_act_dim
def get_cent_act_dim(action_space):
    """Total centralized action dimension summed over all agents' spaces."""
    cent_act_dim = 0
    for space in action_space:
        dim = get_dim_from_space(space)
        # MultiDiscrete spaces yield an array of per-component dims.
        cent_act_dim += int(sum(dim)) if isinstance(dim, np.ndarray) else dim
    return cent_act_dim
def is_discrete(space):
    """True for Discrete spaces and any MultiDiscrete variant."""
    return isinstance(space, Discrete) or "MultiDiscrete" in space.__class__.__name__
def is_multidiscrete(space):
    """True when the space is a MultiDiscrete variant (checked by class name)."""
    return "MultiDiscrete" in space.__class__.__name__
def make_onehot(int_action, action_dim, seq_len=None):
    """One-hot encode integer actions; handles an optional leading sequence dim."""
    if type(int_action) == torch.Tensor:
        int_action = int_action.cpu().numpy()
    if not seq_len:
        return np.eye(action_dim)[int_action]
    # Encode each timestep separately, then stack along the sequence axis.
    onehots = [np.eye(action_dim)[int_action[t]] for t in range(seq_len)]
    return np.stack(onehots)
def avail_choose(x, avail_x=None):
    """Mask unavailable entries of ``x`` with -1e10 (mutates tensor input) and return it."""
    x = to_torch(x)
    if avail_x is not None:
        mask = to_torch(avail_x)
        x[mask == 0] = -1e10
    return x  # FixedCategorical(logits=x)
def tile_images(img_nhwc):
    """
    Tile N images into one big PxQ image
    (P,Q) are chosen to be as close as possible, and if N
    is square, then P=Q.
    input: img_nhwc, list or array of images, ndim=4 once turned into array
        n = batch index, h = height, w = width, c = channel
    returns:
        bigim_HWc, ndarray with ndim=3

    NOTE(review): exact duplicate of ``tile_images`` defined earlier in this
    module; this definition shadows the first at import time.
    """
    img_nhwc = np.asarray(img_nhwc)
    N, h, w, c = img_nhwc.shape
    H = int(np.ceil(np.sqrt(N)))
    W = int(np.ceil(float(N)/H))
    # Pad with blank (zero) frames so the grid is completely filled.
    img_nhwc = np.array(
        list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)])
    img_HWhwc = img_nhwc.reshape(H, W, h, w, c)
    img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4)
    img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c)
    return img_Hh_Ww_c
| 13,893 | 31.846336 | 122 | py |
TiKick | TiKick-main/tmarl/utils/gpu_mem_track.py | # code from https://github.com/Oldpan/Pytorch-Memory-Utils
import gc
import datetime
import inspect
import torch
import numpy as np
# Bytes per element for each torch dtype.
dtype_memory_size_dict = {
    torch.float64: 64/8,
    torch.double: 64/8,
    torch.float32: 32/8,
    torch.float: 32/8,
    torch.float16: 16/8,
    torch.half: 16/8,
    torch.int64: 64/8,
    torch.long: 64/8,
    torch.int32: 32/8,
    torch.int: 32/8,
    torch.int16: 16/8,
    # BUG FIX: was 16/6. torch.short aliases torch.int16 (2 bytes per
    # element), and since both literals share the same dict key the wrong
    # value also clobbered the torch.int16 entry above.
    torch.short: 16/8,
    torch.uint8: 8/8,
    torch.int8: 8/8,
}
# compatibility of torch1.0
if getattr(torch, "bfloat16", None) is not None:
    dtype_memory_size_dict[torch.bfloat16] = 16/8
if getattr(torch, "bool", None) is not None:
    dtype_memory_size_dict[torch.bool] = 8/8  # pytorch use 1 byte for a bool, see https://github.com/pytorch/pytorch/issues/41571

def get_mem_space(x):
    """Return bytes per element for dtype ``x``.

    BUG FIX: the previous version printed a warning on KeyError and then
    fell through to ``return ret`` with ``ret`` unbound, raising
    UnboundLocalError; it now logs and re-raises the original KeyError.
    """
    try:
        return dtype_memory_size_dict[x]
    except KeyError:
        print(f"dtype {x} is not supported!")
        raise
class MemTracker(object):
    """
    Class used to track pytorch memory usage
    Arguments:
        detail(bool, default True): whether the function shows the detail gpu memory usage
        path(str): where to save log file
        verbose(bool, default False): whether show the trivial exception
        device(int): GPU number, default is 0
    """

    def __init__(self, detail=True, path='', verbose=False, device=0):
        self.print_detail = detail
        self.last_tensor_sizes = set()
        # Timestamped log file name so repeated runs don't clobber each other.
        self.gpu_profile_fn = path + f'{datetime.datetime.now():%d-%b-%y-%H:%M:%S}-gpu_mem_track.txt'
        self.verbose = verbose
        self.begin = True
        self.device = device

    def get_tensors(self):
        """Yield every live CUDA tensor the garbage collector knows about."""
        for obj in gc.get_objects():
            try:
                # Parameters expose their storage via .data; plain tensors match directly.
                if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
                    tensor = obj
                else:
                    continue
                if tensor.is_cuda:
                    yield tensor
            except Exception as e:
                # Some gc-tracked objects raise on attribute access; skip them.
                if self.verbose:
                    print('A trivial exception occured: {}'.format(e))

    def get_tensor_usage(self):
        """Total size (MB) of all live CUDA tensors."""
        sizes = [np.prod(np.array(tensor.size())) * get_mem_space(tensor.dtype) for tensor in self.get_tensors()]
        return np.sum(sizes) / 1024**2

    def get_allocate_usage(self):
        """Memory (MB) currently allocated by the CUDA caching allocator."""
        return torch.cuda.memory_allocated() / 1024**2

    def clear_cache(self):
        """Run python GC, then release cached CUDA memory back to the driver."""
        gc.collect()
        torch.cuda.empty_cache()

    def print_all_gpu_tensor(self, file=None):
        """Print size, dtype and MB footprint for every live CUDA tensor."""
        for x in self.get_tensors():
            print(x.size(), x.dtype, np.prod(np.array(x.size()))*get_mem_space(x.dtype)/1024**2, file=file)

    def track(self):
        """
        Track the GPU memory usage
        """
        # Record the caller's file/line/function so the log shows where track() ran.
        frameinfo = inspect.stack()[1]
        where_str = frameinfo.filename + ' line ' + str(frameinfo.lineno) + ': ' + frameinfo.function

        with open(self.gpu_profile_fn, 'a+') as f:
            if self.begin:
                f.write(f"GPU Memory Track | {datetime.datetime.now():%d-%b-%y-%H:%M:%S} |"
                        f" Total Tensor Used Memory:{self.get_tensor_usage():<7.1f}Mb"
                        f" Total Allocated Memory:{self.get_allocate_usage():<7.1f}Mb\n\n")
                self.begin = False

            if self.print_detail is True:
                # Diff the current tensor population against the last snapshot
                # and log additions (+) and removals (-).
                ts_list = [(tensor.size(), tensor.dtype) for tensor in self.get_tensors()]
                new_tensor_sizes = {(type(x),
                                     tuple(x.size()),
                                     ts_list.count((x.size(), x.dtype)),
                                     np.prod(np.array(x.size()))*get_mem_space(x.dtype)/1024**2,
                                     x.dtype) for x in self.get_tensors()}
                for t, s, n, m, data_type in new_tensor_sizes - self.last_tensor_sizes:
                    f.write(f'+ | {str(n)} * Size:{str(s):<20} | Memory: {str(m*n)[:6]} M | {str(t):<20} | {data_type}\n')
                for t, s, n, m, data_type in self.last_tensor_sizes - new_tensor_sizes:
                    f.write(f'- | {str(n)} * Size:{str(s):<20} | Memory: {str(m*n)[:6]} M | {str(t):<20} | {data_type}\n')

                self.last_tensor_sizes = new_tensor_sizes

            f.write(f"\nAt {where_str:<50}"
                    f" Total Tensor Used Memory:{self.get_tensor_usage():<7.1f}Mb"
                    f" Total Allocated Memory:{self.get_allocate_usage():<7.1f}Mb\n\n")
| 4,432 | 36.888889 | 129 | py |
TiKick | TiKick-main/tmarl/utils/modelsize_estimate.py | # code from https://github.com/Oldpan/Pytorch-Memory-Utils
import torch.nn as nn
import numpy as np
def modelsize(model, input, type_size=4):
    """Print a rough estimate of parameter and activation memory for ``model``.

    Pushes the sample ``input`` through each submodule in turn (assumes a
    sequential module list — TODO confirm for nested models). ``type_size``
    is bytes per element.
    """
    param_count = sum(np.prod(list(p.size())) for p in model.parameters())
    print('Model {} : params: {:4f}M'.format(model._get_name(), param_count * type_size / 1000 / 1000))

    activation = input.clone()
    activation.requires_grad_(requires_grad=False)

    out_sizes = []
    # modules()[0] is the model itself, so start from its children.
    for module in list(model.modules())[1:]:
        # In-place ReLUs add no new activation memory; skip them.
        if isinstance(module, nn.ReLU) and module.inplace:
            continue
        activation = module(activation)
        out_sizes.append(np.array(activation.size()))

    total_nums = sum(np.prod(np.array(s)) for s in out_sizes)

    print('Model {} : intermedite variables: {:3f} M (without backward)'
          .format(model._get_name(), total_nums * type_size / 1000 / 1000))
    print('Model {} : intermedite variables: {:3f} M (with backward)'
          .format(model._get_name(), total_nums * type_size*2 / 1000 / 1000))
| 1,428 | 34.725 | 116 | py |
RobDanns | RobDanns-main/deep_learning/tools/corruptions-inference-tinyimagenet.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
from __future__ import print_function
import argparse
import numpy as np
import os
import sys
import torch
import multiprocessing as mp
import math
import pdb
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
from PIL import Image
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.paths as dp
import time
from datetime import datetime
from tensorboardX import SummaryWriter
from torchvision.utils import save_image
from skimage.util import random_noise
print("Let's use GPU :", torch.cuda.current_device())
logger = lu.get_logger(__name__)
def parse_args():
    """Parses the arguments."""
    parser = argparse.ArgumentParser(
        description='Train a classification model'
    )
    parser.add_argument(
        '--cfg', dest='cfg_file', help='Config file', required=True, type=str
    )
    parser.add_argument(
        'opts', help='See pycls/core/config.py for all options',
        default=None, nargs=argparse.REMAINDER
    )
    # With no CLI arguments at all, show usage and bail out.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
# TEST(VAL) DATA_LOADER FOR TINY_IMAGENET200
def parseClasses(file):
    """Parse annotation lines of the form "<filename> <classname> ...".

    Returns (filenames, classes) as two parallel lists.
    """
    filenames, classes = [], []
    with open(file) as f:
        for raw in f.readlines():
            tokens = raw.strip().split()
            filenames.append(tokens[0])
            classes.append(tokens[1])
    return filenames, classes
def load_allimages(dir):
    """Recursively collect image file paths under ``dir`` (sorted walk order).

    Exits the process when ``dir`` is not a directory (original behavior).
    """
    # Same extension whitelist the torchvision helper was called with.
    IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif')
    images = []
    if not os.path.isdir(dir):
        sys.exit(-1)
    for root, _, fnames in sorted(os.walk(dir)):
        for fname in sorted(fnames):
            # Case-insensitive extension check; replaces the torchvision
            # helper ``datasets.folder.has_file_allowed_extension`` (which
            # does exactly ``filename.lower().endswith(extensions)``) with
            # the equivalent stdlib test, removing a needless dependency.
            if fname.lower().endswith(IMG_EXTENSIONS):
                images.append(os.path.join(root, fname))
    return images
class TinyImageNet(torch.utils.data.Dataset):
    """ TinyImageNet200 validation dataloader."""

    def __init__(self, img_path, gt_path, class_to_idx=None, transform=None):
        self.img_path = img_path
        self.transform = transform
        self.gt_path = gt_path
        self.class_to_idx = class_to_idx
        # Parse annotations, then map each class name to its integer label.
        self.imgs, self.classnames = parseClasses(gt_path)
        self.classidx = [self.class_to_idx[name] for name in self.classnames]

    def __getitem__(self, index):
        """Return the (image, label) pair for the given index."""
        with open(os.path.join(self.img_path, self.imgs[index]), 'rb') as f:
            img = Image.open(f)
            img = img.convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img, self.classidx[index]

    def __len__(self):
        return len(self.imgs)
def is_eval_epoch(cur_epoch):
    """Determines if the model should be evaluated at the current epoch."""
    next_epoch = cur_epoch + 1
    return next_epoch % cfg.TRAIN.EVAL_PERIOD == 0 or next_epoch == cfg.OPTIM.MAX_EPOCH
def log_model_info(model, writer_eval=None):
    """Log the model structure and its param/flop counts; returns (params, flops)."""
    params = mu.params_count(model)
    flops = mu.flops_count(model)
    logger.info('Model:\n{}'.format(model))
    logger.info('Params: {:,}'.format(params))
    logger.info('Flops: {:,}'.format(flops))
    logger.info('Number of node: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
    if writer_eval is not None:
        writer_eval.add_scalar('Params', params, 1)
        writer_eval.add_scalar('Flops', flops, 1)
    return params, flops
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
    """Evaluates the model on the test set for one epoch.

    Runs inference over ``test_loader``, accumulates top-1/top-5 errors in
    ``test_meter`` and logs per-iteration and per-epoch stats; optionally
    saves the model's graph structure when ``cfg.RGRAPH.SAVE_GRAPH`` is set.
    """
    # Enable eval mode
    model.eval()
    test_meter.iter_tic()
    for cur_iter, (inputs, labels) in enumerate(test_loader):
        # Transfer the data to the current GPU device
        inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
        # Compute the predictions
        preds = model(inputs)
        # Compute the errors
        top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
        # Combine the errors across the GPUs
        if cfg.NUM_GPUS > 1:
            top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
        # Copy the errors from GPU to CPU (sync point)
        top1_err, top5_err = top1_err.item(), top5_err.item()
        test_meter.iter_toc()
        # Update and log stats
        test_meter.update_stats(
            top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
        )
        test_meter.log_iter_stats(cur_epoch, cur_iter)
        test_meter.iter_tic()
    # Log epoch stats
    test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
    eval_stats = test_meter.get_epoch_stats(cur_epoch)
    test_meter.reset()
    if cfg.RGRAPH.SAVE_GRAPH:
        # Dump the model's adjacency (plus this epoch's top-1 error) to an
        # npz file keyed by the training seed and epoch number.
        adj_dict = nu.model2adj(model)
        adj_dict = {**adj_dict, 'top1_err': eval_stats['top1_err']}
        os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
        np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
    # return eval_stats
def save_noisy_image(img, name):
    """Save a batch of images, reshaping based on spatial size (32/64 px, else 224).

    BUG FIX: the original used independent ``if`` statements, so a 32px batch
    was saved once and then fell into the 64/else chain, where the 224x224
    ``view`` on a 32x32 tensor raises; the branches are now mutually
    exclusive and the image is saved exactly once.
    """
    if img.size(2) == 32:
        img = img.view(img.size(0), 3, 32, 32)
    elif img.size(2) == 64:
        img = img.view(img.size(0), 3, 64, 64)
    else:
        img = img.view(img.size(0), 3, 224, 224)
    save_image(img, name)
## Functions to save noisy images.
# def gaussian_noise(test_loader):
# print("Adding gaussian_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# gaussian_img_05 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.05, clip=True))
# gaussian_img_2 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.2, clip=True))
# gaussian_img_4 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.4, clip=True))
# gaussian_img_6 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.6, clip=True))
# save_noisy_image(gaussian_img_05, r"noisy-images/gaussian_05.png")
# save_noisy_image(gaussian_img_2, r"noisy-images/gaussian_2.png")
# save_noisy_image(gaussian_img_4, r"noisy-images/gaussian_4.png")
# save_noisy_image(gaussian_img_6, r"noisy-images/gaussian_6.png")
# break
# def salt_pepper_noise(test_loader):
# print("Adding salt_pepper_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# s_vs_p_5 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.5, clip=True))
# s_vs_p_6 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.6, clip=True))
# s_vs_p_7 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.7, clip=True))
# save_noisy_image(s_vs_p_5, r"noisy-images/s&p_5.png")
# break
# def speckle_noise(test_loader):
# print("Adding speckle_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# speckle_img_05 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.05, clip=True))
# speckle_img_2 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.2, clip=True))
# speckle_img_4 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.4, clip=True))
# speckle_img_6 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.6, clip=True))
# save_noisy_image(speckle_img_05, r"noisy-images/speckle_05.png")
# save_noisy_image(speckle_img_2, r"noisy-images/speckle_2.png")
# save_noisy_image(speckle_img_4, r"noisy-images/speckle_4.png")
# save_noisy_image(speckle_img_6, r"noisy-images/speckle_6.png")
# break
def train_model(writer_train=None, writer_eval=None, is_master=False):
    """Evaluates a trained model under gaussian / speckle / salt&pepper noise.

    Loads the last checkpoint, optionally auto-matches the model's flops or
    params to a fixed baseline, runs a clean evaluation, then measures robust
    accuracy at several noise levels and appends the results to text files in
    cfg.OUT_DIR.

    Args:
        writer_train: optional tensorboard writer for training stats (unused here).
        writer_eval: optional tensorboard writer for evaluation stats.
        is_master: True on the master process; gates logging/IO in the meters.
    """
    # Fit flops/params to a per-dataset/architecture baseline (only once,
    # for the first seed of the sweep).
    if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
        mode = 'flops' # flops or params
        if cfg.TRAIN.DATASET == 'cifar10':
            pre_repeat = 15
            if cfg.MODEL.TYPE == 'resnet': # ResNet20
                stats_baseline = 40813184
            elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 256:
                        stats_baseline = 985600
                    elif cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 2364416
                    elif cfg.RGRAPH.DIM_LIST[0] == 1024:
                        stats_baseline = 6301696
            elif cfg.MODEL.TYPE == 'cnn':
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 64:
                        stats_baseline = 48957952
                    elif cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 806884352
                    elif cfg.RGRAPH.DIM_LIST[0] == 16:
                        stats_baseline = 1216672
                elif cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 48957952
                    elif '16d' in cfg.OUT_DIR:
                        stats_baseline = 3392128
        elif cfg.TRAIN.DATASET == 'cifar100':
            pre_repeat = 15
            if cfg.MODEL.TYPE == 'resnet': # ResNet20
                stats_baseline = 40813184
            elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 256:
                        stats_baseline = 985600
                    elif cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 2364416
                    elif cfg.RGRAPH.DIM_LIST[0] == 1024:
                        stats_baseline = 6301696
            elif cfg.MODEL.TYPE == 'cnn':
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 806884352
                    elif cfg.RGRAPH.DIM_LIST[0] == 16:
                        stats_baseline = 1216672
                elif cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 48957952
                    elif '16d' in cfg.OUT_DIR:
                        stats_baseline = 3392128
        elif cfg.TRAIN.DATASET == 'tinyimagenet200':
            pre_repeat = 9
            if cfg.MODEL.TYPE == 'resnet':
                if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
                    stats_baseline = 1820000000
                elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
                    stats_baseline = 3663761408
                elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
                    stats_baseline = 553614592
                elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
                    stats_baseline = 4089184256
            elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
                stats_baseline = 385824092
            elif cfg.MODEL.TYPE == 'cnn': # CNN
                if cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 166438912
        cfg.defrost()
        stats = model_builder.build_model_stats(mode)
        if stats != stats_baseline:
            # 1st round: set first stage dim
            for i in range(pre_repeat):
                scale = round(math.sqrt(stats_baseline / stats), 2)
                first = cfg.RGRAPH.DIM_LIST[0]
                ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                first = int(round(first * scale))
                cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                stats = model_builder.build_model_stats(mode)
            flag_init = 1 if stats < stats_baseline else -1
            step = 1
            while True:
                first = cfg.RGRAPH.DIM_LIST[0]
                ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                first += flag_init * step
                cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                stats = model_builder.build_model_stats(mode)
                flag = 1 if stats < stats_baseline else -1
                if stats == stats_baseline:
                    break
                if flag != flag_init:
                    if cfg.RGRAPH.UPPER == False:  # make sure the stats is SMALLER than baseline
                        if flag < 0:
                            first = cfg.RGRAPH.DIM_LIST[0]
                            ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                            first -= flag_init * step
                            cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                        break
                    else:
                        if flag > 0:
                            first = cfg.RGRAPH.DIM_LIST[0]
                            ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                            first -= flag_init * step
                            cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                        break
            # 2nd round: set other stage dim
            first = cfg.RGRAPH.DIM_LIST[0]
            ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
            stats = model_builder.build_model_stats(mode)
            flag_init = 1 if stats < stats_baseline else -1
            if 'share' not in cfg.RESNET.TRANS_FUN:
                for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
                    for j in range(ratio_list[i]):
                        cfg.RGRAPH.DIM_LIST[i] += flag_init
                        stats = model_builder.build_model_stats(mode)
                        flag = 1 if stats < stats_baseline else -1
                        if flag_init != flag:
                            cfg.RGRAPH.DIM_LIST[i] -= flag_init
                            break
        stats = model_builder.build_model_stats(mode)
        print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
    # Build the model (before the loaders to ease debugging)
    model = model_builder.build_model()
    params, flops = log_model_info(model, writer_eval)
    # Define the loss function
    loss_fun = losses.get_loss_fun()
    # Construct the optimizer
    optimizer = optim.construct_optimizer(model)
    # Load a checkpoint if applicable
    start_epoch = 0
    # NOTE(review): the sibling training script calls cu.has_checkpoint();
    # confirm cu.had_checkpoint exists, otherwise this raises AttributeError.
    if cu.had_checkpoint():
        print("Checking for a checkpoint")
        last_checkpoint = cu.get_checkpoint_last()
        print("Last Checkpoint : ", last_checkpoint)
        checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
        logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
        if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
            exit()
            start_epoch = checkpoint_epoch
        else:
            start_epoch = checkpoint_epoch + 1
    print("Epoch = ", start_epoch)
    # Create data loaders
    data_path = dp.get_data_path(cfg.TRAIN.DATASET)  # Retrieve the data path for the dataset
    traindir = os.path.join(data_path, cfg.TRAIN.SPLIT)
    valdir = os.path.join(data_path, cfg.TEST.SPLIT, 'images')
    valgtfile = os.path.join(data_path, cfg.TEST.SPLIT, 'val_annotations.txt')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # create training dataset and loader
    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(traindir, transforms.Compose([
            transforms.Resize(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
        shuffle=True,
        num_workers=cfg.DATA_LOADER.NUM_WORKERS,
        pin_memory=True)
    # create validation dataset
    test_dataset = TinyImageNet(
        valdir,
        valgtfile,
        class_to_idx=train_loader.dataset.class_to_idx.copy(),
        transform=transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor(),
            normalize]))
    # create validation loader
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
        shuffle=False,
        num_workers=cfg.DATA_LOADER.NUM_WORKERS,
        pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
        drop_last=False)
    # Create meters
    test_meter = TestMeter(len(test_loader))
    if cfg.ONLINE_FLOPS:
        model_dummy = model_builder.build_model()
        IMAGE_SIZE = 224
        n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
        logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
        del (model_dummy)
    # Perform the training loop
    logger.info('Start epoch: {}'.format(start_epoch + 1))
    if start_epoch == cfg.OPTIM.MAX_EPOCH:
        cur_epoch = start_epoch - 1
        eval_epoch(test_loader, model, test_meter, cur_epoch,
                   writer_eval, params, flops, is_master=is_master)
    noise_mode = ['gaussian', 'speckle', 's&p']
    noise_std = [0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]  # change the variance values as desired.
    # Cap on the number of validation images evaluated per noise setting.
    # (This was an undefined placeholder `X` before, which raised NameError.)
    X = 10000
    model.eval()
    accuracies_gaussian = []
    accuracies_saltpepper = []
    accuracies_speckle = []
    for mode in noise_mode:
        for level in noise_std:
            print("Adding noise={} at level={} to images".format(mode, level))
            ctr = 0
            correct = 0
            total = 0
            for cur_iter, (inputs, labels) in enumerate(test_loader):
                if not 's&p' in mode:
                    noisy_img = torch.tensor(random_noise(inputs, mode=mode, mean=0, var=level, clip=True))
                else:
                    noisy_img = torch.tensor(random_noise(inputs, mode=mode, salt_vs_pepper=0.5, clip=True))
                noisy_img, labels = noisy_img.cuda(), labels.cuda(non_blocking=True)
                outputs = model(noisy_img.float())
                _, predicted = torch.max(outputs.data, 1)
                ctr += 1
                total += labels.size(0)
                correct += (predicted == labels).sum()
                if total > X:  # stop once X images have been evaluated
                    break
            acc = 100 * float(correct) / total
            print("acc =", round(acc, 2), "correct =", float(correct), "total =", total)
            if 'gaussian' in mode:
                print('Robust Accuracy = {:.3f} with level = {:.2f}'.format(acc, level))
                accuracies_gaussian.append(round(acc, 2))
                print("Guassian Accuracies after append :", accuracies_gaussian)
            elif 'speckle' in mode:
                print('Robust Accuracy = {:.3f} with level = {:.2f}'.format(acc, level))
                accuracies_speckle.append(round(acc, 2))
                print("Speckle Accuracies after append :", accuracies_speckle)
            elif 's&p' in mode:
                print('Robust Accuracy = {:.3f} for S&P noise'.format(acc))
                accuracies_saltpepper.append(round(acc, 2))
                print("Salt&Pepper Accuracies after append :", accuracies_saltpepper)
                # Salt & pepper has no variance parameter: one level suffices.
                break
            else:
                print("noise mode not supported")
    # Change the number of variable as desired number of outputs.
    gaus_001, gaus_01, gaus_05, gaus_1, gaus_2, gaus_3, gaus_4, gaus_5, gaus_6 = (items for items in accuracies_gaussian)
    speck_001, speck_01, speck_05, speck_1, speck_2, speck_3, speck_4, speck_5, speck_6 = (items for items in accuracies_speckle)
    saltpepper = accuracies_saltpepper[0]
    # Load the top1/top5 errors from the saved evaluation results.
    # (Context manager so the file handle is not leaked.)
    c_ids = []
    with open("{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH), "r") as f:
        for i in f.readlines():
            sub_id = list(map(float, i.split(",")))
            c_ids.append(sub_id[3:5])
    topK_errors = [sum(i) / len(c_ids) for i in zip(*c_ids)]
    top1_error, top5_error = topK_errors[0], topK_errors[1]
    result_gaussian = ', '.join(
        [str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
         '{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
         str(gaus_001), str(gaus_01), str(gaus_05), str(gaus_1), str(gaus_2), str(gaus_3), str(gaus_4), str(gaus_5), str(gaus_6)])
    result_speck = ', '.join(
        [str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
         '{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
         str(speck_001), str(speck_01), str(speck_05), str(speck_1), str(speck_2), str(speck_3), str(speck_4), str(speck_5), str(speck_6)])
    result_sp = ', '.join(
        [str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
         '{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
         str(saltpepper)])
    with open("{}/gaus_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
        print(" Writing Text File with accuracies Gaussian:{} ".format(accuracies_gaussian))
        text_file.write(result_gaussian + '\n')
    with open("{}/saltpepper_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
        print(" Writing Text File with accuracies Salt & Pepper:{} ".format(accuracies_saltpepper))
        text_file.write(result_sp + '\n')
    with open("{}/speckle_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
        print(" Writing Text File with accuracies Speckle:{} ".format(accuracies_speckle))
        text_file.write(result_speck + '\n')
def single_proc_train():
    """Set up logging/seeding and launch the run in a single process."""
    lu.setup_logging()
    logger.info('Config:\n{}'.format(cfg))
    # Tensorboard writers only exist on the master process, and only for the
    # first seed of the sweep.
    writer_train, writer_eval = None, None
    use_tensorboard = (cfg.TENSORBOARD and du.is_master_proc()
                       and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START)
    if use_tensorboard:
        stamp = datetime.now().strftime('%b%d_%H-%M-%S')
        logdir_train = os.path.join(cfg.OUT_DIR, 'runs', stamp + '_train')
        logdir_eval = os.path.join(cfg.OUT_DIR, 'runs', stamp + '_eval')
        for logdir in (logdir_train, logdir_eval):
            if not os.path.exists(logdir):
                os.makedirs(logdir)
        writer_train = SummaryWriter(logdir_train)
        writer_eval = SummaryWriter(logdir_eval)
    # Fix the RNG seeds (see RNG comment in core/config.py for discussion)
    np.random.seed(cfg.RGRAPH.SEED_TRAIN)
    torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
    # Configure the CUDNN backend
    torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
    # Launch inference + adversarial run
    train_model(writer_train, writer_eval, is_master=du.is_master_proc())
    if writer_train is not None and writer_eval is not None:
        writer_train.close()
        writer_eval.close()
def check_seed_exists(i):
    """Return True if results for the i-th seed were already written."""
    fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
    if not os.path.isfile(fname):
        return False
    # One result line per completed seed: seed i is done once the file
    # holds more than i lines.
    with open(fname, 'r') as f:
        return len(f.readlines()) > i
def main():
    """Load the config and run inference once per training seed."""
    args = parse_args()
    # Merge config-file and command-line options into the global config.
    cfg.merge_from_file(args.cfg_file)
    cfg.merge_from_list(args.opts)
    assert_cfg()
    # cfg.freeze()
    # Make sure the output directory exists, then persist the config.
    os.makedirs(cfg.OUT_DIR, exist_ok=True)
    dump_cfg()
    seeds = range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)
    for i, cfg.RGRAPH.SEED_TRAIN in enumerate(seeds):
        # Skip seeds whose results are already on disk.
        if check_seed_exists(i):
            print('Inference seed {} already exists, stopping inference'.format(cfg.RGRAPH.SEED_TRAIN))
        else:
            print("Launching inference for seed {}".format(i))
            single_proc_train()
# Script entry point.
if __name__ == '__main__':
    main()
| 25,928 | 41.092532 | 139 | py |
RobDanns | RobDanns-main/deep_learning/tools/train_resnet18_on_tinyimagenet200.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
from __future__ import print_function
import argparse
import numpy as np
import os
import sys
import torch
import multiprocessing as mp
import math
import pdb
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
from PIL import Image
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.paths as dp
import time
from datetime import datetime
from tensorboardX import SummaryWriter
# Module-level logger for this script.
logger = lu.get_logger(__name__)
# NOTE(review): touches CUDA at import time; this fails on a CPU-only
# machine — confirm that is intended.
print("Let's use GPU :", torch.cuda.current_device())
def parse_args():
    """Parse the command-line arguments for this script."""
    ap = argparse.ArgumentParser(description='Train a classification model')
    ap.add_argument('--cfg', dest='cfg_file', help='Config file',
                    required=True, type=str)
    ap.add_argument('opts', help='See pycls/core/config.py for all options',
                    default=None, nargs=argparse.REMAINDER)
    # With no arguments at all, show the usage text and bail out.
    if len(sys.argv) == 1:
        ap.print_help()
        sys.exit(1)
    return ap.parse_args()
# TEST/VAL DATA_LOADER FOR TINY_IMAGENET200
def parseClasses(file):
    """Parse a TinyImageNet annotation file.

    Each non-empty line is expected to look like
    ``<filename> <classname> ...``; any extra columns are ignored.

    Args:
        file: path to the annotation file (e.g. val_annotations.txt).

    Returns:
        (filenames, classes): two parallel lists of strings.
    """
    classes = []
    filenames = []
    with open(file) as f:
        lines = [x.strip() for x in f.readlines()]
    for line in lines:
        tokens = line.split()
        # Skip blank or malformed lines (e.g. a trailing newline) instead of
        # raising IndexError on tokens[1].
        if len(tokens) < 2:
            continue
        classes.append(tokens[1])
        filenames.append(tokens[0])
    return filenames, classes
def load_allimages(dir):
    """Return sorted paths of all image files found under *dir* (recursive)."""
    if not os.path.isdir(dir):
        sys.exit(-1)
    # Same case-insensitive suffix check torchvision's
    # has_file_allowed_extension performs.
    allowed = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif')
    images = []
    for root, _, fnames in sorted(os.walk(dir)):
        for fname in sorted(fnames):
            if fname.lower().endswith(allowed):
                images.append(os.path.join(root, fname))
    return images
class TinyImageNet(torch.utils.data.Dataset):
    """TinyImageNet-200 validation set.

    Reads the ground-truth file (val_annotations.txt) and serves
    (image, label) pairs, mapping class names to integer indices via the
    supplied `class_to_idx` dictionary.
    """

    def __init__(self, img_path, gt_path, class_to_idx=None, transform=None):
        self.img_path = img_path
        self.transform = transform
        self.gt_path = gt_path
        self.class_to_idx = class_to_idx
        self.imgs, self.classnames = parseClasses(gt_path)
        # Translate every class name into its integer index up front.
        self.classidx = [self.class_to_idx[name] for name in self.classnames]

    def __getitem__(self, index):
        """Return the (image, label) pair stored at `index`."""
        with open(os.path.join(self.img_path, self.imgs[index]), 'rb') as f:
            img = Image.open(f).convert('RGB')
            if self.transform is not None:
                img = self.transform(img)
        return img, self.classidx[index]

    def __len__(self):
        return len(self.imgs)
def is_eval_epoch(cur_epoch):
    """Return True when the model should be evaluated after `cur_epoch`."""
    # Evaluate on every EVAL_PERIOD-th epoch and always on the final one.
    nxt = cur_epoch + 1
    return nxt % cfg.TRAIN.EVAL_PERIOD == 0 or nxt == cfg.OPTIM.MAX_EPOCH
def log_model_info(model, writer_eval=None):
    """Log model structure and size stats; return (params, flops)."""
    logger.info('Model:\n{}'.format(model))
    n_params = mu.params_count(model)
    n_flops = mu.flops_count(model)
    logger.info('Params: {:,}'.format(n_params))
    logger.info('Flops: {:,}'.format(n_flops))
    logger.info('Number of node: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
    # Mirror the counts into tensorboard when a writer is supplied.
    if writer_eval is not None:
        writer_eval.add_scalar('Params', n_params, 1)
        writer_eval.add_scalar('Flops', n_flops, 1)
    return n_params, n_flops
def train_epoch(
        train_loader, model, loss_fun, optimizer, train_meter, cur_epoch, writer_train=None, params=0, flops=0, is_master=False):
    """Performs one epoch of training.

    Args:
        train_loader: DataLoader over the training split.
        model: network being trained.
        loss_fun: criterion mapping (preds, labels) to a scalar loss.
        optimizer: optimizer; its learning rate is reset here every epoch.
        train_meter: TrainMeter accumulating per-iteration/per-epoch stats.
        cur_epoch: zero-based index of the epoch being run.
        writer_train: optional tensorboard writer for epoch stats.
        params, flops: model size stats forwarded to the epoch log.
        is_master: True on the master process; gates logging.

    Returns:
        The meter's epoch stats for this epoch (fetched before reset).
    """
    # Shuffle the data
    loader.shuffle(train_loader, cur_epoch)
    # Update the learning rate
    lr = optim.get_epoch_lr(cur_epoch)
    optim.set_lr(optimizer, lr)
    # Enable training mode
    model.train()
    train_meter.iter_tic()
    for cur_iter, (inputs, labels) in enumerate(train_loader):
        # Transfer the data to the current GPU device
        inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
        # Perform the forward pass
        preds = model(inputs)
        # Compute the loss
        loss = loss_fun(preds, labels)
        # Perform the backward pass
        optimizer.zero_grad()
        loss.backward()
        # Update the parameters
        optimizer.step()
        # Compute the errors
        top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
        # Combine the stats across the GPUs
        if cfg.NUM_GPUS > 1:
            loss, top1_err, top5_err = du.scaled_all_reduce(
                [loss, top1_err, top5_err]
            )
        # Copy the stats from GPU to CPU (sync point)
        loss, top1_err, top5_err = loss.item(), top1_err.item(), top5_err.item()
        train_meter.iter_toc()
        # Update and log stats
        train_meter.update_stats(
            top1_err, top5_err, loss, lr, inputs.size(0) * cfg.NUM_GPUS
        )
        train_meter.log_iter_stats(cur_epoch, cur_iter)
        train_meter.iter_tic()
    # Log epoch stats
    train_meter.log_epoch_stats(cur_epoch, writer_train, params, flops, is_master=is_master)
    trg_stats = train_meter.get_epoch_stats(cur_epoch)
    train_meter.reset()
    return trg_stats
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
    """Evaluates the model on the test set.

    Args:
        test_loader: DataLoader over the evaluation split.
        model: network to evaluate (switched to eval mode here).
        test_meter: TestMeter accumulating per-iteration/per-epoch stats.
        cur_epoch: epoch index used for logging and graph snapshots.
        writer_eval: optional tensorboard writer for epoch stats.
        params, flops: model size stats forwarded to the epoch log.
        is_master: True on the master process; gates logging.

    Returns:
        The meter's epoch stats for this epoch (fetched before reset).
    """
    # Enable eval mode
    model.eval()
    test_meter.iter_tic()
    for cur_iter, (inputs, labels) in enumerate(test_loader):
        # Transfer the data to the current GPU device
        inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
        # Compute the predictions
        preds = model(inputs)
        # Compute the errors
        top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
        # Combine the errors across the GPUs
        if cfg.NUM_GPUS > 1:
            top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
        # Copy the errors from GPU to CPU (sync point)
        top1_err, top5_err = top1_err.item(), top5_err.item()
        test_meter.iter_toc()
        # Update and log stats
        test_meter.update_stats(
            top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
        )
        test_meter.log_iter_stats(cur_epoch, cur_iter)
        test_meter.iter_tic()
    # Log epoch stats
    # test_meter.log_epoch_stats(cur_epoch,writer_eval,params,flops)
    test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
    eval_stats = test_meter.get_epoch_stats(cur_epoch)
    test_meter.reset()
    # Optionally snapshot the model's relational graph (adjacency + top1 err)
    # so graph structure can be correlated with accuracy later.
    if cfg.RGRAPH.SAVE_GRAPH:
        adj_dict = nu.model2adj(model)
        adj_dict = {**adj_dict, 'top1_err': eval_stats['top1_err']}
        os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
        np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
    return eval_stats
def train_model(writer_train=None, writer_eval=None, is_master=False):
    """Trains the model.

    Optionally auto-matches the model's flops/params to a per-dataset
    baseline, builds model/loaders, resumes from the last checkpoint when
    available, then runs the train/eval loop for cfg.OPTIM.MAX_EPOCH epochs.

    Args:
        writer_train: optional tensorboard writer for training stats.
        writer_eval: optional tensorboard writer for evaluation stats.
        is_master: True on the master process; gates logging/IO in the meters.
    """
    # Fit flops/params
    if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
        mode = 'flops'  # flops or params
        if cfg.TRAIN.DATASET == 'cifar10':
            pre_repeat = 15
            if cfg.MODEL.TYPE == 'resnet':
                stats_baseline = 40813184
            elif cfg.MODEL.TYPE == 'mlpnet':
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 256:
                        stats_baseline = 985600
                    elif cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 2364416
                    elif cfg.RGRAPH.DIM_LIST[0] == 1024:
                        stats_baseline = 6301696
            elif cfg.MODEL.TYPE == 'cnn':
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 64:
                        stats_baseline = 48957952
                    elif cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 806884352
                    elif cfg.RGRAPH.DIM_LIST[0] == 16:
                        stats_baseline = 1216672
                elif cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 48957952
                    elif '16d' in cfg.OUT_DIR:
                        stats_baseline = 3392128
        elif cfg.TRAIN.DATASET == 'cifar100':
            pre_repeat = 15
            if cfg.MODEL.TYPE == 'resnet':
                stats_baseline = 40813184
            elif cfg.MODEL.TYPE == 'mlpnet':
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 256:
                        stats_baseline = 985600
                    elif cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 2364416
                    elif cfg.RGRAPH.DIM_LIST[0] == 1024:
                        stats_baseline = 6301696
            elif cfg.MODEL.TYPE == 'cnn':
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 806884352
                    elif cfg.RGRAPH.DIM_LIST[0] == 16:
                        stats_baseline = 1216672
                elif cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 48957952
                    elif '16d' in cfg.OUT_DIR:
                        stats_baseline = 3392128
        elif cfg.TRAIN.DATASET == 'tinyimagenet200':
            pre_repeat = 9
            if cfg.MODEL.TYPE == 'resnet':
                if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18:  # ResNet18
                    stats_baseline = 1820000000
                elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34:  # ResNet34
                    stats_baseline = 3663761408
                elif 'sep' in cfg.RESNET.TRANS_FUN:  # ResNet34-sep
                    stats_baseline = 553614592
                elif 'bottleneck' in cfg.RESNET.TRANS_FUN:  # ResNet50
                    stats_baseline = 4089184256
            elif cfg.MODEL.TYPE == 'efficientnet':  # EfficientNet
                stats_baseline = 385824092
            elif cfg.MODEL.TYPE == 'cnn':  # CNN
                if cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 166438912
        cfg.defrost()
        stats = model_builder.build_model_stats(mode)
        if stats != stats_baseline:
            # 1st round: set first stage dim
            for i in range(pre_repeat):
                scale = round(math.sqrt(stats_baseline / stats), 2)
                first = cfg.RGRAPH.DIM_LIST[0]
                ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                first = int(round(first * scale))
                cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                stats = model_builder.build_model_stats(mode)
            flag_init = 1 if stats < stats_baseline else -1
            step = 1
            while True:
                first = cfg.RGRAPH.DIM_LIST[0]
                ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                first += flag_init * step
                cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                stats = model_builder.build_model_stats(mode)
                flag = 1 if stats < stats_baseline else -1
                if stats == stats_baseline:
                    break
                if flag != flag_init:
                    if cfg.RGRAPH.UPPER == False:  # make sure the stats is SMALLER than baseline
                        if flag < 0:
                            first = cfg.RGRAPH.DIM_LIST[0]
                            ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                            first -= flag_init * step
                            cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                        break
                    else:
                        if flag > 0:
                            first = cfg.RGRAPH.DIM_LIST[0]
                            ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                            first -= flag_init * step
                            cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                        break
            # 2nd round: set other stage dim
            first = cfg.RGRAPH.DIM_LIST[0]
            ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
            stats = model_builder.build_model_stats(mode)
            flag_init = 1 if stats < stats_baseline else -1
            if 'share' not in cfg.RESNET.TRANS_FUN:
                for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
                    for j in range(ratio_list[i]):
                        cfg.RGRAPH.DIM_LIST[i] += flag_init
                        stats = model_builder.build_model_stats(mode)
                        flag = 1 if stats < stats_baseline else -1
                        if flag_init != flag:
                            cfg.RGRAPH.DIM_LIST[i] -= flag_init
                            break
        stats = model_builder.build_model_stats(mode)
        print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
    # Build the model (before the loaders to ease debugging)
    model = model_builder.build_model()
    params, flops = log_model_info(model, writer_eval)
    # Define the loss function
    loss_fun = losses.get_loss_fun()
    # Construct the optimizer
    optimizer = optim.construct_optimizer(model)
    # Load a checkpoint if applicable
    start_epoch = 0
    if cfg.TRAIN.AUTO_RESUME and cu.has_checkpoint():
        last_checkpoint = cu.get_checkpoint_last()
        checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
        logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
        if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
            exit()
            start_epoch = checkpoint_epoch
        else:
            start_epoch = checkpoint_epoch + 1
    # Create data loaders
    # Retrieve the data path for the dataset
    data_path = dp.get_data_path(cfg.TRAIN.DATASET)
    traindir = os.path.join(data_path, cfg.TRAIN.SPLIT)
    valdir = os.path.join(data_path, cfg.TEST.SPLIT, 'images')
    valgtfile = os.path.join(data_path, cfg.TEST.SPLIT, 'val_annotations.txt')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # create training dataset and loader
    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(traindir, transforms.Compose([
            transforms.Resize(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
        shuffle=True,
        num_workers=cfg.DATA_LOADER.NUM_WORKERS,
        pin_memory=True)
    # create validation dataset
    test_dataset = TinyImageNet(
        valdir,
        valgtfile,
        class_to_idx=train_loader.dataset.class_to_idx.copy(),
        transform=transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor(),
            normalize]))
    # create validation loader
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
        shuffle=False,
        num_workers=cfg.DATA_LOADER.NUM_WORKERS,
        pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
        drop_last=False)
    # Create meters
    train_meter = TrainMeter(len(train_loader))
    test_meter = TestMeter(len(test_loader))
    # NOTE: the FGSM meter built here previously referenced an undefined
    # `test_loader_adv` (guaranteed NameError) and was never used; removed.
    if cfg.ONLINE_FLOPS:
        model_dummy = model_builder.build_model()
        IMAGE_SIZE = 224
        n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
        logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
        del (model_dummy)
    # Perform the training loop
    logger.info('Start epoch: {}'.format(start_epoch + 1))
    # do eval at initialization
    initial_eval_stats = eval_epoch(test_loader, model, test_meter, -1,
                                    writer_eval, params, flops, is_master=is_master)
    if start_epoch == cfg.OPTIM.MAX_EPOCH:
        cur_epoch = start_epoch - 1
        last_epoch_eval_stats = eval_epoch(test_loader, model, test_meter, cur_epoch,
                                           writer_eval, params, flops, is_master=is_master)
    else:
        for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH):
            print('Epoch {} Started'.format(cur_epoch))
            # Train for one epoch
            trg_stats = train_epoch(
                train_loader, model, loss_fun, optimizer, train_meter, cur_epoch,
                writer_train, is_master=is_master
            )
            # Compute precise BN stats
            if cfg.BN.USE_PRECISE_STATS:
                nu.compute_precise_bn_stats(model, train_loader)
            # Save a checkpoint
            if cu.is_checkpoint_epoch(cur_epoch):
                checkpoint_file = cu.save_checkpoint(model, optimizer, cur_epoch)
                logger.info('Wrote checkpoint to: {}'.format(checkpoint_file))
            # Evaluate the model
            if is_eval_epoch(cur_epoch):
                eval_stats = eval_epoch(test_loader, model, test_meter, cur_epoch,
                                        writer_eval, params, flops, is_master=is_master)
def single_proc_train():
    """Set up logging/seeding and run training in a single process."""
    lu.setup_logging()
    logger.info('Config:\n{}'.format(cfg))
    # Tensorboard writers only exist on the master process, and only for the
    # first seed of the sweep.
    writer_train, writer_eval = None, None
    use_tensorboard = (cfg.TENSORBOARD and du.is_master_proc()
                       and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START)
    if use_tensorboard:
        stamp = datetime.now().strftime('%b%d_%H-%M-%S')
        logdir_train = os.path.join(cfg.OUT_DIR, 'runs', stamp + '_train')
        logdir_eval = os.path.join(cfg.OUT_DIR, 'runs', stamp + '_eval')
        for logdir in (logdir_train, logdir_eval):
            if not os.path.exists(logdir):
                os.makedirs(logdir)
        writer_train = SummaryWriter(logdir_train)
        writer_eval = SummaryWriter(logdir_eval)
    # Fix the RNG seeds (see RNG comment in core/config.py for discussion)
    np.random.seed(cfg.RGRAPH.SEED_TRAIN)
    torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
    # Configure the CUDNN backend
    torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
    # Train the model
    train_model(writer_train, writer_eval, is_master=du.is_master_proc())
    if writer_train is not None and writer_eval is not None:
        writer_train.close()
        writer_eval.close()
def check_seed_exists(i):
    """Return True if results for the i-th seed were already written."""
    fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
    if not os.path.isfile(fname):
        return False
    # One result line per completed seed: seed i is done once the file
    # holds more than i lines.
    with open(fname, 'r') as f:
        return len(f.readlines()) > i
def main():
    """Load the config and train once per seed, multi-GPU aware."""
    args = parse_args()
    # Merge config-file and command-line options into the global config.
    cfg.merge_from_file(args.cfg_file)
    cfg.merge_from_list(args.opts)
    assert_cfg()
    # cfg.freeze()
    # Make sure the output directory exists, then persist the config.
    os.makedirs(cfg.OUT_DIR, exist_ok=True)
    dump_cfg()
    seeds = range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)
    for i, cfg.RGRAPH.SEED_TRAIN in enumerate(seeds):
        # Skip seeds whose results are already on disk.
        if check_seed_exists(i):
            print('Seed {} exists, skip!'.format(cfg.RGRAPH.SEED_TRAIN))
            continue
        # Fan out across GPUs when more than one is requested.
        if cfg.NUM_GPUS > 1:
            mpu.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=single_proc_train)
        else:
            single_proc_train()
# Script entry point.
if __name__ == '__main__':
    main()
| 21,617 | 37.741935 | 129 | py |
RobDanns | RobDanns-main/deep_learning/tools/adversarial-inference-tinyimagenet200.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
from __future__ import print_function
import argparse
import numpy as np
import os
import sys
import torch
import multiprocessing as mp
import math
import pdb
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
from PIL import Image
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.paths as dp
import time
from datetime import datetime
from tensorboardX import SummaryWriter
# NOTE(review): touches CUDA at import time; this fails on a CPU-only
# machine — confirm that is intended.
print("Let's use GPU :", torch.cuda.current_device())
# Module-level logger for this script.
logger = lu.get_logger(__name__)
def parse_args():
    """Parse the command-line arguments for this script."""
    ap = argparse.ArgumentParser(description='Train a classification model')
    ap.add_argument('--cfg', dest='cfg_file', help='Config file',
                    required=True, type=str)
    ap.add_argument('opts', help='See pycls/core/config.py for all options',
                    default=None, nargs=argparse.REMAINDER)
    # With no arguments at all, show the usage text and bail out.
    if len(sys.argv) == 1:
        ap.print_help()
        sys.exit(1)
    return ap.parse_args()
# TEST/VAL DATA_LOADER FOR TINY_IMAGENET200
def parseClasses(file):
    """Parse a TinyImageNet annotation file.

    Each line has the form "<filename> <classname> ...extra fields...".
    Streams the file line by line instead of materializing every line first,
    and iterates directly instead of indexing by position.

    Returns:
        (filenames, classes): two parallel lists of strings.
    """
    classes = []
    filenames = []
    with open(file) as f:
        for line in f:
            tokens = line.strip().split()
            filenames.append(tokens[0])
            classes.append(tokens[1])
    return filenames, classes
# Image filename suffixes accepted by load_allimages (case-insensitive).
_IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif')


def load_allimages(dir):
    """Return sorted paths of every image file found under `dir` (recursive).

    Exits the process with status -1 when `dir` is not a directory, matching
    the original script behavior.
    """
    images = []
    if not os.path.isdir(dir):
        sys.exit(-1)
    for root, _, fnames in sorted(os.walk(dir)):
        for fname in sorted(fnames):
            # Case-insensitive suffix check; equivalent to torchvision's
            # datasets.folder.has_file_allowed_extension, but stdlib-only.
            if fname.lower().endswith(_IMG_EXTENSIONS):
                images.append(os.path.join(root, fname))
    return images
class TinyImageNet(torch.utils.data.Dataset):
    """TinyImageNet-200 validation split.

    Labels come from the annotation file at `gt_path` (rows of
    "<filename> <classname> ..."); each class name is mapped to an integer
    index through `class_to_idx`.
    """

    def __init__(self, img_path, gt_path, class_to_idx=None, transform=None):
        self.img_path = img_path
        self.transform = transform
        self.gt_path = gt_path
        self.class_to_idx = class_to_idx
        # Resolve every annotated class name to its integer index up front.
        self.imgs, self.classnames = parseClasses(gt_path)
        self.classidx = [self.class_to_idx[name] for name in self.classnames]

    def __getitem__(self, index):
        """Return the (transformed image, integer label) pair at `index`."""
        with open(os.path.join(self.img_path, self.imgs[index]), 'rb') as fh:
            image = Image.open(fh).convert('RGB')
            if self.transform is not None:
                image = self.transform(image)
        return image, self.classidx[index]

    def __len__(self):
        return len(self.imgs)
def is_eval_epoch(cur_epoch):
    """Return True when evaluation should run after `cur_epoch` (0-based)."""
    completed = cur_epoch + 1
    return completed % cfg.TRAIN.EVAL_PERIOD == 0 or completed == cfg.OPTIM.MAX_EPOCH
def log_model_info(model, writer_eval=None):
    """Logs model info.

    Logs the model structure, its parameter and FLOP counts, and the
    relational-graph node count; mirrors params/flops to the tensorboard
    writer when one is provided.

    Returns:
        (params, flops): the two complexity counts.
    """
    logger.info('Model:\n{}'.format(model))
    params = mu.params_count(model)
    flops = mu.flops_count(model)
    logger.info('Params: {:,}'.format(params))
    logger.info('Flops: {:,}'.format(flops))
    logger.info('Number of node: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
    if writer_eval is not None:
        # Logged at a fixed step (1): these are per-run constants, not series.
        writer_eval.add_scalar('Params', params, 1)
        writer_eval.add_scalar('Flops', flops, 1)
    return params, flops
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
    """Evaluates the model on the test set.

    Runs one full pass over `test_loader`, accumulating top-1/top-5 errors in
    `test_meter`, logs per-iteration and epoch stats, and optionally saves the
    model's adjacency structure to disk when cfg.RGRAPH.SAVE_GRAPH is set.
    """
    # Enable eval mode
    model.eval()
    test_meter.iter_tic()
    for cur_iter, (inputs, labels) in enumerate(test_loader):
        # Transfer the data to the current GPU device
        inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
        # Compute the predictions
        preds = model(inputs)
        # Compute the errors
        top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
        # Combine the errors across the GPUs
        if cfg.NUM_GPUS > 1:
            top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
        # Copy the errors from GPU to CPU (sync point)
        top1_err, top5_err = top1_err.item(), top5_err.item()
        test_meter.iter_toc()
        # Update and log stats; the sample count is scaled by NUM_GPUS because
        # each device processes its own shard of the batch.
        test_meter.update_stats(
            top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
        )
        test_meter.log_iter_stats(cur_epoch, cur_iter)
        test_meter.iter_tic()
    # Log epoch stats
    test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
    eval_stats = test_meter.get_epoch_stats(cur_epoch)
    test_meter.reset()
    if cfg.RGRAPH.SAVE_GRAPH:
        # Persist the learned graph together with its eval error so graphs can
        # later be correlated with accuracy.
        adj_dict = nu.model2adj(model)
        adj_dict = {**adj_dict, 'top1_err': eval_stats['top1_err']}
        os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
        np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
    # return eval_stats
class Normalize(torch.nn.Module):
    """Per-channel input normalization: (x - mean) / std.

    The statistics are registered as buffers so they move with the module
    across devices. Generalized from the original hard-coded 3-channel
    reshape (1, 3, 1, 1) to any channel count via `-1`.
    """

    def __init__(self, mean, std):
        super(Normalize, self).__init__()
        self.register_buffer('mean', torch.Tensor(mean))
        self.register_buffer('std', torch.Tensor(std))

    def forward(self, input):
        # Reshape to (1, C, 1, 1) so the stats broadcast over NCHW batches.
        mean = self.mean.reshape(1, -1, 1, 1)
        std = self.std.reshape(1, -1, 1, 1)
        norm_img = (input - mean) / std
        return norm_img
# Identity module that dumps whatever flows through it — debugging aid for
# inspecting intermediate tensors inside a Sequential pipeline.
class PrintLayer(torch.nn.Module):
    """Pass-through layer that prints its input and returns it unchanged."""

    def __init__(self):
        super(PrintLayer, self).__init__()

    def forward(self, x):
        print(x)
        return x
def train_model(writer_train=None, writer_eval=None, is_master=False):
    """Runs clean inference on TinyImageNet-200 plus an adversarial sweep.

    Optionally auto-matches model FLOPs/params to a baseline, loads the last
    checkpoint, evaluates clean top-1/top-5 error, then runs one PGD attack,
    seven FGSM attacks and one CW attack over the validation images and
    appends all accuracies to ``{cfg.OUT_DIR}/stats.txt``.

    Fixes over the original: this file never imported `torchattacks` (only
    the sibling script did), the `normalize` transform used below was
    commented out (NameError), and the epsilon list / image-count placeholder
    tokens did not parse.
    """
    import torchattacks  # local import: missing from this file's top-level imports

    # Fit flops/params
    if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
        mode = 'flops'  # flops or params
        if cfg.TRAIN.DATASET == 'cifar10':
            pre_repeat = 15
            if cfg.MODEL.TYPE == 'resnet':  # ResNet20
                stats_baseline = 40813184
            elif cfg.MODEL.TYPE == 'mlpnet':  # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 256:
                        stats_baseline = 985600
                    elif cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 2364416
                    elif cfg.RGRAPH.DIM_LIST[0] == 1024:
                        stats_baseline = 6301696
            elif cfg.MODEL.TYPE == 'cnn':
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 64:
                        stats_baseline = 48957952
                    elif cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 806884352
                    elif cfg.RGRAPH.DIM_LIST[0] == 16:
                        stats_baseline = 1216672
                elif cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 48957952
                    elif '16d' in cfg.OUT_DIR:
                        stats_baseline = 3392128
        elif cfg.TRAIN.DATASET == 'cifar100':
            pre_repeat = 15
            if cfg.MODEL.TYPE == 'resnet':  # ResNet20
                stats_baseline = 40813184
            elif cfg.MODEL.TYPE == 'mlpnet':  # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 256:
                        stats_baseline = 985600
                    elif cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 2364416
                    elif cfg.RGRAPH.DIM_LIST[0] == 1024:
                        stats_baseline = 6301696
            elif cfg.MODEL.TYPE == 'cnn':
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 806884352
                    elif cfg.RGRAPH.DIM_LIST[0] == 16:
                        stats_baseline = 1216672
                elif cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 48957952
                    elif '16d' in cfg.OUT_DIR:
                        stats_baseline = 3392128
        elif cfg.TRAIN.DATASET == 'tinyimagenet200':
            pre_repeat = 9
            if cfg.MODEL.TYPE == 'resnet':
                if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18:  # ResNet18
                    stats_baseline = 1820000000
                elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34:  # ResNet34
                    stats_baseline = 3663761408
                elif 'sep' in cfg.RESNET.TRANS_FUN:  # ResNet34-sep
                    stats_baseline = 553614592
                elif 'bottleneck' in cfg.RESNET.TRANS_FUN:  # ResNet50
                    stats_baseline = 4089184256
            elif cfg.MODEL.TYPE == 'efficientnet':  # EfficientNet
                stats_baseline = 385824092
            elif cfg.MODEL.TYPE == 'cnn':  # CNN
                if cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 166438912
        cfg.defrost()
        stats = model_builder.build_model_stats(mode)
        if stats != stats_baseline:
            # 1st round: scale the first stage dim until the stats approach
            # the baseline, then bisect by unit steps.
            for i in range(pre_repeat):
                scale = round(math.sqrt(stats_baseline / stats), 2)
                first = cfg.RGRAPH.DIM_LIST[0]
                ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                first = int(round(first * scale))
                cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                stats = model_builder.build_model_stats(mode)
            flag_init = 1 if stats < stats_baseline else -1
            step = 1
            while True:
                first = cfg.RGRAPH.DIM_LIST[0]
                ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                first += flag_init * step
                cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                stats = model_builder.build_model_stats(mode)
                flag = 1 if stats < stats_baseline else -1
                if stats == stats_baseline:
                    break
                if flag != flag_init:
                    if cfg.RGRAPH.UPPER == False:  # make sure the stats is SMALLER than baseline
                        if flag < 0:
                            first = cfg.RGRAPH.DIM_LIST[0]
                            ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                            first -= flag_init * step
                            cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                        break
                    else:
                        if flag > 0:
                            first = cfg.RGRAPH.DIM_LIST[0]
                            ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                            first -= flag_init * step
                            cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                        break
            # 2nd round: set other stage dim
            first = cfg.RGRAPH.DIM_LIST[0]
            ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
            stats = model_builder.build_model_stats(mode)
            flag_init = 1 if stats < stats_baseline else -1
            if 'share' not in cfg.RESNET.TRANS_FUN:
                for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
                    for j in range(ratio_list[i]):
                        cfg.RGRAPH.DIM_LIST[i] += flag_init
                        stats = model_builder.build_model_stats(mode)
                        flag = 1 if stats < stats_baseline else -1
                        if flag_init != flag:
                            cfg.RGRAPH.DIM_LIST[i] -= flag_init
                            break
            stats = model_builder.build_model_stats(mode)
            print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
    # Build the model (before the loaders to ease debugging)
    model = model_builder.build_model()
    params, flops = log_model_info(model, writer_eval)
    # Define the loss function
    loss_fun = losses.get_loss_fun()
    # Construct the optimizer
    optimizer = optim.construct_optimizer(model)
    # Load a checkpoint if applicable
    start_epoch = 0
    if cu.had_checkpoint():
        print("Checking for a checkpoint")
        last_checkpoint = cu.get_checkpoint_last()
        print("Last Checkpoint : ", last_checkpoint)
        checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
        logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
        if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
            exit()
            # NOTE(review): unreachable after exit() — preserved from the
            # original; confirm the intended gating at MAX_EPOCH.
            start_epoch = checkpoint_epoch
        else:
            start_epoch = checkpoint_epoch + 1
    print("Epoch = ", start_epoch)
    # Create data loaders
    data_path = dp.get_data_path(cfg.TRAIN.DATASET)  # Retrieve the data path for the dataset
    traindir = os.path.join(data_path, cfg.TRAIN.SPLIT)
    valdir = os.path.join(data_path, cfg.TEST.SPLIT, 'images')
    valgtfile = os.path.join(data_path, cfg.TEST.SPLIT, 'val_annotations.txt')
    # This definition was commented out in the original even though both
    # Compose pipelines below reference `normalize` (NameError at runtime).
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # create training dataset and loader
    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(traindir, transforms.Compose([
            transforms.Resize(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
        shuffle=True,
        num_workers=cfg.DATA_LOADER.NUM_WORKERS,
        pin_memory=True)
    # create validation dataset
    test_dataset = TinyImageNet(
        valdir,
        valgtfile,
        class_to_idx=train_loader.dataset.class_to_idx.copy(),
        transform=transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor(),
            normalize]))
    # create validation loader
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
        shuffle=False,
        num_workers=cfg.DATA_LOADER.NUM_WORKERS,
        pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
        drop_last=False)
    # create adversarial dataset (un-normalized: the attacks operate on [0, 1]
    # pixels and the Normalize module is prepended to the network instead)
    adv_dataset = TinyImageNet(
        valdir,
        valgtfile,
        class_to_idx=train_loader.dataset.class_to_idx.copy(),
        transform=transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor()]))
    # create adversarial loader (batch size 1: one image per attack step)
    test_loader_adv = torch.utils.data.DataLoader(
        adv_dataset,
        batch_size=1,
        shuffle=True,
        num_workers=cfg.DATA_LOADER.NUM_WORKERS,
        pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
        drop_last=False)
    # Create meters
    test_meter = TestMeter(len(test_loader))
    test_meter_adv = TestMeter(len(test_loader_adv))
    if cfg.ONLINE_FLOPS:
        model_dummy = model_builder.build_model()
        IMAGE_SIZE = 224
        n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
        logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
        del (model_dummy)
    # Perform the training loop
    logger.info('Start epoch: {}'.format(start_epoch + 1))
    if start_epoch == cfg.OPTIM.MAX_EPOCH:
        cur_epoch = start_epoch - 1
        eval_epoch(test_loader, model, test_meter, cur_epoch,
                   writer_eval, params, flops, is_master=is_master)
    # Attack schedule: the sentinel 0 selects PGD, the sentinel 1 selects CW,
    # every other entry is used as an FGSM epsilon. Exactly seven FGSM budgets
    # are needed because the stats line below records accFGSM1..accFGSM7.
    # (The original shipped un-runnable placeholders `[0, eps1, ... epsN, 1]`.)
    fgsm_epsilons = [1 / 255, 2 / 255, 4 / 255, 8 / 255, 16 / 255, 32 / 255, 64 / 255]
    epsilons = [0] + fgsm_epsilons + [1]
    # Number of adversarial images evaluated per attack (placeholder `X` in
    # the original source).
    adv_image_limit = 1000
    # Per-channel mean and SD values in BGR order for TinyImageNet dataset
    tinyimagenet_MEAN = [0.485, 0.456, 0.406]
    tinyimagenet_SD = [0.229, 0.224, 0.225]
    accuracies = []
    # add normalization layer to the model
    norm_layer = Normalize(mean=tinyimagenet_MEAN, std=tinyimagenet_SD)
    net = torch.nn.Sequential(norm_layer, model).cuda()
    net = net.eval()
    for epsilon in epsilons:
        if epsilon == 0:
            print("Running PGD Attack")
            # NOTE(review): alpha=2/225 looks like a typo for 2/255 — confirm
            # against the torchattacks reference notebook before changing.
            atk = torchattacks.PGD(net, eps=1/510, alpha=2/225, steps=7)
        elif epsilon == 1:
            print("Running CW Attack")
            atk = torchattacks.CW(net, c=0.1, kappa=0, steps=100, lr=0.01)
        else:
            print("Running FGSM Attacks on epsilon :", epsilon)
            atk = torchattacks.FGSM(net, eps=epsilon)
        ctr = 0
        correct = 0
        total = 0
        for cur_iter, (inputs, labels) in enumerate(test_loader_adv):
            inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
            adv_images = atk(inputs, labels)
            outputs = net(adv_images)
            _, predicted = torch.max(outputs.data, 1)
            ctr += 1
            total += 1
            correct += (predicted == labels).sum()
            if ctr > adv_image_limit:
                print(ctr, " images done for epsilon:", epsilon)
                break
        acc = 100 * float(correct) / total
        print("acc =", round(acc, 2), "correct =", float(correct), "total =", total)
        accuracies.append(round(acc, 2))
        print('Attack Accuracy = {:.3f} with epsilon = {:.4f}'.format(acc, epsilon))
        print("accuracies after apend :", accuracies)
    # Split the collected accuracies into the named values written below
    # (loop order: PGD, seven FGSM budgets, CW).
    (accPGD, accFGSM1, accFGSM2, accFGSM3, accFGSM4, accFGSM5,
     accFGSM6, accFGSM7, accCW) = accuracies
    # load the top1 error and top5 error from the evaluation results
    with open("{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH), "r") as f:
        c_ids = []
        for i in f.readlines():
            sub_id = list(map(float, i.split(",")))
            c_ids.append(sub_id[3:5])
    topK_errors = [sum(i) / len(c_ids) for i in zip(*c_ids)]
    top1_error, top5_error = topK_errors[0], topK_errors[1]
    result_info = ', '.join(
        [str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
         '{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
         str(accPGD), str(accFGSM1), str(accFGSM2), str(accFGSM3), str(accFGSM4), str(accFGSM5),
         str(accFGSM6), str(accFGSM7), str(accCW)])
    with open("{}/stats.txt".format(cfg.OUT_DIR), "a") as text_file:
        print(" Writing Text File with accuracies {} ".format(accuracies))
        text_file.write(result_info + '\n')
def single_proc_train():
    """Performs single process training."""
    # Setup logging
    lu.setup_logging()
    logger.info('Config:\n{}'.format(cfg))
    writer_train, writer_eval = None, None
    # Tensorboard writers are created only on the master process and only for
    # the first seed of the sweep.
    if cfg.TENSORBOARD and du.is_master_proc() and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
        comment = ''
        current_time = datetime.now().strftime('%b%d_%H-%M-%S')
        logdir_train = os.path.join(cfg.OUT_DIR, 'runs', current_time + comment + '_train')
        logdir_eval = os.path.join(cfg.OUT_DIR, 'runs', current_time + comment + '_eval')
        os.makedirs(logdir_train, exist_ok=True)
        os.makedirs(logdir_eval, exist_ok=True)
        writer_train = SummaryWriter(logdir_train)
        writer_eval = SummaryWriter(logdir_eval)
    # Fix the RNG seeds (see RNG comment in core/config.py for discussion),
    # then configure the cuDNN backend.
    np.random.seed(cfg.RGRAPH.SEED_TRAIN)
    torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
    torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
    # Launch inference + adversarial run
    train_model(writer_train, writer_eval, is_master=du.is_master_proc())
    if writer_train is not None and writer_eval is not None:
        writer_train.close()
        writer_eval.close()
def check_seed_exists(i):
    """Return True when the results file already holds more than `i` lines."""
    fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
    if not os.path.isfile(fname):
        return False
    with open(fname, 'r') as f:
        return len(f.readlines()) > i
def main():
    """Entry point: load the config, then run inference once per seed."""
    args = parse_args()
    # Load config options
    cfg.merge_from_file(args.cfg_file)
    cfg.merge_from_list(args.opts)
    assert_cfg()
    # cfg.freeze()
    # Ensure that the output dir exists, then persist the merged config.
    os.makedirs(cfg.OUT_DIR, exist_ok=True)
    dump_cfg()
    seeds = range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)
    for i, seed in enumerate(seeds):
        cfg.RGRAPH.SEED_TRAIN = seed
        # Skip seeds whose results were already written.
        if check_seed_exists(i):
            print('Inference seed {} already exists, stopping inference'.format(cfg.RGRAPH.SEED_TRAIN))
        else:
            print("Launching inference for seed {}".format(i))
            single_proc_train()
if __name__ == '__main__':
    # Script entry point: run the multi-seed adversarial inference sweep.
    main()
| 23,184 | 38.768439 | 147 | py |
RobDanns | RobDanns-main/deep_learning/tools/adversarial-inference.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
import argparse
import pickle
import numpy as np
import os
import sys
import torch
import math
import torchvision
import torchattacks
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.transforms as transforms
from datetime import datetime
from tensorboardX import SummaryWriter
import foolbox as fb
import art
import art.attacks.evasion as evasion
from art.estimators.classification import PyTorchClassifier
print("Using GPU :", torch.cuda.current_device())
logger = lu.get_logger(__name__)
def parse_args():
    """Parse and return the command-line arguments."""
    parser = argparse.ArgumentParser(description='Train a classification model')
    parser.add_argument(
        '--cfg', dest='cfg_file', help='Config file', required=True, type=str)
    parser.add_argument(
        'opts', help='See pycls/core/config.py for all options',
        default=None, nargs=argparse.REMAINDER)
    # Bare invocation: print usage and bail out instead of erroring on the
    # missing required --cfg.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def is_eval_epoch(cur_epoch):
    """Determines if the model should be evaluated at the current epoch."""
    completed = cur_epoch + 1
    if completed == cfg.OPTIM.MAX_EPOCH:
        return True
    return completed % cfg.TRAIN.EVAL_PERIOD == 0
def log_model_info(model, writer_eval=None):
    """Logs model info.

    Logs the model structure, its parameter and FLOP counts, and the
    relational-graph node count; mirrors params/flops to the tensorboard
    writer when one is provided.

    Returns:
        (params, flops): the two complexity counts.
    """
    logger.info('Model:\n{}'.format(model))
    params = mu.params_count(model)
    flops = mu.flops_count(model)
    logger.info('Params: {:,}'.format(params))
    logger.info('Flops: {:,}'.format(flops))
    logger.info('Number of node: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
    if writer_eval is not None:
        # Logged at a fixed step (1): these are per-run constants, not series.
        writer_eval.add_scalar('Params', params, 1)
        writer_eval.add_scalar('Flops', flops, 1)
    return params, flops
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
    """Evaluates the model on the test set.

    Runs one full pass over `test_loader`, accumulating top-1/top-5 errors in
    `test_meter`, logs per-iteration and epoch stats, and optionally saves the
    model's adjacency structure to disk when cfg.RGRAPH.SAVE_GRAPH is set.
    """
    # Enable eval mode
    model.eval()
    test_meter.iter_tic()
    for cur_iter, (inputs, labels) in enumerate(test_loader):
        # Transfer the data to the current GPU device
        inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
        # Compute the predictions
        preds = model(inputs)
        # Compute the errors
        top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
        # Combine the errors across the GPUs
        if cfg.NUM_GPUS > 1:
            top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
        # Copy the errors from GPU to CPU (sync point)
        top1_err, top5_err = top1_err.item(), top5_err.item()
        test_meter.iter_toc()
        # Update and log stats; the sample count is scaled by NUM_GPUS because
        # each device processes its own shard of the batch.
        test_meter.update_stats(
            top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
        )
        test_meter.log_iter_stats(cur_epoch, cur_iter)
        test_meter.iter_tic()
    # Log epoch stats
    test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
    stats = test_meter.get_epoch_stats(cur_epoch)
    test_meter.reset()
    if cfg.RGRAPH.SAVE_GRAPH:
        # Persist the learned graph together with its eval error so graphs can
        # later be correlated with accuracy.
        adj_dict = nu.model2adj(model)
        adj_dict = {**adj_dict, 'top1_err': stats['top1_err']}
        os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
        np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
class Normalize(torch.nn.Module):
    """Per-channel input normalization: (x - mean) / std.

    The statistics are registered as buffers so they move with the module
    across devices. Generalized from the original hard-coded 3-channel
    reshape (1, 3, 1, 1) to any channel count via `-1`.
    """

    def __init__(self, mean, std):
        super(Normalize, self).__init__()
        self.register_buffer('mean', torch.Tensor(mean))
        self.register_buffer('std', torch.Tensor(std))

    def forward(self, input):
        # Reshape to (1, C, 1, 1) so the stats broadcast over NCHW batches.
        mean = self.mean.reshape(1, -1, 1, 1)
        std = self.std.reshape(1, -1, 1, 1)
        norm_img = (input - mean) / std
        return norm_img
# Identity module that dumps whatever flows through it — debugging aid for
# inspecting intermediate tensors inside a Sequential pipeline.
class PrintLayer(torch.nn.Module):
    """Pass-through layer that prints its input and returns it unchanged."""

    def __init__(self):
        super(PrintLayer, self).__init__()

    def forward(self, x):
        print(x)
        return x
def train_model(writer_eval=None, is_master=False):
    """Runs clean inference plus an adversarial robustness sweep.

    Optionally auto-matches model FLOPs/params to a baseline, loads the last
    checkpoint, evaluates clean top-1/top-5 error, then runs two PGD attacks,
    five FGSM attacks and two CW attacks over the adversarial test loader and
    appends all accuracies to ``{cfg.OUT_DIR}/stats.txt``.

    Fixes over the original: the epsilon list and per-attack image count were
    un-runnable placeholder tokens (`eps1, ... epsN` and bare `X`), and the
    results file was opened without being closed.
    """
    # Fit flops/params
    if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
        mode = 'flops'  # flops or params
        if cfg.TRAIN.DATASET == 'cifar10':
            pre_repeat = 15
            if cfg.MODEL.TYPE == 'resnet':  # ResNet20
                stats_baseline = 40813184
            elif cfg.MODEL.TYPE == 'mlpnet':  # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 256:
                        stats_baseline = 985600
                    elif cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 2364416
                    elif cfg.RGRAPH.DIM_LIST[0] == 1024:
                        stats_baseline = 6301696
            elif cfg.MODEL.TYPE == 'cnn':
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 64:
                        stats_baseline = 48957952
                    elif cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 806884352
                    elif cfg.RGRAPH.DIM_LIST[0] == 16:
                        stats_baseline = 1216672
                elif cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 48957952
                    elif '16d' in cfg.OUT_DIR:
                        stats_baseline = 3392128
        elif cfg.TRAIN.DATASET == 'cifar100':
            pre_repeat = 15
            if cfg.MODEL.TYPE == 'resnet':
                if cfg.MODEL.DEPTH == 20:
                    stats_baseline = 40813184  # ResNet20
                elif cfg.MODEL.DEPTH == 26:
                    stats_baseline = 56140000  # ResNet26
                elif cfg.MODEL.DEPTH == 34:
                    stats_baseline = 71480000  # ResNet34
                elif cfg.MODEL.DEPTH == 38:
                    stats_baseline = 86819000  # ResNet38
                elif cfg.MODEL.DEPTH == 50:
                    stats_baseline = 130000000  # ResNet50
            elif cfg.MODEL.TYPE == 'mlpnet':  # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 256:
                        stats_baseline = 985600
                    elif cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 2364416
                    elif cfg.RGRAPH.DIM_LIST[0] == 1024:
                        stats_baseline = 6301696
            elif cfg.MODEL.TYPE == 'cnn':
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 806884352
                    elif cfg.RGRAPH.DIM_LIST[0] == 16:
                        stats_baseline = 1216672
                elif cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 48957952
                    elif '16d' in cfg.OUT_DIR:
                        stats_baseline = 3392128
        elif cfg.TRAIN.DATASET == 'tinyimagenet200':
            pre_repeat = 9
            if cfg.MODEL.TYPE == 'resnet':
                if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18:  # ResNet18
                    stats_baseline = 1820000000
                elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34:  # ResNet34
                    stats_baseline = 3663761408
                elif 'sep' in cfg.RESNET.TRANS_FUN:  # ResNet34-sep
                    stats_baseline = 553614592
                elif 'bottleneck' in cfg.RESNET.TRANS_FUN:  # ResNet50
                    stats_baseline = 4089184256
            elif cfg.MODEL.TYPE == 'efficientnet':  # EfficientNet
                stats_baseline = 385824092
            elif cfg.MODEL.TYPE == 'cnn':  # CNN
                if cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 166438912
        elif cfg.TRAIN.DATASET == 'imagenet':
            pre_repeat = 9
            if cfg.MODEL.TYPE == 'resnet':
                if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18:  # ResNet18
                    stats_baseline = 1820000000
                elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34:  # ResNet34
                    stats_baseline = 3663761408
                elif 'sep' in cfg.RESNET.TRANS_FUN:  # ResNet34-sep
                    stats_baseline = 553614592
                elif 'bottleneck' in cfg.RESNET.TRANS_FUN:  # ResNet50
                    stats_baseline = 4089184256
            elif cfg.MODEL.TYPE == 'efficientnet':  # EfficientNet
                stats_baseline = 385824092
            elif cfg.MODEL.TYPE == 'cnn':  # CNN
                if cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 166438912
        cfg.defrost()
        stats = model_builder.build_model_stats(mode)
        if stats != stats_baseline:
            # 1st round: scale the first stage dim until the stats approach
            # the baseline, then bisect by unit steps.
            for i in range(pre_repeat):
                scale = round(math.sqrt(stats_baseline / stats), 2)
                first = cfg.RGRAPH.DIM_LIST[0]
                ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                first = int(round(first * scale))
                cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                stats = model_builder.build_model_stats(mode)
            flag_init = 1 if stats < stats_baseline else -1
            step = 1
            while True:
                first = cfg.RGRAPH.DIM_LIST[0]
                ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                first += flag_init * step
                cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                stats = model_builder.build_model_stats(mode)
                flag = 1 if stats < stats_baseline else -1
                if stats == stats_baseline:
                    break
                if flag != flag_init:
                    if cfg.RGRAPH.UPPER == False:  # make sure the stats is SMALLER than baseline
                        if flag < 0:
                            first = cfg.RGRAPH.DIM_LIST[0]
                            ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                            first -= flag_init * step
                            cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                        break
                    else:
                        if flag > 0:
                            first = cfg.RGRAPH.DIM_LIST[0]
                            ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                            first -= flag_init * step
                            cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                        break
            # 2nd round: set other stage dim
            first = cfg.RGRAPH.DIM_LIST[0]
            ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
            stats = model_builder.build_model_stats(mode)
            flag_init = 1 if stats < stats_baseline else -1
            if 'share' not in cfg.RESNET.TRANS_FUN:
                for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
                    for j in range(ratio_list[i]):
                        cfg.RGRAPH.DIM_LIST[i] += flag_init
                        stats = model_builder.build_model_stats(mode)
                        flag = 1 if stats < stats_baseline else -1
                        if flag_init != flag:
                            cfg.RGRAPH.DIM_LIST[i] -= flag_init
                            break
            stats = model_builder.build_model_stats(mode)
            print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
    # Build the model (before the loaders to ease debugging)
    model = model_builder.build_model()
    params, flops = log_model_info(model, writer_eval)
    if cfg.IS_INFERENCE and cfg.IS_DDP:
        model = torch.nn.parallel.DataParallel(model)
    # Define the loss function
    loss_fun = losses.get_loss_fun()
    # Construct the optimizer
    optimizer = optim.construct_optimizer(model)
    # Load a checkpoint if applicable
    start_epoch = 0
    if cu.had_checkpoint():
        print("Checking for a checkpoint")
        last_checkpoint = cu.get_checkpoint_last()
        print("Last Checkpoint : ", last_checkpoint)
        checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
        logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
        if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
            exit()
            # NOTE(review): unreachable after exit() — preserved from the
            # original; confirm the intended gating at MAX_EPOCH.
            start_epoch = checkpoint_epoch
        else:
            start_epoch = checkpoint_epoch + 1
    print("Epoch = ", start_epoch)
    # Create data loaders
    test_loader = loader.construct_test_loader()
    test_loader_adv = loader.construct_test_loader_adv()
    # Create meters
    test_meter = TestMeter(len(test_loader))
    test_meter_adv = TestMeter(len(test_loader_adv))
    if cfg.ONLINE_FLOPS:
        model_dummy = model_builder.build_model()
        IMAGE_SIZE = 224
        n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
        logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
        del (model_dummy)
    # Perform the training loop
    logger.info('Start epoch: {}'.format(start_epoch + 1))
    if start_epoch == cfg.OPTIM.MAX_EPOCH:
        cur_epoch = start_epoch - 1
        eval_epoch(test_loader, model, test_meter, cur_epoch,
                   writer_eval, params, flops, is_master=is_master)
    # Attack schedule: sentinels 0 and 1 select PGD (eps 6/255 and 9/255),
    # sentinels 2 and 3 select CW (c = 0.15 and 0.25); every other entry is an
    # FGSM epsilon. Exactly five FGSM budgets are needed because the stats
    # line below records accFGSM1..accFGSM5. (The original shipped the
    # un-runnable placeholders `[0, 1, eps1, eps2, ... epsN, 2, 3]`.)
    fgsm_epsilons = [1 / 255, 2 / 255, 4 / 255, 8 / 255, 16 / 255]
    epsilons = [0, 1] + fgsm_epsilons + [2, 3]
    # Number of adversarial images evaluated per attack (placeholder `X` in
    # the original source).
    adv_image_limit = 1000
    # Per-channel mean and SD values in BGR order for ImageNet dataset
    cifar10_MEAN = [0.491, 0.482, 0.4465]
    cifar10_SD = [0.247, 0.243, 0.262]
    cifar100_MEAN = [0.507, 0.487, 0.441]
    cifar100_SD = [0.267, 0.256, 0.276]
    imagenet_MEAN = [0.406, 0.456, 0.485]
    imagenet_SD = [0.225, 0.224, 0.229]
    accuracies = []
    # replace the MEAN and SD variable in the following line for the relevant dataset.
    norm_layer = Normalize(mean=cifar10_MEAN, std=cifar10_SD)
    net = torch.nn.Sequential(norm_layer, model).cuda()
    net = net.eval()
    print("Adversarial Loader Batch Size =", test_loader_adv.batch_size)
    for epsilon in epsilons:
        if epsilon == 0:
            print("Running PGD Attack")
            atk_ta = torchattacks.PGD(net, eps=6/255, alpha=2/255, steps=7)
        elif epsilon == 1:
            print("Running PGD Attack")
            atk_ta = torchattacks.PGD(net, eps=9/255, alpha=2/255, steps=7)
        elif epsilon == 2:
            print("Running Torchattacks.CW")
            atk_ta = torchattacks.CW(net, c=0.15, kappa=0, steps=100, lr=0.01)
        elif epsilon == 3:
            print("Running Torchattacks.CW")
            atk_ta = torchattacks.CW(net, c=0.25, kappa=0, steps=100, lr=0.01)
            # Cross-library CW variants (foolbox L2CarliniWagnerAttack, ART
            # CarliniL2Method) were kept commented in the original; recover
            # them from version control if cross-verification is needed.
        else:
            print("Running FGSM Attacks on epsilon :", epsilon)
            atk_ta = torchattacks.FGSM(net, eps=epsilon)
        ctr = 0
        correct_ta = 0
        total = 0
        for cur_iter, (inputs, labels) in enumerate(test_loader_adv):
            inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
            # Scale 0-255 pixel values into [0, 1] before attacking.
            inputs = inputs.float().div(255)
            adv_images_ta = atk_ta(inputs, labels)
            adv_inputs_ta = adv_images_ta.float()
            outputs_ta = net(adv_inputs_ta)
            _, predicted_ta = torch.max(outputs_ta.data, 1)
            ctr += 1
            total += 1
            correct_ta += (predicted_ta == labels).sum()
            if ctr > adv_image_limit:
                print(ctr, " images done for epsilon:", epsilon)
                break
        acc_ta = 100 * float(correct_ta) / total
        print("ta acc =", round(acc_ta, 2), ", ta correct =", float(correct_ta), ", total =", total)
        accuracies.append(round(acc_ta, 2))
        print('Attack Accuracy = {:.3f} with epsilon = {:.2f}'.format(acc_ta, epsilon))
        print("accuracies after apend :", accuracies)
    # Split the collected accuracies into the named values written below
    # (loop order: PGD 6/255, PGD 9/255, five FGSM budgets, CW 0.15, CW 0.25).
    (accPGD_6by255, accPGD_9by255, accFGSM1, accFGSM2, accFGSM3, accFGSM4, accFGSM5,
     accCW_15, accCW_25) = accuracies
    # load the top1 error and top5 error from the evaluation results
    with open("{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH), "r") as f:
        c_ids = []
        for i in f.readlines():
            sub_id = list(map(float, i.split(",")))
            c_ids.append(sub_id[3:5])
    topK_errors = [sum(i) / len(c_ids) for i in zip(*c_ids)]
    top1_error, top5_error = topK_errors[0], topK_errors[1]
    result_info = ', '.join(
        [str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
         '{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
         str(accPGD_6by255), str(accPGD_9by255), str(accFGSM1), str(accFGSM2), str(accFGSM3), str(accFGSM4), str(accFGSM5),
         str(accCW_15), str(accCW_25)])
    with open("{}/stats.txt".format(cfg.OUT_DIR), "a") as text_file:
        print(" Writing Text File with accuracies {} ".format(accuracies))
        text_file.write(result_info + '\n')
def single_proc_train():
    """Run inference in a single process, with optional tensorboard logging."""
    lu.setup_logging()
    logger.info('Config:\n{}'.format(cfg))
    writer_train, writer_eval = None, None
    # Writers are only created on the master process, for the first seed.
    first_seed = cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START
    if cfg.TENSORBOARD and du.is_master_proc() and first_seed:
        stamp = datetime.now().strftime('%b%d_%H-%M-%S')
        run_root = os.path.join(cfg.OUT_DIR, 'runs')
        logdir_train = os.path.join(run_root, stamp + '_train')
        logdir_eval = os.path.join(run_root, stamp + '_eval')
        for logdir in (logdir_train, logdir_eval):
            if not os.path.exists(logdir):
                os.makedirs(logdir)
        writer_train = SummaryWriter(logdir_train)
        writer_eval = SummaryWriter(logdir_eval)
    # Seed numpy and torch RNGs so each seed's run is reproducible.
    np.random.seed(cfg.RGRAPH.SEED_TRAIN)
    torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
    # Configure the CUDNN backend
    torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
    # Launch inference + adversarial run
    train_model(writer_eval, is_master=du.is_master_proc())
    if writer_eval is not None:
        writer_eval.close()
def check_seed_exists(i):
    """Return True if the results file already records more than *i* lines."""
    fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
    if not os.path.isfile(fname):
        return False
    with open(fname, 'r') as f:
        return len(f.readlines()) > i
def main():
    """Entry point: load the config, then run inference once per seed."""
    args = parse_args()
    # Merge config from file first, then from command-line overrides.
    cfg.merge_from_file(args.cfg_file)
    cfg.merge_from_list(args.opts)
    assert_cfg()
    # cfg.freeze()
    os.makedirs(cfg.OUT_DIR, exist_ok=True)
    dump_cfg()
    seed_range = range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)
    for i, cfg.RGRAPH.SEED_TRAIN in enumerate(seed_range):
        # Skip seeds whose results are already on disk.
        if check_seed_exists(i):
            print('Trained seed {} already exists, stopping inference'.format(cfg.RGRAPH.SEED_TRAIN))
        else:
            print("Launching inference for seed {}".format(i))
            single_proc_train()
# Standard script entry point guard.
if __name__ == '__main__':
    main()
| 23,798 | 41.72711 | 166 | py |
RobDanns | RobDanns-main/deep_learning/tools/corruptions-inference.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
import argparse
import pickle
import numpy as np
import os
import sys
import torch
import math
import torchvision
import torchattacks
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.transforms as transforms
from datetime import datetime
from tensorboardX import SummaryWriter
from torchvision.utils import save_image
from skimage.util import random_noise
print("Using GPU :", torch.cuda.current_device())
logger = lu.get_logger(__name__)
def parse_args():
    """Parse command-line arguments: a required --cfg file plus free-form overrides."""
    parser = argparse.ArgumentParser(description='Train a classification model')
    parser.add_argument('--cfg', dest='cfg_file', help='Config file',
                        required=True, type=str)
    parser.add_argument('opts', help='See pycls/core/config.py for all options',
                        default=None, nargs=argparse.REMAINDER)
    # With no arguments at all, print usage instead of an argparse error.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def is_eval_epoch(cur_epoch):
    """Determines if the model should be evaluated at the current epoch."""
    nxt = cur_epoch + 1
    # Evaluate on the configured period, and always on the final epoch.
    return nxt % cfg.TRAIN.EVAL_PERIOD == 0 or nxt == cfg.OPTIM.MAX_EPOCH
def log_model_info(model, writer_eval=None):
    """Log parameter/flop counts and graph size; return (params, flops)."""
    logger.info('Model:\n{}'.format(model))
    params = mu.params_count(model)
    flops = mu.flops_count(model)
    for label, value in (('Params', params), ('Flops', flops)):
        logger.info('{}: {:,}'.format(label, value))
        # Mirror the static stats into tensorboard when a writer is available.
        if writer_eval is not None:
            writer_eval.add_scalar(label, value, 1)
    logger.info('Number of node: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
    return params, flops
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
    """Evaluates the model on the test set.

    Runs one gradient-free pass over *test_loader*, accumulating top-1/top-5
    errors into *test_meter*, then logs epoch stats and optionally saves the
    model's adjacency matrices when cfg.RGRAPH.SAVE_GRAPH is set.
    """
    # Enable eval mode
    model.eval()
    test_meter.iter_tic()
    # val_input_imgs,
    for cur_iter, (inputs, labels) in enumerate(test_loader):
        # Transfer the data to the current GPU device
        inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
        # Compute the predictions
        preds = model(inputs)
        # Compute the errors
        top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
        # Combine the errors across the GPUs
        if cfg.NUM_GPUS > 1:
            top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
        # Copy the errors from GPU to CPU (sync point)
        top1_err, top5_err = top1_err.item(), top5_err.item()
        test_meter.iter_toc()
        # Update and log stats; the meter is fed the global batch size.
        test_meter.update_stats(
            top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
        )
        test_meter.log_iter_stats(cur_epoch, cur_iter)
        test_meter.iter_tic()
    # Log epoch stats
    test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
    stats = test_meter.get_epoch_stats(cur_epoch)
    test_meter.reset()
    # Optionally dump the relational-graph adjacency (plus top-1 error) to disk.
    if cfg.RGRAPH.SAVE_GRAPH:
        adj_dict = nu.model2adj(model)
        adj_dict = {**adj_dict, 'top1_err': stats['top1_err']}
        os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
        np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
def save_noisy_image(img, name):
    """Write a batch of images to *name*, reshaped to (N, 3, S, S) first.

    A spatial size of 32 is kept as-is (CIFAR-style); any other size is
    treated as 224 (ImageNet-style), matching the original branching.
    """
    side = 32 if img.size(2) == 32 else 224
    save_image(img.view(img.size(0), 3, side, side), name)
## Functions to save noisy images.
# def gaussian_noise(test_loader):
# print("Adding gaussian_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# gaussian_img_05 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.05, clip=True))
# gaussian_img_2 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.2, clip=True))
# gaussian_img_4 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.4, clip=True))
# gaussian_img_6 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.6, clip=True))
# save_noisy_image(gaussian_img_05, r"noisy-images/gaussian_05.png")
# save_noisy_image(gaussian_img_2, r"noisy-images/gaussian_2.png")
# save_noisy_image(gaussian_img_4, r"noisy-images/gaussian_4.png")
# save_noisy_image(gaussian_img_6, r"noisy-images/gaussian_6.png")
# break
# def salt_pepper_noise(test_loader):
# print("Adding salt_pepper_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# s_vs_p_5 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.5, clip=True))
# s_vs_p_6 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.6, clip=True))
# s_vs_p_7 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.7, clip=True))
# save_noisy_image(s_vs_p_5, r"noisy-images/s&p_5.png")
# save_noisy_image(s_vs_p_6, r"noisy-images/s&p_6.png")
# save_noisy_image(s_vs_p_7, r"noisy-images/s&p_7.png")
# break
# def speckle_noise(test_loader):
# print("Adding speckle_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# speckle_img_05 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.05, clip=True))
# speckle_img_2 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.2, clip=True))
# speckle_img_4 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.4, clip=True))
# speckle_img_6 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.6, clip=True))
# save_noisy_image(speckle_img_05, r"noisy-images/speckle_05.png")
# save_noisy_image(speckle_img_2, r"noisy-images/speckle_2.png")
# save_noisy_image(speckle_img_4, r"noisy-images/speckle_4.png")
# save_noisy_image(speckle_img_6, r"noisy-images/speckle_6.png")
# break
def train_model(writer_eval=None, is_master=False):
    """Evaluates a trained model and measures robustness to input corruptions.

    Note: despite the name (kept so callers stay unchanged), nothing is
    trained here. The routine:
      1. optionally rescales cfg.RGRAPH.DIM_LIST so the model matches a
         per-dataset flops/params baseline (cfg.TRAIN.AUTO_MATCH),
      2. restores the last checkpoint,
      3. runs a clean-accuracy evaluation epoch,
      4. measures accuracy under gaussian / speckle / salt&pepper noise and
         appends results to *_noise_stats.txt files under cfg.OUT_DIR.

    Fixes vs. the previous revision: the per-level image cap compared against
    an undefined placeholder name ``X`` (NameError at runtime) — replaced by
    the defined ``max_eval_images`` below — and the results file is now
    closed via a ``with`` block.
    """
    # Fit flops/params
    if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
        mode = 'flops'  # flops or params
        # Per-dataset / per-model baselines: the budget of the reference net.
        if cfg.TRAIN.DATASET == 'cifar10':
            pre_repeat = 15
            if cfg.MODEL.TYPE == 'resnet':  # ResNet20
                stats_baseline = 40813184
            elif cfg.MODEL.TYPE == 'mlpnet':  # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 256:
                        stats_baseline = 985600
                    elif cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 2364416
                    elif cfg.RGRAPH.DIM_LIST[0] == 1024:
                        stats_baseline = 6301696
            elif cfg.MODEL.TYPE == 'cnn':
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 64:
                        stats_baseline = 48957952
                    elif cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 806884352
                    elif cfg.RGRAPH.DIM_LIST[0] == 16:
                        stats_baseline = 1216672
                elif cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 48957952
                    elif '16d' in cfg.OUT_DIR:
                        stats_baseline = 3392128
        elif cfg.TRAIN.DATASET == 'cifar100':
            pre_repeat = 15
            if cfg.MODEL.TYPE == 'resnet':  # ResNet20
                if cfg.MODEL.DEPTH == 20:
                    stats_baseline = 40813184  # ResNet20
                elif cfg.MODEL.DEPTH == 26:
                    stats_baseline = 56140000  # ResNet26
                elif cfg.MODEL.DEPTH == 34:
                    stats_baseline = 71480000  # ResNet34
                elif cfg.MODEL.DEPTH == 38:
                    stats_baseline = 86819000  # ResNet38
                elif cfg.MODEL.DEPTH == 50:
                    stats_baseline = 130000000  # ResNet50
            elif cfg.MODEL.TYPE == 'mlpnet':  # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 256:
                        stats_baseline = 985600
                    elif cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 2364416
                    elif cfg.RGRAPH.DIM_LIST[0] == 1024:
                        stats_baseline = 6301696
            elif cfg.MODEL.TYPE == 'cnn':
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 806884352
                    elif cfg.RGRAPH.DIM_LIST[0] == 16:
                        stats_baseline = 1216672
                elif cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 48957952
                    elif '16d' in cfg.OUT_DIR:
                        stats_baseline = 3392128
        elif cfg.TRAIN.DATASET == 'tinyimagenet200':
            pre_repeat = 9
            if cfg.MODEL.TYPE == 'resnet':
                if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18:  # ResNet18
                    stats_baseline = 1820000000
                elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34:  # ResNet34
                    stats_baseline = 3663761408
                elif 'sep' in cfg.RESNET.TRANS_FUN:  # ResNet34-sep
                    stats_baseline = 553614592
                elif 'bottleneck' in cfg.RESNET.TRANS_FUN:  # ResNet50
                    stats_baseline = 4089184256
            elif cfg.MODEL.TYPE == 'efficientnet':  # EfficientNet
                stats_baseline = 385824092
            elif cfg.MODEL.TYPE == 'cnn':  # CNN
                if cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 166438912
        elif cfg.TRAIN.DATASET == 'imagenet':
            pre_repeat = 9
            if cfg.MODEL.TYPE == 'resnet':
                if 'basic' in cfg.RESNET.TRANS_FUN:  # ResNet34
                    stats_baseline = 3663761408
                elif 'sep' in cfg.RESNET.TRANS_FUN:  # ResNet34-sep
                    stats_baseline = 553614592
                elif 'bottleneck' in cfg.RESNET.TRANS_FUN:  # ResNet50
                    stats_baseline = 4089184256
            elif cfg.MODEL.TYPE == 'efficientnet':  # EfficientNet
                stats_baseline = 385824092
            elif cfg.MODEL.TYPE == 'cnn':  # CNN
                if cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 166438912
        cfg.defrost()
        stats = model_builder.build_model_stats(mode)
        if stats != stats_baseline:
            # 1st round: scale the first stage dim until the budget is matched.
            for i in range(pre_repeat):
                scale = round(math.sqrt(stats_baseline / stats), 2)
                first = cfg.RGRAPH.DIM_LIST[0]
                ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                first = int(round(first * scale))
                cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                stats = model_builder.build_model_stats(mode)
            flag_init = 1 if stats < stats_baseline else -1
            step = 1
            while True:
                first = cfg.RGRAPH.DIM_LIST[0]
                ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                first += flag_init * step
                cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                stats = model_builder.build_model_stats(mode)
                flag = 1 if stats < stats_baseline else -1
                if stats == stats_baseline:
                    break
                if flag != flag_init:
                    if cfg.RGRAPH.UPPER == False:  # make sure the stats is SMALLER than baseline
                        if flag < 0:
                            first = cfg.RGRAPH.DIM_LIST[0]
                            ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                            first -= flag_init * step
                            cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                        break
                    else:
                        if flag > 0:
                            first = cfg.RGRAPH.DIM_LIST[0]
                            ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                            first -= flag_init * step
                            cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                        break
            # 2nd round: fine-tune the other stage dims.
            first = cfg.RGRAPH.DIM_LIST[0]
            ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
            stats = model_builder.build_model_stats(mode)
            flag_init = 1 if stats < stats_baseline else -1
            if 'share' not in cfg.RESNET.TRANS_FUN:
                for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
                    for j in range(ratio_list[i]):
                        cfg.RGRAPH.DIM_LIST[i] += flag_init
                        stats = model_builder.build_model_stats(mode)
                        flag = 1 if stats < stats_baseline else -1
                        if flag_init != flag:
                            cfg.RGRAPH.DIM_LIST[i] -= flag_init
                            break
            stats = model_builder.build_model_stats(mode)
        print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
    # Build the model (before the loaders to ease debugging)
    model = model_builder.build_model()
    params, flops = log_model_info(model, writer_eval)
    if cfg.IS_INFERENCE and cfg.IS_DDP:
        model = torch.nn.parallel.DataParallel(model)
    # Define the loss function
    loss_fun = losses.get_loss_fun()
    # Construct the optimizer
    optimizer = optim.construct_optimizer(model)
    # Load a checkpoint if applicable
    start_epoch = 0
    if cu.had_checkpoint():
        print("Checking for a checkpoint")
        last_checkpoint = cu.get_checkpoint_last()
        print("Last Checkpoint : ", last_checkpoint)
        checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
        logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
        if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
            exit()
            start_epoch = checkpoint_epoch  # NOTE(review): unreachable after exit(); kept for parity with train_net.py
        else:
            start_epoch = checkpoint_epoch + 1
    print("Epoch = ", start_epoch)
    # Create data loaders
    test_loader = loader.construct_test_loader()
    # Create meters
    test_meter = TestMeter(len(test_loader))
    if cfg.ONLINE_FLOPS:
        model_dummy = model_builder.build_model()
        IMAGE_SIZE = 224
        n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
        logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
        del (model_dummy)
    logger.info('Start epoch: {}'.format(start_epoch + 1))
    # The robustness sweep only runs once training has fully completed.
    if start_epoch == cfg.OPTIM.MAX_EPOCH:
        cur_epoch = start_epoch - 1
        eval_epoch(test_loader, model, test_meter, cur_epoch,
                   writer_eval, params, flops, is_master=is_master)
        noise_mode = ['gaussian', 'speckle', 's&p']
        noise_var = [0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]  # change the variance values as desired.
        # Upper bound on images evaluated per (mode, level). float('inf')
        # evaluates the whole test set; lower it to subsample. (Replaces the
        # previous undefined placeholder name `X`, which raised NameError.)
        max_eval_images = float('inf')
        model.eval()
        accuracies_gaussian = []
        accuracies_saltpepper = []
        accuracies_speckle = []
        for mode in noise_mode:
            for level in noise_var:
                print("Adding noise={} at level={} to images".format(mode, level))
                ctr = 0
                correct = 0
                total = 0
                for cur_iter, (inputs, labels) in enumerate(test_loader):
                    # salt & pepper takes a ratio instead of a variance.
                    if not 's&p' in mode:
                        noisy_img = torch.tensor(random_noise(inputs, mode=mode, mean=0, var=level, clip=True))
                    else:
                        noisy_img = torch.tensor(random_noise(inputs, mode=mode, salt_vs_pepper=0.5, clip=True))
                    noisy_img, labels = noisy_img.cuda(), labels.cuda(non_blocking=True)
                    outputs = model(noisy_img.float())
                    _, predicted = torch.max(outputs.data, 1)
                    ctr += 1
                    total += labels.size(0)
                    correct += (predicted == labels).sum()
                    if total > max_eval_images:
                        break
                acc = 100 * float(correct) / total
                print("acc =", round(acc, 2), "correct =", float(correct), "total =", total)
                if 'gaussian' in mode:
                    print('Robust Accuracy = {:.3f} with level = {:.2f}'.format(acc, level))
                    accuracies_gaussian.append(round(acc, 2))
                    print("Guassian Accuracies after append :", accuracies_gaussian)
                elif 'speckle' in mode:
                    print('Robust Accuracy = {:.3f} with level = {:.2f}'.format(acc, level))
                    accuracies_speckle.append(round(acc, 2))
                    print("Speckle Accuracies after append :", accuracies_speckle)
                elif 's&p' in mode:
                    print('Robust Accuracy = {:.3f} with level = {:.2f}'.format(acc, level))
                    accuracies_saltpepper.append(round(acc, 2))
                    print("Salt&Pepper Accuracies after append :", accuracies_saltpepper)
                    # salt&pepper ignores the variance levels, one pass suffices.
                    break
                else:
                    print("noise mode not supported")
        # Change the number of variable as desired number of outputs.
        gaus_001, gaus_01, gaus_05, gaus_1, gaus_2, gaus_3, gaus_4, gaus_5, gaus_6 = (items for items in accuracies_gaussian)
        speck_001, speck_01, speck_05, speck_1, speck_2, speck_3, speck_4, speck_5, speck_6 = (items for items in accuracies_speckle)
        saltpepper = accuracies_saltpepper[0]
        # Load the top1/top5 errors from the clean-evaluation results file.
        with open("{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH), "r") as f:
            c_ids = []
            for i in f.readlines():
                sub_id = list(map(float, i.split(",")))
                c_ids.append(sub_id[3:5])
        topK_errors = [sum(i) / len(c_ids) for i in zip(*c_ids)]
        top1_error, top5_error = topK_errors[0], topK_errors[1]
        result_gaussian = ', '.join(
            [str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
             '{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
             str(gaus_001), str(gaus_01), str(gaus_05), str(gaus_1), str(gaus_2), str(gaus_3), str(gaus_4), str(gaus_5), str(gaus_6)])
        result_speck = ', '.join(
            [str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
             '{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
             str(speck_001), str(speck_01), str(speck_05), str(speck_1), str(speck_2), str(speck_3), str(speck_4), str(speck_5), str(speck_6)])
        result_sp = ', '.join(
            [str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
             '{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
             str(saltpepper)])
        with open("{}/gaus_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
            print(" Writing Text File with accuracies Gaussian:{} ".format(accuracies_gaussian))
            text_file.write(result_gaussian + '\n')
        with open("{}/saltpepper_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
            print(" Writing Text File with accuracies Salt & Pepper:{} ".format(accuracies_saltpepper))
            text_file.write(result_sp + '\n')
        with open("{}/speckle_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
            print(" Writing Text File with accuracies Speckle:{} ".format(accuracies_speckle))
            text_file.write(result_speck + '\n')
def single_proc_train():
    """Run inference in a single process, with optional tensorboard logging."""
    lu.setup_logging()
    logger.info('Config:\n{}'.format(cfg))
    writer_train, writer_eval = None, None
    # Writers are only created on the master process, for the first seed.
    first_seed = cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START
    if cfg.TENSORBOARD and du.is_master_proc() and first_seed:
        stamp = datetime.now().strftime('%b%d_%H-%M-%S')
        run_root = os.path.join(cfg.OUT_DIR, 'runs')
        logdir_train = os.path.join(run_root, stamp + '_train')
        logdir_eval = os.path.join(run_root, stamp + '_eval')
        for logdir in (logdir_train, logdir_eval):
            if not os.path.exists(logdir):
                os.makedirs(logdir)
        writer_train = SummaryWriter(logdir_train)
        writer_eval = SummaryWriter(logdir_eval)
    # Seed numpy and torch RNGs so each seed's run is reproducible.
    np.random.seed(cfg.RGRAPH.SEED_TRAIN)
    torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
    # Configure the CUDNN backend
    torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
    # Launch inference + adversarial run
    train_model(writer_eval, is_master=du.is_master_proc())
    if writer_eval is not None:
        writer_eval.close()
def check_seed_exists(i):
    """Return True if the results file already records more than *i* lines."""
    fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
    if not os.path.isfile(fname):
        return False
    with open(fname, 'r') as f:
        return len(f.readlines()) > i
def main():
    """Entry point: load the config, then run inference once per seed."""
    args = parse_args()
    # Merge config from file first, then from command-line overrides.
    cfg.merge_from_file(args.cfg_file)
    cfg.merge_from_list(args.opts)
    assert_cfg()
    # cfg.freeze()
    os.makedirs(cfg.OUT_DIR, exist_ok=True)
    dump_cfg()
    seed_range = range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)
    for i, cfg.RGRAPH.SEED_TRAIN in enumerate(seed_range):
        # Skip seeds whose results are already on disk.
        if check_seed_exists(i):
            print('Inference seed {} already exists, stopping inference'.format(cfg.RGRAPH.SEED_TRAIN))
        else:
            print("Launching inference for seed {}".format(i))
            single_proc_train()
# Standard script entry point guard.
if __name__ == '__main__':
    main()
| 23,864 | 42.708791 | 139 | py |
RobDanns | RobDanns-main/deep_learning/tools/train_net.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
import argparse
import pickle
import numpy as np
import os
import sys
import torch
import math
# import torchvision
# import time
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
from datetime import datetime
from tensorboardX import SummaryWriter
# import wandb
logger = lu.get_logger(__name__)
print("Let's use GPU :", torch.cuda.current_device())
def parse_args():
    """Parse command-line arguments: a required --cfg file plus free-form overrides."""
    parser = argparse.ArgumentParser(description='Train a classification model')
    parser.add_argument('--cfg', dest='cfg_file', help='Config file',
                        required=True, type=str)
    parser.add_argument('opts', help='See pycls/core/config.py for all options',
                        default=None, nargs=argparse.REMAINDER)
    # With no arguments at all, print usage instead of an argparse error.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def is_eval_epoch(cur_epoch):
    """Determines if the model should be evaluated at the current epoch."""
    nxt = cur_epoch + 1
    # Evaluate on the configured period, and always on the final epoch.
    return nxt % cfg.TRAIN.EVAL_PERIOD == 0 or nxt == cfg.OPTIM.MAX_EPOCH
def log_model_info(model, writer_eval=None):
    """Log parameter/flop counts and graph size; return (params, flops)."""
    logger.info('Model:\n{}'.format(model))
    params = mu.params_count(model)
    flops = mu.flops_count(model)
    for label, value in (('Params', params), ('Flops', flops)):
        logger.info('{}: {:,}'.format(label, value))
        # Mirror the static stats into tensorboard when a writer is available.
        if writer_eval is not None:
            writer_eval.add_scalar(label, value, 1)
    logger.info('Number of node: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
    return params, flops
def train_epoch(
        train_loader, model, loss_fun, optimizer, train_meter, cur_epoch, writer_train=None, params=0, flops=0,
        is_master=False):
    """Performs one epoch of training.

    Shuffles the loader, sets the epoch learning rate, then runs the
    forward/backward/step cycle for every batch while accumulating
    top-1/top-5 errors and loss into *train_meter*. Returns the epoch
    stats produced by the meter.
    """
    # Shuffle the data
    loader.shuffle(train_loader, cur_epoch)
    # Update the learning rate
    lr = optim.get_epoch_lr(cur_epoch)
    optim.set_lr(optimizer, lr)
    # Enable training mode
    model.train()
    train_meter.iter_tic()
    for cur_iter, (inputs, labels) in enumerate(train_loader):
        # Transfer the data to the current GPU device
        inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
        # Perform the forward pass
        preds = model(inputs)
        # Compute the loss
        loss = loss_fun(preds, labels)
        # Perform the backward pass
        optimizer.zero_grad()
        loss.backward()
        # Update the parameters
        optimizer.step()
        # Compute the errors
        top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
        # Combine the stats across the GPUs
        if cfg.NUM_GPUS > 1:
            loss, top1_err, top5_err = du.scaled_all_reduce(
                [loss, top1_err, top5_err]
            )
        # Copy the stats from GPU to CPU (sync point)
        loss, top1_err, top5_err = loss.item(), top1_err.item(), top5_err.item()
        train_meter.iter_toc()
        # Update and log stats; the meter is fed the global batch size.
        train_meter.update_stats(
            top1_err, top5_err, loss, lr, inputs.size(0) * cfg.NUM_GPUS
        )
        train_meter.log_iter_stats(cur_epoch, cur_iter)
        train_meter.iter_tic()
    # Log epoch stats
    train_meter.log_epoch_stats(cur_epoch, writer_train, params, flops, is_master=is_master)
    trg_stats = train_meter.get_epoch_stats(cur_epoch)
    train_meter.reset()
    return trg_stats
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
    """Evaluates the model on the test set.

    Runs one gradient-free pass over *test_loader*, accumulating top-1/top-5
    errors into *test_meter*, logs epoch stats, optionally saves the model's
    adjacency matrices (cfg.RGRAPH.SAVE_GRAPH), and returns the epoch stats.
    """
    # Enable eval mode
    model.eval()
    test_meter.iter_tic()
    for cur_iter, (inputs, labels) in enumerate(test_loader):
        # Transfer the data to the current GPU device
        inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
        # Compute the predictions
        preds = model(inputs)
        # Compute the errors
        top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
        # Combine the errors across the GPUs
        if cfg.NUM_GPUS > 1:
            top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
        # Copy the errors from GPU to CPU (sync point)
        top1_err, top5_err = top1_err.item(), top5_err.item()
        test_meter.iter_toc()
        # Update and log stats; the meter is fed the global batch size.
        test_meter.update_stats(
            top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
        )
        test_meter.log_iter_stats(cur_epoch, cur_iter)
        test_meter.iter_tic()
    # Log epoch stats
    # test_meter.log_epoch_stats(cur_epoch,writer_eval,params,flops)
    test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
    stats = test_meter.get_epoch_stats(cur_epoch)
    test_meter.reset()
    # Optionally dump the relational-graph adjacency (plus top-1 error) to disk.
    if cfg.RGRAPH.SAVE_GRAPH:
        adj_dict = nu.model2adj(model)
        adj_dict = {**adj_dict, 'top1_err': stats['top1_err']}
        os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
        np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
    return stats
def train_model(writer_train=None, writer_eval=None, is_master=False):
    """Trains the model.

    Optionally rescales cfg.RGRAPH.DIM_LIST so the generated model matches a
    per-dataset flops/params baseline (cfg.TRAIN.AUTO_MATCH), resumes from the
    last checkpoint if available, then runs the train/eval loop, saving
    checkpoints and evaluating per is_eval_epoch.
    """
    # Fit flops/params
    if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
        mode = 'flops'  # flops or params
        # Per-dataset / per-model baselines: the budget of the reference net.
        if cfg.TRAIN.DATASET == 'cifar10':
            pre_repeat = 15
            if cfg.MODEL.TYPE == 'resnet':
                stats_baseline = 40813184  # ResNet20
            elif cfg.MODEL.TYPE == 'mlpnet':  # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 256:
                        stats_baseline = 985600
                    elif cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 2364416
                    elif cfg.RGRAPH.DIM_LIST[0] == 1024:
                        stats_baseline = 6301696
            elif cfg.MODEL.TYPE == 'cnn':
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 806884352
                    elif cfg.RGRAPH.DIM_LIST[0] == 16:
                        stats_baseline = 1216672
                elif cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 48957952
                    elif '16d' in cfg.OUT_DIR:
                        stats_baseline = 3392128
        elif cfg.TRAIN.DATASET == 'cifar100':
            pre_repeat = 15
            if cfg.MODEL.TYPE == 'resnet':
                if cfg.MODEL.DEPTH == 20:
                    stats_baseline = 40813184  # ResNet20
                elif cfg.MODEL.DEPTH == 26:
                    stats_baseline = 56140000  # ResNet26
                elif cfg.MODEL.DEPTH == 34:
                    stats_baseline = 71480000  # ResNet34
                elif cfg.MODEL.DEPTH == 38:
                    stats_baseline = 86819000  # ResNet38
                elif cfg.MODEL.DEPTH == 50:
                    stats_baseline = 130000000  # ResNet50
            elif cfg.MODEL.TYPE == 'mlpnet':  # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 256:
                        stats_baseline = 985600
                    elif cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 2364416
                    elif cfg.RGRAPH.DIM_LIST[0] == 1024:
                        stats_baseline = 6301696
            elif cfg.MODEL.TYPE == 'cnn':
                if cfg.MODEL.LAYERS == 3:
                    if cfg.RGRAPH.DIM_LIST[0] == 512:
                        stats_baseline = 806884352
                    elif cfg.RGRAPH.DIM_LIST[0] == 16:
                        stats_baseline = 1216672
                elif cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 48957952
                    elif '16d' in cfg.OUT_DIR:
                        stats_baseline = 3392128
        elif cfg.TRAIN.DATASET == 'imagenet':
            pre_repeat = 9
            if cfg.MODEL.TYPE == 'resnet':
                if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18:  # ResNet18
                    stats_baseline = 1820000000
                elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34:  # ResNet34
                    stats_baseline = 3663761408
                elif 'sep' in cfg.RESNET.TRANS_FUN:  # ResNet34-sep
                    stats_baseline = 553614592
                elif 'bottleneck' in cfg.RESNET.TRANS_FUN:  # ResNet50
                    stats_baseline = 4089184256
            elif cfg.MODEL.TYPE == 'efficientnet':  # EfficientNet
                stats_baseline = 385824092
            elif cfg.MODEL.TYPE == 'cnn':  # CNN
                if cfg.MODEL.LAYERS == 6:
                    if '64d' in cfg.OUT_DIR:
                        stats_baseline = 166438912
        cfg.defrost()
        stats = model_builder.build_model_stats(mode)
        if stats != stats_baseline:
            # 1st round: set first stage dim
            for i in range(pre_repeat):
                scale = round(math.sqrt(stats_baseline / stats), 2)
                first = cfg.RGRAPH.DIM_LIST[0]
                ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                first = int(round(first * scale))
                cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                stats = model_builder.build_model_stats(mode)
            flag_init = 1 if stats < stats_baseline else -1
            step = 1
            while True:
                first = cfg.RGRAPH.DIM_LIST[0]
                ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                first += flag_init * step
                cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                stats = model_builder.build_model_stats(mode)
                flag = 1 if stats < stats_baseline else -1
                if stats == stats_baseline:
                    break
                if flag != flag_init:
                    if cfg.RGRAPH.UPPER == False:  # make sure the stats is SMALLER than baseline
                        if flag < 0:
                            first = cfg.RGRAPH.DIM_LIST[0]
                            ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                            first -= flag_init * step
                            cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                        break
                    else:
                        if flag > 0:
                            first = cfg.RGRAPH.DIM_LIST[0]
                            ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
                            first -= flag_init * step
                            cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
                        break
            # 2nd round: set other stage dim
            first = cfg.RGRAPH.DIM_LIST[0]
            ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
            stats = model_builder.build_model_stats(mode)
            flag_init = 1 if stats < stats_baseline else -1
            if 'share' not in cfg.RESNET.TRANS_FUN:
                for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
                    for j in range(ratio_list[i]):
                        cfg.RGRAPH.DIM_LIST[i] += flag_init
                        stats = model_builder.build_model_stats(mode)
                        flag = 1 if stats < stats_baseline else -1
                        if flag_init != flag:
                            cfg.RGRAPH.DIM_LIST[i] -= flag_init
                            break
            stats = model_builder.build_model_stats(mode)
        print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
    # Build the model (before the loaders to ease debugging)
    model = model_builder.build_model()
    params, flops = log_model_info(model, writer_eval)
    # Define the loss function
    loss_fun = losses.get_loss_fun()
    # Construct the optimizer
    optimizer = optim.construct_optimizer(model)
    # wandb.watch(model)
    # Load a checkpoint if applicable
    start_epoch = 0
    if cfg.TRAIN.AUTO_RESUME and cu.has_checkpoint():
        last_checkpoint = cu.get_checkpoint_last1()
        checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
        logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
        # Training already finished: stop the process here.
        if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
            exit()
            start_epoch = checkpoint_epoch  # NOTE(review): unreachable after exit()
        else:
            start_epoch = checkpoint_epoch + 1
    # Create data loaders
    train_loader = loader.construct_train_loader()
    test_loader = loader.construct_test_loader()
    # Create meters
    train_meter = TrainMeter(len(train_loader))
    test_meter = TestMeter(len(test_loader))
    if cfg.ONLINE_FLOPS:
        model_dummy = model_builder.build_model()
        IMAGE_SIZE = 224
        n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
        logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
        del (model_dummy)
    # Perform the training loop
    logger.info('Start epoch: {}'.format(start_epoch + 1))
    # do eval at initialization
    initial_eval_stats = eval_epoch(test_loader, model, test_meter, -1,
                                    writer_eval, params, flops, is_master=is_master)
    if start_epoch == cfg.OPTIM.MAX_EPOCH:
        cur_epoch = start_epoch - 1
        last_epoch_eval_stats = eval_epoch(test_loader, model, test_meter, cur_epoch,
                                           writer_eval, params, flops, is_master=is_master)
    else:
        for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH):
            print('Epoch {} Started'.format(cur_epoch))
            # Train for one epoch
            trg_stats = train_epoch(
                train_loader, model, loss_fun, optimizer, train_meter, cur_epoch,
                writer_train, is_master=is_master
            )
            # Compute precise BN stats
            if cfg.BN.USE_PRECISE_STATS:
                nu.compute_precise_bn_stats(model, train_loader)
            # Save a checkpoint
            if cu.is_checkpoint_epoch(cur_epoch):
                checkpoint_file = cu.save_checkpoint(model, optimizer, cur_epoch)
                logger.info('Wrote checkpoint to: {}'.format(checkpoint_file))
            # Evaluate the model
            if is_eval_epoch(cur_epoch):
                eval_stats = eval_epoch(test_loader, model, test_meter, cur_epoch,
                                        writer_eval, params, flops, is_master=is_master)
                # wandb.log({'Epoch': cur_epoch, 'Train top1_err': trg_stats['top1_err'], 'Test top1_err': eval_stats['top1_err']})
def single_proc_train():
    """Runs one training job in the current process."""
    # Logging setup and config echo
    lu.setup_logging()
    logger.info('Config:\n{}'.format(cfg))
    # Tensorboard writers are created only on the master process of the first seed
    tb_train_writer = None
    tb_eval_writer = None
    if cfg.TENSORBOARD and du.is_master_proc() and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
        comment = ''
        stamp = datetime.now().strftime('%b%d_%H-%M-%S')
        logdir_train = os.path.join(cfg.OUT_DIR,
                                    'runs', stamp + comment + '_train')
        logdir_eval = os.path.join(cfg.OUT_DIR,
                                   'runs', stamp + comment + '_eval')
        for logdir in (logdir_train, logdir_eval):
            if not os.path.exists(logdir):
                os.makedirs(logdir)
        tb_train_writer = SummaryWriter(logdir_train)
        tb_eval_writer = SummaryWriter(logdir_eval)
    # Fix the RNG seeds (see RNG comment in core/config.py for discussion)
    np.random.seed(cfg.RGRAPH.SEED_TRAIN)
    torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
    # Configure the CUDNN backend
    torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
    # Train the model
    train_model(tb_train_writer, tb_eval_writer, is_master=du.is_master_proc())
    if tb_train_writer is not None and tb_eval_writer is not None:
        tb_train_writer.close()
        tb_eval_writer.close()
def check_seed_exists(i):
    """Returns True if results for the i-th seed were already recorded.

    A seed counts as "done" when the final-epoch results file holds more
    than `i` lines (one line per completed seed).
    """
    fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
    if not os.path.isfile(fname):
        return False
    with open(fname, 'r') as f:
        return len(f.readlines()) > i
def main():
    """Entry point: loads config, then trains once per configured seed."""
    args = parse_args()
    # Merge config from file, then apply command-line overrides
    cfg.merge_from_file(args.cfg_file)
    cfg.merge_from_list(args.opts)
    assert_cfg()
    # Ensure the output dir exists and snapshot the effective config there
    os.makedirs(cfg.OUT_DIR, exist_ok=True)
    dump_cfg()
    train_seeds = range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)
    for run_idx, seed in enumerate(train_seeds):
        cfg.RGRAPH.SEED_TRAIN = seed
        # Skip seeds whose results were already written
        if check_seed_exists(run_idx):
            print('Seed {} exists, skip!'.format(cfg.RGRAPH.SEED_TRAIN))
            continue
        if cfg.NUM_GPUS > 1:
            mpu.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=single_proc_train)
        else:
            single_proc_train()
if __name__ == '__main__':
    main()
| 18,692 | 39.113734 | 127 | py |
RobDanns | RobDanns-main/deep_learning/pycls/config.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Configuration file."""
import os
from yacs.config import CfgNode as CN
# Global config object (yacs CfgNode); mutated in place by merge_from_file/list
_C = CN()
# Example usage:
# from core.config import cfg
cfg = _C
# ---------------------------------------------------------------------------- #
# Model options
# ---------------------------------------------------------------------------- #
_C.MODEL = CN()
# Model type to use
_C.MODEL.TYPE = ''
# Number of weight layers
_C.MODEL.DEPTH = 0
# Number of classes
_C.MODEL.NUM_CLASSES = 10
# Loss function (see pycls/models/loss.py for options)
_C.MODEL.LOSS_FUN = 'cross_entropy'
# Num layers, excluding the stem and head layers. Total layers used should +2
_C.MODEL.LAYERS = 3
# ---------------------------------------------------------------------------- #
# ResNet options
# ---------------------------------------------------------------------------- #
_C.RESNET = CN()
# Transformation function (see pycls/models/resnet.py for options)
_C.RESNET.TRANS_FUN = 'basic_transform'
# Number of groups to use (1 -> ResNet; > 1 -> ResNeXt)
_C.RESNET.NUM_GROUPS = 1
# Width of each group (64 -> ResNet; 4 -> ResNeXt)
_C.RESNET.WIDTH_PER_GROUP = 64
# Apply stride to 1x1 conv (True -> MSRA; False -> fb.torch)
_C.RESNET.STRIDE_1X1 = False
# Whether append 1x1 resblock
_C.RESNET.APPEND1x1 = 0
# For group conv only
_C.RESNET.GROUP_SIZE = 2
# ---------------------------------------------------------------------------- #
# EfficientNet options
# ---------------------------------------------------------------------------- #
_C.EFFICIENT_NET = CN()
# Stem width
_C.EFFICIENT_NET.STEM_W = 32
# Depth for each stage (number of blocks in the stage)
_C.EFFICIENT_NET.DEPTHS = []
# Width for each stage (width of each block in the stage)
_C.EFFICIENT_NET.WIDTHS = []
# Expansion ratios for MBConv blocks in each stage
_C.EFFICIENT_NET.EXP_RATIOS = []
# Squeeze-and-Excitation (SE) operation
_C.EFFICIENT_NET.SE_ENABLED = True
# Squeeze-and-Excitation (SE) ratio
_C.EFFICIENT_NET.SE_RATIO = 0.25
# Linear projection
_C.EFFICIENT_NET.LIN_PROJ = True
# Strides for each stage (applies to the first block of each stage)
_C.EFFICIENT_NET.STRIDES = []
# Kernel sizes for each stage
_C.EFFICIENT_NET.KERNELS = []
# Head type ('conv_head' or 'simple_head')
_C.EFFICIENT_NET.HEAD_TYPE = 'conv_head'
# Head width (applies to 'conv_head')
_C.EFFICIENT_NET.HEAD_W = 1280
# Activation function
_C.EFFICIENT_NET.ACT_FUN = 'swish'
# Drop connect ratio
_C.EFFICIENT_NET.DC_RATIO = 0.0
# Drop connect implementation
_C.EFFICIENT_NET.DC_IMP = 'tf'
# Dropout ratio
_C.EFFICIENT_NET.DROPOUT_RATIO = 0.0
# ---------------------------------------------------------------------------- #
# Relational graph options
# ---------------------------------------------------------------------------- #
_C.RGRAPH = CN()
# dim for first layer. NOTE: this is fixed when matching FLOPs
_C.RGRAPH.DIM_FIRST = 16
# dim for each stage
_C.RGRAPH.DIM_LIST = []
# wide stem module
_C.RGRAPH.STEM_MODE = 'default'
# How to message exchange: dense, hier (deprecated)
_C.RGRAPH.TALK_MODE = 'dense'
# Num of nodes
_C.RGRAPH.GROUP_NUM = 32
# Size of nodes in Stage 1
_C.RGRAPH.GROUP_SIZE = 1
# The type of message passing used
_C.RGRAPH.MESSAGE_TYPE = 'ws'
# Whether use directed graph
_C.RGRAPH.DIRECTED = False
# Graph sparsity
_C.RGRAPH.SPARSITY = 0.5
# Graph Randomness
_C.RGRAPH.P = 0.0
# Graph seed
_C.RGRAPH.SEED_GRAPH = 1
# training seed used
_C.RGRAPH.SEED_TRAIN = 1
# training seed, start (inclusive), end (exclusive)
_C.RGRAPH.SEED_TRAIN_START = 1
_C.RGRAPH.SEED_TRAIN_END = 2
# Keep graph across the network
_C.RGRAPH.KEEP_GRAPH = True
# Append additional 1x1 layers for additional talks
_C.RGRAPH.ADD_1x1 = 0
# Match upper computational bound
_C.RGRAPH.UPPER = True
# Auto match computational budget
_C.RGRAPH.AUTO_MATCH = True
# AGG func. Only sum is supported in current mask-based implementation
_C.RGRAPH.AGG_FUNC = 'sum'
# Save weight matrices as graphs. Warning: the saved matrices can be huge
_C.RGRAPH.SAVE_GRAPH = False
# ---------------------------------------------------------------------------- #
# Batch norm options
# ---------------------------------------------------------------------------- #
_C.BN = CN()
# BN epsilon
_C.BN.EPS = 1e-5
# BN momentum (BN momentum in PyTorch = 1 - BN momentum in Caffe2)
_C.BN.MOM = 0.1
# Precise BN stats
_C.BN.USE_PRECISE_STATS = True
_C.BN.NUM_SAMPLES_PRECISE = 1024
# Initialize the gamma of the final BN of each block to zero
_C.BN.ZERO_INIT_FINAL_GAMMA = False
# ---------------------------------------------------------------------------- #
# Optimizer options
# ---------------------------------------------------------------------------- #
_C.OPTIM = CN()
# Base learning rate
_C.OPTIM.BASE_LR = 0.1
# Learning rate policy select from {'cos', 'exp', 'steps'}
_C.OPTIM.LR_POLICY = 'cos'
# Exponential decay factor
_C.OPTIM.GAMMA = 0.1
# Step size for 'exp' and 'cos' policies (in epochs)
_C.OPTIM.STEP_SIZE = 1
# Steps for 'steps' policy (in epochs)
_C.OPTIM.STEPS = []
# Learning rate multiplier for 'steps' policy
_C.OPTIM.LR_MULT = 0.1
# Maximal number of epochs
_C.OPTIM.MAX_EPOCH = 200
# Momentum
_C.OPTIM.MOMENTUM = 0.9
# Momentum dampening
_C.OPTIM.DAMPENING = 0.0
# Nesterov momentum
_C.OPTIM.NESTEROV = True
# L2 regularization
_C.OPTIM.WEIGHT_DECAY = 5e-4
# Start the warm up from OPTIM.BASE_LR * OPTIM.WARMUP_FACTOR
_C.OPTIM.WARMUP_FACTOR = 0.1
# Gradually warm up the OPTIM.BASE_LR over this number of epochs
_C.OPTIM.WARMUP_EPOCHS = 0
# ---------------------------------------------------------------------------- #
# Training options
# ---------------------------------------------------------------------------- #
_C.TRAIN = CN()
# Dataset and split
_C.TRAIN.DATASET = ''
_C.TRAIN.SPLIT = 'train'
# Total mini-batch size
_C.TRAIN.BATCH_SIZE = 128
# Evaluate model on test data every eval period epochs
_C.TRAIN.EVAL_PERIOD = 1
# Save model checkpoint every checkpoint period epochs
_C.TRAIN.CHECKPOINT_PERIOD = 50
# Resume training from the latest checkpoint in the output directory
_C.TRAIN.AUTO_RESUME = True
# Checkpoint to start training from (if no automatic checkpoint saved)
_C.TRAIN.START_CHECKPOINT = ''
_C.TRAIN.AUTO_MATCH = False
# ---------------------------------------------------------------------------- #
# Testing options
# ---------------------------------------------------------------------------- #
_C.TEST = CN()
# Dataset and split
_C.TEST.DATASET = ''
_C.TEST.SPLIT = 'val'
# Total mini-batch size
_C.TEST.BATCH_SIZE = 200
# ---------------------------------------------------------------------------- #
# Common train/test data loader options
# ---------------------------------------------------------------------------- #
_C.DATA_LOADER = CN()
# Number of data loader workers per training process
_C.DATA_LOADER.NUM_WORKERS = 4
# Load data to pinned host memory
_C.DATA_LOADER.PIN_MEMORY = True
# ---------------------------------------------------------------------------- #
# Memory options
# ---------------------------------------------------------------------------- #
_C.MEM = CN()
# Perform ReLU inplace
_C.MEM.RELU_INPLACE = True
# ---------------------------------------------------------------------------- #
# CUDNN options
# ---------------------------------------------------------------------------- #
_C.CUDNN = CN()
# Perform benchmarking to select the fastest CUDNN algorithms to use
# Note that this may increase the memory usage and will likely not result
# in overall speedups when variable size inputs are used (e.g. COCO training)
_C.CUDNN.BENCHMARK = False
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
# Number of GPUs to use (applies to both training and testing)
_C.NUM_GPUS = 1
# Output directory
_C.OUT_DIR = '/tmp'
# Checkpoint directory for inference
_C.CHECKPT_DIR = '/tmp'
_C.IS_INFERENCE = False
_C.IS_DDP = False
# Config destination (in OUT_DIR)
_C.CFG_DEST = 'config.yaml'
# Note that non-determinism may still be present due to non-deterministic
# operator implementations in GPU operator libraries
_C.RNG_SEED = 1
# Log destination ('stdout' or 'file')
_C.LOG_DEST = 'file'
# Log period in iters
_C.LOG_PERIOD = 10
# Distributed backend
_C.DIST_BACKEND = 'nccl'
# Hostname and port for initializing multi-process groups
_C.HOST = 'localhost'
_C.PORT = 12002
# Computing flops by online foward pass
_C.ONLINE_FLOPS = False
# Whether use Tensorboard
_C.TENSORBOARD = False
def assert_cfg():
    """Validates invariants on the global config; raises AssertionError on bad values."""
    valid_splits = ['train', 'val', 'test']
    assert not _C.OPTIM.STEPS or _C.OPTIM.STEPS[0] == 0, \
        'The first lr step must start at 0'
    assert _C.TRAIN.SPLIT in valid_splits, \
        'Train split \'{}\' not supported'.format(_C.TRAIN.SPLIT)
    assert _C.TRAIN.BATCH_SIZE % _C.NUM_GPUS == 0, \
        'Train mini-batch size should be a multiple of NUM_GPUS.'
    assert _C.TEST.SPLIT in valid_splits, \
        'Test split \'{}\' not supported'.format(_C.TEST.SPLIT)
    assert _C.TEST.BATCH_SIZE % _C.NUM_GPUS == 0, \
        'Test mini-batch size should be a multiple of NUM_GPUS.'
    assert _C.LOG_DEST in ['stdout', 'file'], \
        'Log destination \'{}\' not supported'.format(_C.LOG_DEST)
def dump_cfg():
    """Writes the active config to CFG_DEST inside the output directory."""
    with open(os.path.join(_C.OUT_DIR, _C.CFG_DEST), 'w') as f:
        _C.dump(stream=f)
def load_cfg(out_dir, cfg_dest='config.yaml'):
    """Merges a previously dumped config from `out_dir` into the global config."""
    _C.merge_from_file(os.path.join(out_dir, cfg_dest))
| 10,201 | 24.378109 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/losses.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Loss functions."""
import torch.nn as nn
from pycls.config import cfg
# Supported losses: maps cfg.MODEL.LOSS_FUN values to loss classes.
# Extend this dict to register additional losses.
_LOSS_FUNS = {
    'cross_entropy': nn.CrossEntropyLoss,
}
def get_loss_fun():
    """Retrieves the configured loss function, instantiated on the GPU.

    Returns:
        An instance of the loss class named by cfg.MODEL.LOSS_FUN, moved to CUDA.

    Raises:
        AssertionError: if cfg.MODEL.LOSS_FUN is not a supported loss.
    """
    # Bug fix: the assert message previously formatted cfg.TRAIN.LOSS, an
    # attribute that does not exist, so an unsupported loss raised an opaque
    # AttributeError instead of the intended assertion message.
    assert cfg.MODEL.LOSS_FUN in _LOSS_FUNS, \
        'Loss function \'{}\' not supported'.format(cfg.MODEL.LOSS_FUN)
    return _LOSS_FUNS[cfg.MODEL.LOSS_FUN]().cuda()
| 730 | 26.074074 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/efficientnet.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""EfficientNet models."""
import math
import torch
import torch.nn as nn
from pycls.config import cfg
import pycls.utils.net as nu
import pycls.utils.logging as logging
from .relation_graph import *
logger = logging.get_logger(__name__)
def get_conv(name):
    """Maps a transformation-function name to its block class."""
    mapping = {
        'mbconv_transform': MBConv,
        'mbtalkconv_transform': MBTalkConv,
    }
    assert name in mapping, \
        'Transformation function \'{}\' not supported'.format(name)
    return mapping[name]
def drop_connect_tf(x, drop_ratio):
    """Drop connect (tensorflow port).

    Zeroes whole samples with probability `drop_ratio` and rescales the
    survivors by 1/keep; operates on `x` in place and returns it.
    """
    keep = 1.0 - drop_ratio
    # floor(U[0,1) + keep) is 1 with probability `keep`, else 0
    mask = torch.floor(
        keep + torch.rand([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)
    )
    return x.div_(keep).mul_(mask)
def drop_connect_pt(x, drop_ratio):
    """Drop connect (pytorch version).

    Samples a per-example Bernoulli keep mask, rescales by 1/keep, and
    applies both to `x` in place, returning it.
    """
    keep = 1.0 - drop_ratio
    mask = torch.empty(
        [x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device
    ).bernoulli_(keep)
    return x.div_(keep).mul_(mask)
def get_act_fun(act_type):
    """Maps an activation-function name to its class."""
    mapping = {
        'swish': Swish,
        'relu': nn.ReLU,
    }
    assert act_type in mapping, \
        'Activation function \'{}\' not supported'.format(act_type)
    return mapping[act_type]
class SimpleHead(nn.Module):
    """Simple head: global average pool -> (optional dropout) -> linear classifier."""
    def __init__(self, dim_in, num_classes):
        super(SimpleHead, self).__init__()
        # AvgPool to a 1x1 spatial map
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        # Dropout is only created when a positive ratio is configured;
        # forward() checks for the attribute with hasattr.
        if cfg.EFFICIENT_NET.DROPOUT_RATIO > 0.0:
            self.dropout = nn.Dropout(p=cfg.EFFICIENT_NET.DROPOUT_RATIO)
        # FC classifier
        self.fc = nn.Linear(dim_in, num_classes, bias=True)
    def forward(self, x):
        x = self.avg_pool(x)
        # Flatten to (N, dim_in)
        x = x.view(x.size(0), -1)
        x = self.dropout(x) if hasattr(self, 'dropout') else x
        x = self.fc(x)
        return x
class ConvHead(nn.Module):
    """EfficientNet conv head: 1x1 conv+BN+act -> avg pool -> (dropout) -> FC."""
    def __init__(self, in_w, out_w, num_classes, act_fun):
        super(ConvHead, self).__init__()
        self._construct_class(in_w, out_w, num_classes, act_fun)
    def _construct_class(self, in_w, out_w, num_classes, act_fun):
        # 1x1, BN, Swish (widens features from in_w to out_w before pooling)
        self.conv = nn.Conv2d(
            in_w, out_w,
            kernel_size=1, stride=1, padding=0, bias=False
        )
        self.conv_bn = nn.BatchNorm2d(
            out_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
        )
        self.conv_swish = act_fun()
        # AvgPool
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        # Dropout (only created when a positive ratio is configured)
        if cfg.EFFICIENT_NET.DROPOUT_RATIO > 0.0:
            self.dropout = nn.Dropout(p=cfg.EFFICIENT_NET.DROPOUT_RATIO)
        # FC
        self.fc = nn.Linear(out_w, num_classes, bias=True)
    def forward(self, x):
        # 1x1, BN, Swish
        x = self.conv_swish(self.conv_bn(self.conv(x)))
        # AvgPool
        x = self.avg_pool(x)
        x = x.view(x.size(0), -1)
        # Dropout
        x = self.dropout(x) if hasattr(self, 'dropout') else x
        # FC
        x = self.fc(x)
        return x
class LinearHead(nn.Module):
    """EfficientNet linear head: avg pool -> FC+BN1d+act -> FC classifier."""
    def __init__(self, in_w, out_w, num_classes, act_fun):
        super(LinearHead, self).__init__()
        self._construct_class(in_w, out_w, num_classes, act_fun)
    def _construct_class(self, in_w, out_w, num_classes, act_fun):
        # AvgPool
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        # FC0: widening projection applied after pooling (hence BatchNorm1d)
        self.fc0 = nn.Linear(in_w, out_w, bias=False)
        self.fc0_bn = nn.BatchNorm1d(
            out_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
        )
        self.fc0_swish = act_fun()
        # FC classifier
        self.fc = nn.Linear(out_w, num_classes, bias=True)
    def forward(self, x):
        # AvgPool
        x = self.avg_pool(x)
        x = x.view(x.size(0), -1)
        # Linear, BN, Swish
        x = self.fc0_swish(self.fc0_bn(self.fc0(x)))
        # FC
        x = self.fc(x)
        return x
class MBConv(nn.Module):
    """Mobile inverted bottleneck block with SE (MBConv).

    Structure: 1x1 expand (skipped when exp ratio == 1) -> depthwise conv ->
    optional SE -> 1x1 linear projection, with a residual skip when the
    input/output shapes match.
    """
    def __init__(self, in_w, exp_r, kernel, stride, se_r, out_w, act_fun, seed=None, exp_w=None):
        super(MBConv, self).__init__()
        # NOTE(review): `seed` and `exp_w` are accepted but unused here; they
        # appear to exist for signature parity with MBTalkConv -- confirm.
        self._construct_class(in_w, exp_r, kernel, stride, se_r, out_w, act_fun)
    def _construct_class(self, in_w, exp_r, kernel, stride, se_r, out_w, act_fun):
        # Expansion: 1x1, BN, Swish
        self.expand = None
        exp_w = int(in_w * exp_r)
        # Include exp ops only if the exp ratio is different from 1
        if exp_w != in_w:
            self.expand = nn.Conv2d(
                in_w, exp_w,
                kernel_size=1, stride=1, padding=0, bias=False
            )
            self.expand_bn = nn.BatchNorm2d(
                exp_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
            )
            self.expand_swish = act_fun()
        # Depthwise: 3x3 dwise, BN, Swish
        self.dwise = nn.Conv2d(
            exp_w, exp_w,
            kernel_size=kernel, stride=stride, groups=exp_w, bias=False,
            # Hacky padding to preserve res (supports only 3x3 and 5x5)
            padding=(1 if kernel == 3 else 2)
        )
        self.dwise_bn = nn.BatchNorm2d(
            exp_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
        )
        self.dwise_swish = act_fun()
        # SE: x * F_ex(x); squeeze width is derived from the block input width
        if cfg.EFFICIENT_NET.SE_ENABLED:
            se_w = int(in_w * se_r)
            self.se = SE(exp_w, se_w, act_fun)
        # Linear projection: 1x1, BN
        self.lin_proj = nn.Conv2d(
            exp_w, out_w,
            kernel_size=1, stride=1, padding=0, bias=False
        )
        self.lin_proj_bn = nn.BatchNorm2d(
            out_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
        )
        # Nonlinear projection
        if not cfg.EFFICIENT_NET.LIN_PROJ:
            self.lin_proj_swish = act_fun()
        # Skip connections on blocks w/ same in and out shapes (MN-V2, Fig. 4)
        self.has_skip = (stride == 1) and (in_w == out_w)
    def forward(self, x):
        f_x = x
        # Expansion
        if self.expand:
            f_x = self.expand_swish(self.expand_bn(self.expand(f_x)))
        # Depthwise
        f_x = self.dwise_swish(self.dwise_bn(self.dwise(f_x)))
        # SE
        if cfg.EFFICIENT_NET.SE_ENABLED:
            f_x = self.se(f_x)
        # Linear projection
        f_x = self.lin_proj_bn(self.lin_proj(f_x))
        # Nonlinear projection
        if not cfg.EFFICIENT_NET.LIN_PROJ:
            f_x = self.lin_proj_swish(f_x)
        # Skip connection
        if self.has_skip:
            # Drop connect (training only); TF and PT variants differ in RNG use
            if self.training and cfg.EFFICIENT_NET.DC_RATIO > 0.0:
                if cfg.EFFICIENT_NET.DC_IMP == 'tf':
                    f_x = drop_connect_tf(f_x, cfg.EFFICIENT_NET.DC_RATIO)
                else:
                    f_x = drop_connect_pt(f_x, cfg.EFFICIENT_NET.DC_RATIO)
            f_x = x + f_x
        return f_x
class MBTalkConv(nn.Module):
    """Mobile inverted bottleneck block with SE (MBConv), relational-graph variant.

    Same structure as MBConv, but the 1x1 expansion and linear-projection convs
    are replaced by TalkConv2d layers wired by the configured relational graph.
    """
    def __init__(self, in_w, exp_r, kernel, stride, se_r, out_w, act_fun, seed=None, exp_w=None):
        super(MBTalkConv, self).__init__()
        # Graph seed for the TalkConv2d layers in this block
        self.seed=seed
        self._construct_class(in_w, exp_r, kernel, stride, se_r, out_w, act_fun, exp_w)
    def _construct_class(self, in_w, exp_r, kernel, stride, se_r, out_w, act_fun, exp_w):
        # Expansion: 1x1, BN, Swish
        self.expand = None
        # Unlike MBConv, the expanded width comes from the caller (exp_w)
        if int(exp_r)==1:
            exp_w = in_w
        else:
            self.expand = TalkConv2d(
                in_w, exp_w, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
                stride=1, padding=0, bias=False,
                message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
                sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
            )
            self.expand_bn = nn.BatchNorm2d(
                exp_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
            )
            self.expand_swish = act_fun()
        # Depthwise: 3x3 dwise, BN, Swish
        self.dwise = nn.Conv2d(
            exp_w, exp_w,
            kernel_size=kernel, stride=stride, groups=exp_w, bias=False,
            # Hacky padding to preserve res (supports only 3x3 and 5x5)
            padding=(1 if kernel == 3 else 2)
        )
        self.dwise_bn = nn.BatchNorm2d(
            exp_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
        )
        self.dwise_swish = act_fun()
        # SE: x * F_ex(x)
        if cfg.EFFICIENT_NET.SE_ENABLED:
            se_w = int(in_w * se_r)
            self.se = SE(exp_w, se_w, act_fun)
        # Linear projection: 1x1, BN (graph-wired)
        self.lin_proj = TalkConv2d(
            exp_w, out_w, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
            stride=1, padding=0, bias=False,
            message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
            sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
        )
        self.lin_proj_bn = nn.BatchNorm2d(
            out_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
        )
        # Nonlinear projection
        if not cfg.EFFICIENT_NET.LIN_PROJ:
            self.lin_proj_swish = act_fun()
        # Skip connections on blocks w/ same in and out shapes (MN-V2, Fig. 4)
        self.has_skip = (stride == 1) and (in_w == out_w)
    def forward(self, x):
        f_x = x
        # Expansion
        if self.expand:
            f_x = self.expand_swish(self.expand_bn(self.expand(f_x)))
        # Depthwise
        f_x = self.dwise_swish(self.dwise_bn(self.dwise(f_x)))
        # SE
        if cfg.EFFICIENT_NET.SE_ENABLED:
            f_x = self.se(f_x)
        # Linear projection
        f_x = self.lin_proj_bn(self.lin_proj(f_x))
        # Nonlinear projection
        if not cfg.EFFICIENT_NET.LIN_PROJ:
            f_x = self.lin_proj_swish(f_x)
        # Skip connection
        if self.has_skip:
            # Drop connect (training only)
            if self.training and cfg.EFFICIENT_NET.DC_RATIO > 0.0:
                if cfg.EFFICIENT_NET.DC_IMP == 'tf':
                    f_x = drop_connect_tf(f_x, cfg.EFFICIENT_NET.DC_RATIO)
                else:
                    f_x = drop_connect_pt(f_x, cfg.EFFICIENT_NET.DC_RATIO)
            f_x = x + f_x
        return f_x
class Stage(nn.Module):
    """EfficientNet stage: a sequence of `d` blocks executed in registration order."""
    def __init__(self, in_w, exp_r, kernel, stride, se_r, out_w, d, act_fun, exp_w=None):
        super(Stage, self).__init__()
        self._construct_class(in_w, exp_r, kernel, stride, se_r, out_w, d, act_fun, exp_w)
    def _construct_class(self, in_w, exp_r, kernel, stride, se_r, out_w, d, act_fun, exp_w):
        # One shared graph seed when KEEP_GRAPH, otherwise a fresh seed per block
        if cfg.RGRAPH.KEEP_GRAPH:
            seed = cfg.RGRAPH.SEED_GRAPH
        else:
            seed = int(cfg.RGRAPH.SEED_GRAPH*100)
        # Construct a sequence of blocks
        for i in range(d):
            trans_fun = get_conv(cfg.RESNET.TRANS_FUN)
            # Stride and input width apply to the first block of the stage
            stride_b = stride if i == 0 else 1
            in_w_b = in_w if i == 0 else out_w
            # Construct the block; names b1..bd fix state_dict keys and order
            self.add_module(
                'b{}'.format(i + 1),
                trans_fun(in_w_b, exp_r, kernel, stride_b, se_r, out_w, act_fun, seed=seed, exp_w=exp_w)
            )
            if not cfg.RGRAPH.KEEP_GRAPH:
                seed += 1
    def forward(self, x):
        for block in self.children():
            x = block(x)
        return x
class StemIN(nn.Module):
    """EfficientNet stem for ImageNet: stride-2 3x3 conv -> BN -> activation."""
    def __init__(self, in_w, out_w, act_fun):
        super(StemIN, self).__init__()
        self._construct_class(in_w, out_w, act_fun)
    def _construct_class(self, in_w, out_w, act_fun):
        # forward() iterates children in this registration order: conv, bn, act
        self.conv = nn.Conv2d(
            in_w, out_w,
            kernel_size=3, stride=2, padding=1, bias=False
        )
        self.bn = nn.BatchNorm2d(
            out_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
        )
        self.swish = act_fun()
    def forward(self, x):
        for layer in self.children():
            x = layer(x)
        return x
class EfficientNet(nn.Module):
    """EfficientNet model: stem -> stages s1..sN -> head, all driven by cfg."""
    def __init__(self):
        assert cfg.TRAIN.DATASET in ['imagenet'], \
            'Training on {} is not supported'.format(cfg.TRAIN.DATASET)
        assert cfg.TEST.DATASET in ['imagenet'], \
            'Testing on {} is not supported'.format(cfg.TEST.DATASET)
        assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
            'Train and test dataset must be the same for now'
        assert cfg.EFFICIENT_NET.HEAD_TYPE in ['conv_head', 'simple_head', 'linear_head'], \
            'Unsupported head type: {}'.format(cfg.EFFICIENT_NET.HEAD_TYPE)
        super(EfficientNet, self).__init__()
        self._construct_class(
            stem_w=cfg.EFFICIENT_NET.STEM_W,
            ds=cfg.EFFICIENT_NET.DEPTHS,
            ws=cfg.EFFICIENT_NET.WIDTHS,
            exp_rs=cfg.EFFICIENT_NET.EXP_RATIOS,
            se_r=cfg.EFFICIENT_NET.SE_RATIO,
            ss=cfg.EFFICIENT_NET.STRIDES,
            ks=cfg.EFFICIENT_NET.KERNELS,
            head_type=cfg.EFFICIENT_NET.HEAD_TYPE,
            head_w=cfg.EFFICIENT_NET.HEAD_W,
            act_type=cfg.EFFICIENT_NET.ACT_FUN,
            nc=cfg.MODEL.NUM_CLASSES
        )
        self.apply(nu.init_weights)
    def _construct_class(
        self, stem_w, ds, ws, exp_rs, se_r, ss, ks,
        head_type, head_w, act_type, nc
    ):
        """Constructs imagenet models."""
        # Group params by stage
        stage_params = list(zip(ds, ws, exp_rs, ss, ks))
        # Activation function
        act_fun = get_act_fun(act_type)
        # Set dim for each stage; graph variants take widths from RGRAPH.DIM_LIST
        dim_list = cfg.RGRAPH.DIM_LIST
        expdim_list = [int(cfg.EFFICIENT_NET.WIDTHS[i]*cfg.EFFICIENT_NET.EXP_RATIOS[i])
                       for i in range(len(cfg.EFFICIENT_NET.WIDTHS))]
        # Construct the stems
        self.stem = StemIN(3, stem_w, act_fun)
        prev_w = stem_w
        # Construct the stages (registration order s1..sN defines forward order)
        for i, (d, w, exp_r, stride, kernel) in enumerate(stage_params):
            if cfg.RESNET.TRANS_FUN != 'mbconv_transform':
                w = dim_list[i]
            exp_w = expdim_list[i]
            self.add_module(
                's{}'.format(i + 1),
                Stage(prev_w, exp_r, kernel, stride, se_r, w, d, act_fun, exp_w=exp_w)
            )
            prev_w = w
        # Construct the head
        if head_type == 'conv_head':
            self.head = ConvHead(prev_w, head_w, nc, act_fun)
        elif head_type == 'linear_head':
            self.head = LinearHead(prev_w, head_w, nc, act_fun)
        else:
            self.head = SimpleHead(prev_w, nc)
    def forward(self, x):
        for module in self.children():
            x = module(x)
        return x
RobDanns | RobDanns-main/deep_learning/pycls/models/resnet.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""ResNet or ResNeXt model."""
import torch.nn as nn
import torch
from pycls.config import cfg
import pycls.utils.logging as lu
import pycls.utils.net as nu
from .relation_graph import *
import time
import pdb
logger = lu.get_logger(__name__)
# Stage depths for an ImageNet model {model depth -> (d2, d3, d4, d5)};
# keys are the standard ResNet depths.
_IN_MODEL_STAGE_DS = {
    18: (2, 2, 2, 2),
    34: (3, 4, 6, 3),
    50: (3, 4, 6, 3),
    101: (3, 4, 23, 3),
    152: (3, 8, 36, 3),
}
def get_trans_fun(name):
    """Maps a transformation-function name to its block class."""
    mapping = {
        ############ Res-34
        'channelbasic_transform': ChannelBasicTransform,
        'groupbasictalk_transform': GroupBasicTalkTransform,
        ############ Res-34-sep
        'channelsep_transform': ChannelSepTransform,
        'groupseptalk_transform': GroupSepTalkTransform,
        ############ Res-50
        'bottleneck_transform': BottleneckTransform,
        'talkbottleneck_transform': TalkBottleneckTransform,
    }
    assert name in mapping, \
        'Transformation function \'{}\' not supported'.format(name)
    return mapping[name]
############ Res-34
class ChannelBasicTransform(nn.Module):
    """Basic transformation: 3x3, 3x3.

    forward() iterates children in registration order, so the attribute
    assignment order below IS the execution order.
    """
    def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
        super(ChannelBasicTransform, self).__init__()
        # dim_inner, num_gs, seed are unused here; kept for signature parity
        # with the other transform classes.
        self._construct_class(dim_in, dim_out, stride)
    def _construct_class(self, dim_in, dim_out, stride):
        # 3x3, BN, ReLU
        self.a = nn.Conv2d(
            dim_in, dim_out, kernel_size=3,
            stride=stride, padding=1, bias=False
        )
        self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
        # 3x3, BN (final BN of the block, flagged for zero-gamma init)
        self.b = nn.Conv2d(
            dim_out, dim_out, kernel_size=3,
            stride=1, padding=1, bias=False
        )
        self.b_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        self.b_bn.final_bn = True
    def forward(self, x):
        for layer in self.children():
            x = layer(x)
        return x
class GroupBasicTalkTransform(nn.Module):
    """Basic transformation: 3x3, 3x3, relational graph (TalkConv2d)."""
    def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
        # NOTE(review): plain attribute set before super().__init__() -- works
        # for non-module values, but confirm against the torch version in use.
        self.seed = seed
        super(GroupBasicTalkTransform, self).__init__()
        self._construct_class(dim_in, dim_out, stride)
    def _construct_class(self, dim_in, dim_out, stride):
        # 3x3, BN, ReLU (graph-wired conv)
        self.a = TalkConv2d(
            dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=3,
            stride=stride, padding=1, bias=False,
            message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
            sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
        )
        self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
        # 3x3, BN (final BN of the block)
        self.b = TalkConv2d(
            dim_out, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=3,
            stride=1, padding=1, bias=False,
            message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
            sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
        )
        self.b_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        self.b_bn.final_bn = True
    def forward(self, x):
        # Children execute in registration order: a, a_bn, a_relu, b, b_bn
        for layer in self.children():
            x = layer(x)
        return x
############ Res-34-sep
class ChannelSepTransform(nn.Module):
    """Separable transformation: 3x3, 3x3 (each as depthwise 3x3 + pointwise 1x1)."""
    def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
        super(ChannelSepTransform, self).__init__()
        self._construct_class(dim_in, dim_out, stride)
    def _construct_class(self, dim_in, dim_out, stride):
        # ReLU, 3x3 depthwise, BN, 1x1 pointwise, BN
        self.a_3x3 = nn.Conv2d(
            dim_in, dim_in, kernel_size=3,
            stride=stride, padding=1, bias=False, groups=dim_in
        )
        self.a_1x1 = nn.Conv2d(
            dim_in, dim_out, kernel_size=1,
            stride=1, padding=0, bias=False
        )
        self.a_1x1_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
        # ReLU, 3x3 depthwise, BN, 1x1 pointwise, BN (final BN of the block)
        self.b_3x3 = nn.Conv2d(
            dim_out, dim_out, kernel_size=3,
            stride=1, padding=1, bias=False, groups=dim_out
        )
        self.b_1x1 = nn.Conv2d(
            dim_out, dim_out, kernel_size=1,
            stride=1, padding=0, bias=False
        )
        self.b_1x1_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        self.b_1x1_bn.final_bn = True
    def forward(self, x):
        # Children execute in registration order (attribute assignment order)
        for layer in self.children():
            x = layer(x)
        return x
class GroupSepTalkTransform(nn.Module):
    """Separable transformation: 3x3, 3x3, relational graph on the 1x1 convs."""
    def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
        # Graph seed for the TalkConv2d layers (set before Module init)
        self.seed = seed
        super(GroupSepTalkTransform, self).__init__()
        self._construct_class(dim_in, dim_out, stride)
    def _construct_class(self, dim_in, dim_out, stride):
        # ReLU, 3x3 depthwise, BN, graph-wired 1x1, BN
        self.a_3x3 = nn.Conv2d(
            dim_in, dim_in, kernel_size=3,
            stride=stride, padding=1, bias=False, groups=dim_in
        )
        self.a_1x1 = TalkConv2d(
            dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
            stride=1, padding=0, bias=False,
            message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
            sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
        )
        self.a_1x1_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
        # ReLU, 3x3 depthwise, BN, graph-wired 1x1, BN (final BN of the block)
        self.b_3x3 = nn.Conv2d(
            dim_out, dim_out, kernel_size=3,
            stride=1, padding=1, bias=False, groups=dim_out
        )
        self.b_1x1 = TalkConv2d(
            dim_out, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
            stride=1, padding=0, bias=False,
            message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
            sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
        )
        self.b_1x1_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        self.b_1x1_bn.final_bn = True
    def forward(self, x):
        for layer in self.children():
            x = layer(x)
        return x
############ Res-50
class BottleneckTransform(nn.Module):
    """Bottleneck transformation: 1x1, 3x3, 1x1 (inner width = out/4)."""
    def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
        super(BottleneckTransform, self).__init__()
        # The passed-in dim_inner is ignored; the bottleneck width is derived
        dim_inner = int(round(dim_out / 4))
        self._construct_class(dim_in, dim_out, stride, dim_inner, num_gs, seed)
    def _construct_class(self, dim_in, dim_out, stride, dim_inner, num_gs, seed):
        # MSRA -> stride=2 is on 1x1; TH/C2 -> stride=2 is on 3x3
        # (str1x1, str3x3) = (stride, 1) if cfg.RESNET.STRIDE_1X1 else (1, stride)
        (str1x1, str3x3) = (1, stride)
        # 1x1, BN, ReLU
        self.a = nn.Conv2d(
            dim_in, dim_inner, kernel_size=1,
            stride=str1x1, padding=0, bias=False
        )
        self.a_bn = nn.BatchNorm2d(
            dim_inner, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
        )
        self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
        # 3x3, BN, ReLU
        self.b = nn.Conv2d(
            dim_inner, dim_inner, kernel_size=3,
            stride=str3x3, padding=1, groups=num_gs, bias=False
        )
        self.b_bn = nn.BatchNorm2d(
            dim_inner, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
        )
        self.b_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
        # 1x1, BN (final BN of the block, flagged for zero-gamma init)
        self.c = nn.Conv2d(
            dim_inner, dim_out, kernel_size=1,
            stride=1, padding=0, bias=False
        )
        self.c_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        self.c_bn.final_bn = True
    def forward(self, x):
        # Children execute in registration order: a, a_bn, a_relu, ..., c_bn
        for layer in self.children():
            x = layer(x)
        return x
class TalkBottleneckTransform(nn.Module):
    """Bottleneck transformation: 1x1, 3x3, 1x1, with relational-graph convs.

    Same layout as BottleneckTransform but every conv is a TalkConv2d wired
    by a relational graph. NOTE: forward() applies children in attribute
    registration order, so the assignment order below is load-bearing.
    """
    def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
        super(TalkBottleneckTransform, self).__init__()
        # Inner width is always dim_out / 4 (passed-in dim_inner is ignored).
        dim_inner = int(round(dim_out / 4))
        # seed selects the relational-graph wiring of the TalkConv2d layers.
        self.seed = seed
        self._construct_class(dim_in, dim_out, stride, dim_inner, num_gs, seed)
    def _construct_class(self, dim_in, dim_out, stride, dim_inner, num_gs, seed):
        # MSRA -> stride=2 is on 1x1; TH/C2 -> stride=2 is on 3x3
        # (str1x1, str3x3) = (stride, 1) if cfg.RESNET.STRIDE_1X1 else (1, stride)
        # Hard-coded to the TH/C2 convention: stride lives on the 3x3 conv.
        (str1x1, str3x3) = (1, stride)
        # 1x1, BN, ReLU
        self.a = TalkConv2d(
            dim_in, dim_inner, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
            stride=str1x1, padding=0, bias=False,
            message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
            sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
        )
        self.a_bn = nn.BatchNorm2d(
            dim_inner, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
        )
        self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
        # 3x3, BN, ReLU
        self.b = TalkConv2d(
            dim_inner, dim_inner, cfg.RGRAPH.GROUP_NUM, kernel_size=3,
            stride=str3x3, padding=1, bias=False,
            message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
            sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
        )
        self.b_bn = nn.BatchNorm2d(
            dim_inner, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
        )
        self.b_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
        # 1x1, BN (no ReLU: activation happens after the residual add)
        self.c = TalkConv2d(
            dim_inner, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
            stride=1, padding=0, bias=False,
            message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
            sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
        )
        self.c_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        # final_bn flag — presumably consumed by the weight initializer
        # (e.g. zero-init gamma); TODO confirm in pycls.utils.net
        self.c_bn.final_bn = True
    def forward(self, x):
        # Children run in the order they were registered above.
        for layer in self.children():
            x = layer(x)
        return x
##### Remaining ResNet code
class ResBlock(nn.Module):
    """Residual block computing act(shortcut(x) + F(x)).

    The shortcut is the identity when input and output shapes match,
    otherwise a 1x1 projection (graph-wired for 'group' transforms) + BN.
    """
    def __init__(
        self, dim_in, dim_out, stride, trans_fun, dim_inner=None, num_gs=1, seed=None):
        super(ResBlock, self).__init__()
        self.seed = seed
        self._construct_class(dim_in, dim_out, stride, trans_fun, dim_inner, num_gs, seed)
    def _add_skip_proj(self, dim_in, dim_out, stride):
        # Relational-graph ('group') transforms get a graph-wired 1x1
        # projection — unless they share weights ('share') — otherwise a
        # plain 1x1 conv is used.
        use_talk_proj = (
            'group' in cfg.RESNET.TRANS_FUN and 'share' not in cfg.RESNET.TRANS_FUN
        )
        if use_talk_proj:
            self.proj = TalkConv2d(
                dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
                stride=stride, padding=0, bias=False,
                message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED,
                agg=cfg.RGRAPH.AGG_FUNC, sparsity=cfg.RGRAPH.SPARSITY,
                p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed)
        else:
            self.proj = nn.Conv2d(
                dim_in, dim_out, kernel_size=1,
                stride=stride, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
    def _construct_class(self, dim_in, dim_out, stride, trans_fun, dim_inner, num_gs, seed):
        # A projection shortcut is only needed when the shape changes.
        self.proj_block = (dim_in != dim_out) or (stride != 1)
        if self.proj_block:
            self._add_skip_proj(dim_in, dim_out, stride)
        self.f = trans_fun(dim_in, dim_out, stride, dim_inner, num_gs, seed)
        self.act = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
    def forward(self, x):
        shortcut = self.bn(self.proj(x)) if self.proj_block else x
        return self.act(shortcut + self.f(x))
class ResStage(nn.Module):
    """Stage of ResNet: num_bs residual blocks, optionally followed by
    extra 1x1 blocks after each one (cfg.RGRAPH.ADD_1x1)."""
    def __init__(
        self, dim_in, dim_out, stride, num_bs, dim_inner=None, num_gs=1):
        super(ResStage, self).__init__()
        self._construct_class(dim_in, dim_out, stride, num_bs, dim_inner, num_gs)
    def _construct_class(self, dim_in, dim_out, stride, num_bs, dim_inner, num_gs):
        # KEEP_GRAPH: reuse the same relational-graph seed for every block;
        # otherwise start from a scaled seed and bump it once per block so
        # each block gets a distinct graph.
        if cfg.RGRAPH.KEEP_GRAPH:
            seed = cfg.RGRAPH.SEED_GRAPH
        else:
            seed = int(cfg.RGRAPH.SEED_GRAPH * 100)
        for i in range(num_bs):
            # Stride and dim_in apply to the first block of the stage
            b_stride = stride if i == 0 else 1
            b_dim_in = dim_in if i == 0 else dim_out
            # Retrieve the transformation function
            trans_fun = get_trans_fun(cfg.RESNET.TRANS_FUN)
            # Construct the block
            res_block = ResBlock(
                b_dim_in, dim_out, b_stride, trans_fun, dim_inner, num_gs, seed=seed
            )
            if not cfg.RGRAPH.KEEP_GRAPH:
                seed += 1
            self.add_module('b{}'.format(i + 1), res_block)
            # Optional extra 1x1 blocks appended after each main block.
            for j in range(cfg.RGRAPH.ADD_1x1):
                trans_fun = get_trans_fun(cfg.RESNET.TRANS_FUN + '1x1')
                # Construct the block
                res_block = ResBlock(
                    dim_out, dim_out, 1, trans_fun, dim_inner, num_gs, seed=seed
                )
                if not cfg.RGRAPH.KEEP_GRAPH:
                    seed += 1
                self.add_module('b{}_{}1x1'.format(i + 1, j + 1), res_block)
    def forward(self, x):
        # Blocks run in the order they were added above.
        for block in self.children():
            x = block(x)
        return x
class ResStem(nn.Module):
    """Stem of ResNet: the first conv applied to the raw image.

    NOTE: forward() applies children in attribute registration order, so the
    assignment order in the _construct_* methods is load-bearing.
    """
    def __init__(self, dim_in, dim_out):
        assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
            'Train and test dataset must be the same for now'
        super(ResStem, self).__init__()
        if cfg.TRAIN.DATASET == 'cifar10':
            self._construct_cifar(dim_in, dim_out)
        else:
            self._construct_imagenet(dim_in, dim_out)
    def _construct_cifar(self, dim_in, dim_out):
        # 7x7 (stride 1), BN, ReLU — the original 3x3 cifar stem is kept
        # commented out below for reference.
        # self.conv = nn.Conv2d(
        #     dim_in, dim_out, kernel_size=3,
        #     stride=1, padding=1, bias=False
        # )
        self.conv = nn.Conv2d(
            dim_in, dim_out, kernel_size=7,
            stride=1, padding=3, bias=False
        )
        self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
    def _construct_imagenet(self, dim_in, dim_out):
        # 7x7 (stride 2), BN, ReLU, 3x3 max-pool (stride 2)
        self.conv = nn.Conv2d(
            dim_in, dim_out, kernel_size=7,
            stride=2, padding=3, bias=False
        )
        self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    def forward(self, x):
        # Children run in the order they were registered above.
        for layer in self.children():
            x = layer(x)
        return x
class ResHead(nn.Module):
    """ResNet head: global average pooling followed by a linear classifier.

    Maps (N, dim_in, H, W) feature maps to (N, num_classes) logits.
    """
    def __init__(self, dim_in, num_classes):
        super(ResHead, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(dim_in, num_classes, bias=True)
    def forward(self, x):
        # (N, C, H, W) -> (N, C, 1, 1) -> (N, C) -> (N, num_classes)
        pooled = self.avg_pool(x)
        flat = pooled.view(pooled.size(0), -1)
        return self.fc(flat)
class ResNet(nn.Module):
    """ResNet model.

    Builds either a cifar-style (3-stage) or imagenet-style (4-stage)
    network depending on cfg.TRAIN.DATASET; 'tinyimagenet200' and
    'imagenet' both take the imagenet path. Stage widths come from
    cfg.RGRAPH.DIM_LIST. forward() runs children in registration order.
    """
    def __init__(self):
        assert cfg.TRAIN.DATASET in ['cifar10', 'cifar100', 'tinyimagenet200', 'imagenet'], \
            'Training ResNet on {} is not supported'.format(cfg.TRAIN.DATASET)
        assert cfg.TEST.DATASET in ['cifar10', 'cifar100', 'tinyimagenet200', 'imagenet'], \
            'Testing ResNet on {} is not supported'.format(cfg.TEST.DATASET)
        assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
            'Train and test dataset must be the same for now'
        super(ResNet, self).__init__()
        if cfg.TRAIN.DATASET == 'cifar10':
            self._construct_cifar()
        elif cfg.TRAIN.DATASET == 'cifar100':
            self._construct_cifar()
        else:
            # tinyimagenet200 / imagenet both use the imagenet layout
            self._construct_imagenet()
        self.apply(nu.init_weights)
    # # ##### basic transform
    def _construct_cifar(self):
        assert (cfg.MODEL.DEPTH - 2) % 6 == 0, \
            'Model depth should be of the format 6n + 2 for cifar'
        logger.info('Constructing: ResNet-{}, cifar'.format(cfg.MODEL.DEPTH))
        # Each stage has the same number of blocks for cifar
        num_blocks = int((cfg.MODEL.DEPTH - 2) / 6)
        # length = num of stages (excluding stem and head)
        dim_list = cfg.RGRAPH.DIM_LIST
        # Stem: (N, 3, 32, 32) -> (N, 64, 32, 32)
        # (widened from the classic 16-channel cifar stem, kept below)
        # self.s1 = ResStem(dim_in=3, dim_out=16)
        self.s1 = ResStem(dim_in=3, dim_out=64)
        # Stage 2: (N, 64, 32, 32) -> (N, dim_list[0], 32, 32)
        # self.s2 = ResStage(dim_in=16, dim_out=dim_list[0], stride=1, num_bs=num_blocks)
        self.s2 = ResStage(dim_in=64, dim_out=dim_list[0], stride=1, num_bs=num_blocks)
        # Stage 3: halves spatial resolution -> (N, dim_list[1], 16, 16)
        self.s3 = ResStage(dim_in=dim_list[0], dim_out=dim_list[1], stride=2, num_bs=num_blocks)
        # Stage 4: halves spatial resolution -> (N, dim_list[2], 8, 8)
        self.s4 = ResStage(dim_in=dim_list[1], dim_out=dim_list[2], stride=2, num_bs=num_blocks)
        # Head: (N, dim_list[2], 8, 8) -> (N, num_classes)
        self.head = ResHead(dim_in=dim_list[2], num_classes=cfg.MODEL.NUM_CLASSES)
    # smaller imagenet
    def _construct_imagenet(self):
        logger.info('Constructing: ResNet-{}, Imagenet'.format(cfg.MODEL.DEPTH))
        # Retrieve the number of blocks per stage (excluding base)
        (d2, d3, d4, d5) = _IN_MODEL_STAGE_DS[cfg.MODEL.DEPTH]
        # Compute the initial inner block dim
        dim_list = cfg.RGRAPH.DIM_LIST
        # debug print of the per-stage widths
        print(dim_list)
        # Stem: (N, 3, 224, 224) -> (N, 64, 56, 56)
        self.s1 = ResStem(dim_in=3, dim_out=64)
        # Stage 2: (N, 64, 56, 56) -> (N, dim_list[0], 56, 56)
        self.s2 = ResStage(
            dim_in=64, dim_out=dim_list[0], stride=1, num_bs=d2
        )
        # Stage 3: (N, dim_list[0], 56, 56) -> (N, dim_list[1], 28, 28)
        self.s3 = ResStage(
            dim_in=dim_list[0], dim_out=dim_list[1], stride=2, num_bs=d3
        )
        # Stage 4: (N, dim_list[1], 28, 28) -> (N, dim_list[2], 14, 14)
        self.s4 = ResStage(
            dim_in=dim_list[1], dim_out=dim_list[2], stride=2, num_bs=d4
        )
        # Stage 5: (N, dim_list[2], 14, 14) -> (N, dim_list[3], 7, 7)
        self.s5 = ResStage(
            dim_in=dim_list[2], dim_out=dim_list[3], stride=2, num_bs=d5
        )
        # Head: (N, dim_list[3], 7, 7) -> (N, num_classes)
        self.head = ResHead(dim_in=dim_list[3], num_classes=cfg.MODEL.NUM_CLASSES)
    def forward(self, x):
        # Modules run in registration order: stem, stages, head.
        for module in self.children():
            x = module(x)
        return x
| 20,015 | 37.198473 | 108 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/cnn.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""CNN model."""
import torch.nn as nn
import torch
from pycls.config import cfg
import pycls.utils.logging as lu
import pycls.utils.net as nu
from .relation_graph import *
logger = lu.get_logger(__name__)
def get_trans_fun(name):
    """Retrieves the transformation function by name."""
    # Registry mapping config names to transform classes.
    registry = {
        'convbasic_transform': ConvBasicTransform,        # plain 3x3 conv
        'symconvbasic_transform': SymConvBasicTransform,  # symmetric 3x3 conv
        'convtalk_transform': ConvTalkTransform,          # relational graph
    }
    assert name in registry.keys(), \
        'Transformation function \'{}\' not supported'.format(name)
    return registry[name]
##### (1) Level 1: channel
### (1.1) Basic Conv
class ConvBasicTransform(nn.Module):
    """Basic transformation: a single 3x3 conv + BN + ReLU.

    dim_inner, num_gs and seed are accepted for interface parity with the
    other transforms but unused. forward() applies children in registration
    order.
    """
    def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
        super(ConvBasicTransform, self).__init__()
        self._construct_class(dim_in, dim_out, stride)
    def _construct_class(self, dim_in, dim_out, stride):
        # 3x3, BN, ReLU
        self.a = nn.Conv2d(
            dim_in, dim_out, kernel_size=3,
            stride=stride, padding=1, bias=False
        )
        self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        # self.a_bn.final_bn = True
        self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
    def forward(self, x):
        for layer in self.children():
            x = layer(x)
        return x
class SymConvBasicTransform(nn.Module):
    """Basic transformation: symmetric 3x3 conv (SymConv2d) + BN + ReLU.

    dim_inner, num_gs and seed are accepted for interface parity but unused.
    """
    def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
        super(SymConvBasicTransform, self).__init__()
        self._construct_class(dim_in, dim_out, stride)
    def _construct_class(self, dim_in, dim_out, stride):
        # 3x3, BN, ReLU
        self.a = SymConv2d(
            dim_in, dim_out, kernel_size=3,
            stride=stride, padding=1, bias=False
        )
        self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        # self.a_bn.final_bn = True
        self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
    def forward(self, x):
        for layer in self.children():
            x = layer(x)
        return x
class ConvTalkTransform(nn.Module):
    """Basic transformation: 3x3 relational-graph conv (TalkConv2d) + BN + ReLU."""
    def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
        # seed selects the relational-graph wiring; stored before
        # _construct_class runs. dim_inner and num_gs are unused.
        self.seed = seed
        super(ConvTalkTransform, self).__init__()
        self._construct_class(dim_in, dim_out, stride)
    def _construct_class(self, dim_in, dim_out, stride):
        # 3x3, BN, ReLU
        self.a = TalkConv2d(
            dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=3,
            stride=stride, padding=1, bias=False,
            message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
            sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
        )
        self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        # self.a_bn.final_bn = True
        self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
    def forward(self, x):
        for layer in self.children():
            x = layer(x)
        return x
##### Remaining CNN code
class CNNStage(nn.Module):
    """Stage of CNN: a plain (non-residual) sequence of num_bs transform blocks."""
    def __init__(
        self, dim_in, dim_out, stride, num_bs, dim_inner=None, num_gs=1):
        super(CNNStage, self).__init__()
        self._construct_class(dim_in, dim_out, stride, num_bs, dim_inner, num_gs)
    def _construct_class(self, dim_in, dim_out, stride, num_bs, dim_inner, num_gs):
        # KEEP_GRAPH: same relational-graph seed for every block; otherwise
        # bump the seed per block so each gets a distinct graph.
        if cfg.RGRAPH.KEEP_GRAPH:
            seed = cfg.RGRAPH.SEED_GRAPH
        else:
            seed = int(cfg.RGRAPH.SEED_GRAPH * 100)
        for i in range(num_bs):
            # Stride and dim_in apply to the first block of the stage
            b_stride = stride if i == 0 else 1
            b_dim_in = dim_in if i == 0 else dim_out
            # Retrieve the transformation function
            trans_fun = get_trans_fun(cfg.RESNET.TRANS_FUN)
            # Construct the block
            res_block = trans_fun(
                b_dim_in, dim_out, b_stride, dim_inner, num_gs, seed=seed
            )
            if not cfg.RGRAPH.KEEP_GRAPH:
                seed += 1
            self.add_module('b{}'.format(i + 1), res_block)
    def forward(self, x):
        # Blocks run in the order they were added above.
        for block in self.children():
            x = block(x)
        return x
class CNNStem(nn.Module):
    """Stem of CNN: first 3x3 conv (+ optional downsampling pool).

    NOTE: forward() applies children in attribute registration order.
    """
    def __init__(self, dim_in, dim_out):
        assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
            'Train and test dataset must be the same for now'
        super(CNNStem, self).__init__()
        if cfg.TRAIN.DATASET == 'cifar10':
            self._construct_cifar(dim_in, dim_out)
        elif cfg.TRAIN.DATASET == 'cifar100':
            self._construct_cifar(dim_in, dim_out)
        else:
            self._construct_imagenet(dim_in, dim_out)
    def _construct_cifar(self, dim_in, dim_out):
        # 3x3, BN, ReLU; 'downsample' mode additionally appends a stride-2
        # max-pool. NOTE(review): any other STEM_MODE value silently builds
        # an empty stem (identity) — confirm that is intended.
        if cfg.RGRAPH.STEM_MODE == 'default':
            self.conv = nn.Conv2d(
                dim_in, dim_out, kernel_size=3,
                stride=1, padding=1, bias=False
            )
            self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS,
                                     momentum=cfg.BN.MOM)
            self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
        elif cfg.RGRAPH.STEM_MODE == 'downsample':
            self.conv = nn.Conv2d(
                dim_in, dim_out, kernel_size=3,
                stride=1, padding=1, bias=False
            )
            self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS,
                                     momentum=cfg.BN.MOM)
            self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
            self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    def _construct_imagenet(self, dim_in, dim_out):
        # 3x3 (stride 2), BN, ReLU, 3x3 max-pool (stride 2)
        self.conv = nn.Conv2d(
            dim_in, dim_out, kernel_size=3,
            stride=2, padding=1, bias=False
        )
        self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    def forward(self, x):
        for layer in self.children():
            x = layer(x)
        return x
class CNNHead(nn.Module):
    """CNN head: global average pool, dropout, then a linear classifier.

    Maps (N, dim_in, H, W) feature maps to (N, num_classes) logits.
    """
    def __init__(self, dim_in, num_classes):
        super(CNNHead, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(p=0.15)
        self.fc = nn.Linear(dim_in, num_classes, bias=True)
    def forward(self, x):
        # (N, C, H, W) -> (N, C, 1, 1) -> (N, C) -> dropout -> (N, num_classes)
        pooled = self.avg_pool(x)
        flat = pooled.view(pooled.size(0), -1)
        return self.fc(self.dropout(flat))
class CNN(nn.Module):
    """CNN model: stem, three stride-2 stages, and a classifier head.

    Stage widths come from cfg.RGRAPH.DIM_LIST; each stage gets
    cfg.MODEL.LAYERS // 3 blocks. forward() runs children in registration
    order.
    """
    def __init__(self):
        assert cfg.TRAIN.DATASET in ['cifar10', 'cifar100', 'tinyimagenet200', 'imagenet'], \
            'Training CNN on {} is not supported'.format(cfg.TRAIN.DATASET)
        assert cfg.TEST.DATASET in ['cifar10', 'cifar100', 'tinyimagenet200', 'imagenet'], \
            'Testing CNN on {} is not supported'.format(cfg.TEST.DATASET)
        assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
            'Train and test dataset must be the same for now'
        super(CNN, self).__init__()
        self._construct()
        self.apply(nu.init_weights)
    # # ##### basic transform
    def _construct(self):
        # Each stage has the same number of blocks for cifar
        dim_list = cfg.RGRAPH.DIM_LIST
        num_bs = cfg.MODEL.LAYERS // 3
        self.s1 = CNNStem(dim_in=3, dim_out=cfg.RGRAPH.DIM_FIRST)
        self.s2 = CNNStage(dim_in=cfg.RGRAPH.DIM_FIRST, dim_out=dim_list[0], stride=2, num_bs=num_bs)
        self.s3 = CNNStage(dim_in=dim_list[0], dim_out=dim_list[1], stride=2, num_bs=num_bs)
        self.s4 = CNNStage(dim_in=dim_list[1], dim_out=dim_list[2], stride=2, num_bs=num_bs)
        # self.s5 = CNNStage(dim_in=dim_list[2], dim_out=dim_list[3], stride=2, num_bs=num_bs)
        self.head = CNNHead(dim_in=dim_list[2], num_classes=cfg.MODEL.NUM_CLASSES)
    def forward(self, x):
        # Modules run in registration order: stem, stages, head.
        for module in self.children():
            x = module(x)
        return x
# #!/usr/bin/env python3
# # Copyright (c) Facebook, Inc. and its affiliates.
# #
# # This source code is licensed under the MIT license found in the
# # LICENSE file in the root directory of this source tree.
# """CNN model."""
# import torch.nn as nn
# import torch
# from pycls.config import cfg
# import pycls.utils.logging as lu
# import pycls.utils.net as nu
# from .relation_graph import *
# logger = lu.get_logger(__name__)
# def get_trans_fun(name):
# """Retrieves the transformation function by name."""
# trans_funs = {
# ##### (1) Level 1: channel
# ### (1.1) Basic Conv
# 'convbasic_transform': ConvBasicTransform,
# 'symconvbasic_transform': SymConvBasicTransform,
# 'convtalk_transform': ConvTalkTransform, # relational graph
# }
# assert name in trans_funs.keys(), \
# 'Transformation function \'{}\' not supported'.format(name)
# return trans_funs[name]
# ##### (1) Level 1: channel
# ### (1.1) Basic Conv
# class ConvBasicTransform(nn.Module):
# """Basic transformation: 3x3"""
# def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
# super(ConvBasicTransform, self).__init__()
# self._construct_class(dim_in, dim_out, stride)
# def _construct_class(self, dim_in, dim_out, stride):
# # 3x3, BN, ReLU
# self.a = nn.Conv2d(
# dim_in, dim_out, kernel_size=3,
# stride=stride, padding=1, bias=False
# )
# self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
# # self.a_bn.final_bn = True
# self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# def forward(self, x):
# for layer in self.children():
# x = layer(x)
# return x
# class SymConvBasicTransform(nn.Module):
# """Basic transformation: 3x3 conv, symmetric"""
# def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
# super(SymConvBasicTransform, self).__init__()
# self._construct_class(dim_in, dim_out, stride)
# def _construct_class(self, dim_in, dim_out, stride):
# # 3x3, BN, ReLU
# self.a = SymConv2d(
# dim_in, dim_out, kernel_size=3,
# stride=stride, padding=1, bias=False
# )
# self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
# # self.a_bn.final_bn = True
# self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# def forward(self, x):
# for layer in self.children():
# x = layer(x)
# return x
# class ConvTalkTransform(nn.Module):
# """Basic transformation: 3x3 conv, relational graph"""
# def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
# self.seed = seed
# super(ConvTalkTransform, self).__init__()
# self._construct_class(dim_in, dim_out, stride)
# def _construct_class(self, dim_in, dim_out, stride):
# # 3x3, BN, ReLU
# self.a = TalkConv2d(
# dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=3,
# stride=stride, padding=1, bias=False,
# message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
# sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
# )
# self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
# # self.a_bn.final_bn = True
# self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# def forward(self, x):
# for layer in self.children():
# x = layer(x)
# return x
# ##### Remaining CNN code
# class CNNStage(nn.Module):
# """Stage of CNN."""
# def __init__(
# self, dim_in, dim_out, stride, num_bs, dim_inner=None, num_gs=1):
# super(CNNStage, self).__init__()
# self._construct_class(dim_in, dim_out, stride, num_bs, dim_inner, num_gs)
# def _construct_class(self, dim_in, dim_out, stride, num_bs, dim_inner, num_gs):
# if cfg.RGRAPH.KEEP_GRAPH:
# seed = cfg.RGRAPH.SEED_GRAPH
# else:
# seed = int(cfg.RGRAPH.SEED_GRAPH * 100)
# for i in range(num_bs):
# # Stride and dim_in apply to the first block of the stage
# b_stride = stride if i == 0 else 1
# b_dim_in = dim_in if i == 0 else dim_out
# # Retrieve the transformation function
# trans_fun = get_trans_fun(cfg.RESNET.TRANS_FUN)
# # Construct the block
# res_block = trans_fun(
# b_dim_in, dim_out, b_stride, dim_inner, num_gs, seed=seed
# )
# if not cfg.RGRAPH.KEEP_GRAPH:
# seed += 1
# self.add_module('b{}'.format(i + 1), res_block)
# def forward(self, x):
# for block in self.children():
# x = block(x)
# return x
# class CNNStem(nn.Module):
# """Stem of CNN."""
# def __init__(self, dim_in, dim_out):
# assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
# 'Train and test dataset must be the same for now'
# super(CNNStem, self).__init__()
# if cfg.TRAIN.DATASET == 'cifar10':
# self._construct_cifar(dim_in, dim_out)
# else:
# self._construct_imagenet(dim_in, dim_out)
# def _construct_cifar(self, dim_in, dim_out):
# # 3x3, BN, ReLU
# if cfg.RGRAPH.STEM_MODE == 'default':
# self.conv = nn.Conv2d(
# dim_in, dim_out, kernel_size=3,
# stride=1, padding=1, bias=False
# )
# self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS,
# momentum=cfg.BN.MOM)
# self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
# elif cfg.RGRAPH.STEM_MODE == 'downsample':
# self.conv = nn.Conv2d(
# dim_in, dim_out, kernel_size=3,
# stride=1, padding=1, bias=False
# )
# self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS,
# momentum=cfg.BN.MOM)
# self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
# self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
# def _construct_imagenet(self, dim_in, dim_out):
# # 3x3, BN, ReLU, pool
# self.conv = nn.Conv2d(
# dim_in, dim_out, kernel_size=3,
# stride=2, padding=1, bias=False
# )
# self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
# self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
# self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
# def forward(self, x):
# for layer in self.children():
# x = layer(x)
# return x
# class CNNHead(nn.Module):
# """CNN head."""
# def __init__(self, dim_in, num_classes):
# super(CNNHead, self).__init__()
# self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(dim_in, num_classes, bias=True)
# def forward(self, x):
# x = self.avg_pool(x)
# x = x.view(x.size(0), -1)
# x = self.fc(x)
# return x
# class CNN(nn.Module):
# """CNN model."""
# def __init__(self):
# assert cfg.TRAIN.DATASET in ['cifar10', 'imagenet'], \
# 'Training ResNet on {} is not supported'.format(cfg.TRAIN.DATASET)
# assert cfg.TEST.DATASET in ['cifar10', 'imagenet'], \
# 'Testing ResNet on {} is not supported'.format(cfg.TEST.DATASET)
# assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
# 'Train and test dataset must be the same for now'
# super(CNN, self).__init__()
# self._construct()
# self.apply(nu.init_weights)
# # # ##### basic transform
# def _construct(self):
# # Each stage has the same number of blocks for cifar
# dim_list = cfg.RGRAPH.DIM_LIST
# num_bs = cfg.MODEL.LAYERS // 3
# self.s1 = CNNStem(dim_in=3, dim_out=cfg.RGRAPH.DIM_FIRST)
# self.s2 = CNNStage(dim_in=cfg.RGRAPH.DIM_FIRST, dim_out=dim_list[0], stride=2, num_bs=num_bs)
# self.s3 = CNNStage(dim_in=dim_list[0], dim_out=dim_list[1], stride=2, num_bs=num_bs)
# self.s4 = CNNStage(dim_in=dim_list[1], dim_out=dim_list[2], stride=2, num_bs=num_bs)
# self.head = CNNHead(dim_in=dim_list[2], num_classes=cfg.MODEL.NUM_CLASSES)
# def forward(self, x):
# for module in self.children():
# x = module(x)
# return x
| 17,388 | 34.779835 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/vgg.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""VGG example"""
import torch.nn as nn
import torch.nn.functional as F
from pycls.config import cfg
import pycls.utils.net as nu
from .relation_graph import *
class VGG(nn.Module):
    """VGG-style network whose convs can be relational-graph TalkConv2d layers.

    Layer widths come from cfg.RGRAPH.DIM_LIST; the conv type is selected by
    cfg.RESNET.TRANS_FUN (the stem always uses a plain conv).
    NOTE(review): num_classes defaults to 1024, not a real class count —
    callers presumably always pass the dataset's class count; confirm.
    """
    def __init__(self, num_classes=1024):
        super(VGG, self).__init__()
        self.seed = cfg.RGRAPH.SEED_GRAPH
        def conv_bn(dim_in, dim_out, stride, stem=False):
            # conv + BN + ReLU; the stem is always a plain conv, other
            # layers use the configured transform type.
            if stem:
                conv = get_conv('convbasic_transform', dim_in, dim_out, stride)
            else:
                conv = get_conv(cfg.RESNET.TRANS_FUN, dim_in, dim_out, stride)
            return nn.Sequential(
                conv,
                nn.BatchNorm2d(dim_out),
                nn.ReLU(inplace=True)
            )
        def get_conv(name, dim_in, dim_out, stride=1):
            # When KEEP_GRAPH is off, each conv gets a fresh graph seed —
            # this mutation makes the call order below load-bearing.
            # NOTE(review): names other than the two handled here return
            # None, which would fail inside nn.Sequential — confirm only
            # these two TRANS_FUN values are used with VGG.
            if not cfg.RGRAPH.KEEP_GRAPH:
                self.seed += 1
            if name == 'convbasic_transform':
                return nn.Conv2d(dim_in, dim_out,
                                 kernel_size=3, stride=stride,
                                 padding=1, bias=False)
            elif name == 'convtalk_transform':
                return TalkConv2d(
                    dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=3,
                    stride=stride, padding=1, bias=False,
                    message_type=cfg.RGRAPH.MESSAGE_TYPE,
                    directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
                    sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P,
                    talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
                )
        self.dim_list = cfg.RGRAPH.DIM_LIST
        # print(self.dim_list)
        # VGG-style stack: pairs of conv_bn blocks separated by 2x2 max-pools.
        self.model = nn.Sequential(
            conv_bn(3, 64, 1, stem=True),
            conv_bn(64, self.dim_list[0], 1),
            nn.MaxPool2d(kernel_size=2, stride=2),
            conv_bn(self.dim_list[0], self.dim_list[1], 1),
            conv_bn(self.dim_list[1], self.dim_list[1], 1),
            nn.MaxPool2d(kernel_size=2, stride=2),
            conv_bn(self.dim_list[1], self.dim_list[2], 1),
            conv_bn(self.dim_list[2], self.dim_list[2], 1),
            nn.MaxPool2d(kernel_size=2, stride=2),
            conv_bn(self.dim_list[2], self.dim_list[3], 1),
            conv_bn(self.dim_list[3], self.dim_list[3], 1),
            nn.MaxPool2d(kernel_size=2, stride=2),
            conv_bn(self.dim_list[3], self.dim_list[3], 1),
            conv_bn(self.dim_list[3], self.dim_list[3], 1),
        )
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(self.dim_list[3], num_classes)
        self.apply(nu.init_weights)
    def forward(self, x):
        # conv stack -> global average pool -> flatten -> classifier
        x = self.model(x)
        x = self.avg_pool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
| 3,097 | 35.880952 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/mlp.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""MLP model."""
import torch.nn as nn
import torch
from pycls.config import cfg
import pycls.utils.logging as lu
import pycls.utils.net as nu
from .relation_graph import *
import time
import pdb
logger = lu.get_logger(__name__)
def get_trans_fun(name):
    """Retrieves the transformation function by name."""
    # Registry mapping config names to MLP transform classes.
    registry = {
        'linear_transform': LinearTransform,
        'symlinear_transform': SymLinearTransform,
        'grouplinear_transform': GroupLinearTransform,
        'groupshufflelinear_transform': GroupShuffleLinearTransform,
        'talklinear_transform': TalkLinearTransform,  # relational graph
    }
    assert name in registry.keys(), \
        'Transformation function \'{}\' not supported'.format(name)
    return registry[name]
##### (0) Basic
class LinearTransform(nn.Module):
    """Basic transformation: Linear + BN + ReLU.

    seed is accepted for interface parity but unused. forward() applies
    children in registration order.
    """
    def __init__(self, dim_in, dim_out, seed=None):
        super(LinearTransform, self).__init__()
        self._construct_class(dim_in, dim_out)
    def _construct_class(self, dim_in, dim_out):
        # Linear, BN, ReLU
        self.a = nn.Linear(
            dim_in, dim_out, bias=False
        )
        self.a_bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        # final_bn flag — presumably consumed by the weight initializer;
        # TODO confirm in pycls.utils.net
        self.a_bn.final_bn = True
        self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
    def forward(self, x):
        for layer in self.children():
            x = layer(x)
        return x
class SymLinearTransform(nn.Module):
    """Basic transformation: symmetric linear (SymLinear) + BN + ReLU.

    seed is accepted for interface parity but unused.
    """
    def __init__(self, dim_in, dim_out, seed=None):
        super(SymLinearTransform, self).__init__()
        self._construct_class(dim_in, dim_out)
    def _construct_class(self, dim_in, dim_out):
        # SymLinear, BN, ReLU
        self.a = SymLinear(
            dim_in, dim_out, bias=False
        )
        self.a_bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        self.a_bn.final_bn = True
        self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
    def forward(self, x):
        for layer in self.children():
            x = layer(x)
        return x
class GroupLinearTransform(nn.Module):
    """Basic transformation: grouped linear (GroupLinear) + BN + ReLU.

    Group size comes from cfg.RGRAPH.GROUP_SIZE; seed is unused.
    """
    def __init__(self, dim_in, dim_out, seed=None):
        super(GroupLinearTransform, self).__init__()
        self._construct_class(dim_in, dim_out)
    def _construct_class(self, dim_in, dim_out):
        # GroupLinear, BN, ReLU
        self.a = GroupLinear(
            dim_in, dim_out, bias=False, group_size=cfg.RGRAPH.GROUP_SIZE
        )
        self.a_bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        self.a_bn.final_bn = True
        self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
    def forward(self, x):
        for layer in self.children():
            x = layer(x)
        return x
class GroupShuffleLinearTransform(nn.Module):
    """Basic transformation: grouped linear + channel shuffle + BN + ReLU.

    The shuffle interleaves the outputs of the GROUP_NUM groups
    (ShuffleNet-style) before normalization. seed is unused.
    """
    def __init__(self, dim_in, dim_out, seed=None):
        super(GroupShuffleLinearTransform, self).__init__()
        self._construct_class(dim_in, dim_out)
    def _construct_class(self, dim_in, dim_out):
        # GroupLinear, shuffle, BN, ReLU (forward is explicit, not children())
        self.a = GroupLinear(
            dim_in, dim_out, bias=False, group_size=cfg.RGRAPH.GROUP_SIZE
        )
        self.a_bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        self.a_bn.final_bn = True
        self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
        # (channels_per_group, num_groups) used by the shuffle below
        self.shuffle_shape = (dim_out // cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.GROUP_NUM)
    def forward(self, x):
        x = self.a(x)
        # Channel shuffle: (N, C) -> (N, C/G, G) -> transpose -> (N, C),
        # interleaving group outputs across the feature dimension.
        x = x.view(x.shape[0], self.shuffle_shape[0], self.shuffle_shape[1]).permute(0, 2, 1).contiguous()
        x = x.view(x.shape[0], x.shape[1] * x.shape[2])
        x = self.a_bn(x)
        x = self.relu(x)
        return x
class TalkLinearTransform(nn.Module):
    """Basic transformation: relational-graph linear (TalkLinear) + BN + ReLU."""
    def __init__(self, dim_in, dim_out, seed=None):
        # seed selects the relational-graph wiring; stored before
        # _construct_class runs.
        self.seed = seed
        super(TalkLinearTransform, self).__init__()
        self._construct_class(dim_in, dim_out)
    def _construct_class(self, dim_in, dim_out):
        self.a = TalkLinear(
            dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, bias=False,
            message_type=cfg.RGRAPH.MESSAGE_TYPE, sparsity=cfg.RGRAPH.SPARSITY,
            p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed)
        self.a_bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        self.a_bn.final_bn = True
        self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
    def forward(self, x):
        # Children run in the order they were registered above.
        for layer in self.children():
            x = layer(x)
        return x
class MLPStage(nn.Module):
    """Stage of MLPNet: a sequence of num_bs transform blocks."""
    def __init__(
        self, dim_in, dim_out, num_bs):
        super(MLPStage, self).__init__()
        self._construct_class(dim_in, dim_out, num_bs)
    def _construct_class(self, dim_in, dim_out, num_bs):
        # KEEP_GRAPH: reuse the same graph seed for every block; otherwise
        # derive a width/sparsity-dependent starting seed and bump per block.
        if cfg.RGRAPH.KEEP_GRAPH:
            seed = cfg.RGRAPH.SEED_GRAPH
        else:
            seed = int(dim_out * 100 * cfg.RGRAPH.SPARSITY)
        for i in range(num_bs):
            # dim_in applies only to the first block of the stage
            b_dim_in = dim_in if i == 0 else dim_out
            trans_fun = get_trans_fun(cfg.RESNET.TRANS_FUN)
            res_block = trans_fun(
                b_dim_in, dim_out, seed=seed
            )
            if not cfg.RGRAPH.KEEP_GRAPH:
                seed += 1
            self.add_module('b{}'.format(i + 1), res_block)
    def forward(self, x):
        # Blocks run in the order they were added above.
        for block in self.children():
            x = block(x)
        return x
class MLPStem(nn.Module):
    """Stem of MLPNet: flatten the image, then Linear + BN + ReLU.

    Only cifar10 is supported. NOTE: forward() applies children in
    registration order after flattening.
    """
    def __init__(self, dim_in, dim_out):
        super(MLPStem, self).__init__()
        if cfg.TRAIN.DATASET == 'cifar10':
            self._construct_cifar(dim_in, dim_out)
        else:
            raise NotImplementedError
    def _construct_cifar(self, dim_in, dim_out):
        self.linear = nn.Linear(
            dim_in, dim_out, bias=False
        )
        self.bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
        self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
    def forward(self, x):
        # Flatten (N, C, H, W) images to (N, C*H*W) vectors first.
        x = x.view(x.size(0), -1)
        for layer in self.children():
            x = layer(x)
        return x
class MLPHead(nn.Module):
    """Classification head for MLPNet: one fully-connected layer producing logits."""

    def __init__(self, dim_in, num_classes):
        """dim_in: width of incoming features; num_classes: output logits."""
        super(MLPHead, self).__init__()
        # Attribute must stay named `fc` so state_dict keys are unchanged.
        self.fc = nn.Linear(dim_in, num_classes, bias=True)

    def forward(self, x):
        return self.fc(x)
class MLPNet(nn.Module):
    """MLPNet model: stem -> stage of relational-graph blocks -> head.

    Only CIFAR-10 (3*32*32 = 3072-dim flattened inputs) is supported.
    """
    def __init__(self):
        assert cfg.TRAIN.DATASET in ['cifar10'], \
            'Training MLPNet on {} is not supported'.format(cfg.TRAIN.DATASET)
        assert cfg.TEST.DATASET in ['cifar10'], \
            'Testing MLPNet on {} is not supported'.format(cfg.TEST.DATASET)
        assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
            'Train and test dataset must be the same for now'
        super(MLPNet, self).__init__()
        if cfg.TRAIN.DATASET == 'cifar10':
            self._construct_cifar()
        else:
            raise NotImplementedError
        # Project-wide weight initialization applied to all submodules.
        self.apply(nu.init_weights)
    # ##### basic transform
    def _construct_cifar(self):
        num_layers = cfg.MODEL.LAYERS
        dim_inner = cfg.RGRAPH.DIM_LIST[0]
        dim_first = cfg.RGRAPH.DIM_FIRST
        # 3072 = 3 * 32 * 32 flattened CIFAR input.
        self.s1 = MLPStem(dim_in=3072, dim_out=dim_first)
        self.s2 = MLPStage(dim_in=dim_first, dim_out=dim_inner, num_bs=num_layers)
        self.head = MLPHead(dim_in=dim_inner, num_classes=cfg.MODEL.NUM_CLASSES)
    def forward(self, x):
        # Children run in registration order: s1 -> s2 -> head.
        for module in self.children():
            x = module(x)
        return x
| 8,012 | 30.300781 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/model_builder.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Model construction functions."""
import torch
from pycls.config import cfg
from pycls.models.resnet import ResNet
from pycls.models.mlp import MLPNet
from pycls.models.cnn import CNN
from pycls.models.mobilenet import MobileNetV1
from pycls.models.efficientnet import EfficientNet
from pycls.models.vgg import VGG
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
logger = lu.get_logger(__name__)
# Supported model types
_MODEL_TYPES = {
'resnet': ResNet,
'mlpnet': MLPNet,
'cnn': CNN,
'mobilenet': MobileNetV1,
'efficientnet': EfficientNet,
'vgg': VGG,
}
def build_model():
    """Builds the model selected by cfg.MODEL.TYPE and moves it to the GPU.

    With cfg.NUM_GPUS > 1 the model is wrapped in DistributedDataParallel,
    which requires an initialized distributed process group.
    """
    assert cfg.MODEL.TYPE in _MODEL_TYPES.keys(), \
        'Model type \'{}\' not supported'.format(cfg.MODEL.TYPE)
    assert cfg.NUM_GPUS <= torch.cuda.device_count(), \
        'Cannot use more GPU devices than available'
    # Construct the model
    model = _MODEL_TYPES[cfg.MODEL.TYPE]()
    # Determine the GPU used by the current process
    cur_device = torch.cuda.current_device()
    # Transfer the model to the current GPU device
    model = model.cuda(device=cur_device)
    # Use multi-process data parallel model in the multi-gpu setting
    if cfg.NUM_GPUS > 1:
        # Make model replica operate on the current device
        model = torch.nn.parallel.DistributedDataParallel(
            module=model,
            device_ids=[cur_device],
            output_device=cur_device
        )
    return model
## auto match flop
def build_model_stats(mode='flops'):
    """Construct the configured model and report its complexity.

    mode: 'flops' returns the FLOP count; any other value returns the
    parameter count. The model stays on CPU -- no GPU transfer is needed
    just to count stats.
    """
    assert cfg.MODEL.TYPE in _MODEL_TYPES.keys(), \
        'Model type \'{}\' not supported'.format(cfg.MODEL.TYPE)
    assert cfg.NUM_GPUS <= torch.cuda.device_count(), \
        'Cannot use more GPU devices than available'
    # Instantiate the model from the type catalog.
    net = _MODEL_TYPES[cfg.MODEL.TYPE]()
    if mode == 'flops':
        return mu.flops_count(net)
    return mu.params_count(net)
| 2,355 | 30 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/mobilenet.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""MobileNet example"""
import torch.nn as nn
import torch.nn.functional as F
from pycls.config import cfg
import pycls.utils.net as nu
from .relation_graph import *
class MobileNetV1(nn.Module):
    """MobileNetV1 whose 1x1 pointwise convs may be relational-graph convs.

    Channel widths come from cfg.RGRAPH.DIM_LIST; the pointwise conv type
    is selected by cfg.RESNET.TRANS_FUN.
    """
    def __init__(self, num_classes=1024):
        super(MobileNetV1, self).__init__()
        # One shared graph seed, or a scaled seed that advances per layer
        # when each layer should sample its own graph.
        if cfg.RGRAPH.KEEP_GRAPH:
            self.seed = cfg.RGRAPH.SEED_GRAPH
        else:
            self.seed = int(cfg.RGRAPH.SEED_GRAPH * 100)
        def conv_bn(dim_in, dim_out, stride):
            # Standard stem block: 3x3 conv -> BN -> ReLU.
            return nn.Sequential(
                nn.Conv2d(dim_in, dim_out, 3, stride, 1, bias=False),
                nn.BatchNorm2d(dim_out),
                nn.ReLU(inplace=True)
            )
        def get_conv(name, dim_in, dim_out):
            # 1x1 pointwise conv: plain Conv2d or graph-masked TalkConv2d.
            # NOTE: implicitly returns None for any other `name`.
            if not cfg.RGRAPH.KEEP_GRAPH:
                self.seed += 1
            if name == 'channelbasic_transform':
                return nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
            elif name == 'groupbasictalk_transform':
                return TalkConv2d(
                    dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
                    stride=1, padding=0, bias=False,
                    message_type=cfg.RGRAPH.MESSAGE_TYPE,
                    directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
                    sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P,
                    talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
                )
        def conv_dw(dim_in, dim_out, stride):
            # Depthwise-separable block: 3x3 depthwise conv + 1x1 pointwise,
            # each followed by BN and ReLU.
            conv1x1 = get_conv(cfg.RESNET.TRANS_FUN, dim_in, dim_out)
            return nn.Sequential(
                nn.Conv2d(dim_in, dim_in, 3, stride, 1, groups=dim_in,
                          bias=False),
                nn.BatchNorm2d(dim_in),
                nn.ReLU(inplace=True),
                conv1x1,
                nn.BatchNorm2d(dim_out),
                nn.ReLU(inplace=True),
            )
        self.dim_list = cfg.RGRAPH.DIM_LIST
        # print(self.dim_list)
        self.model = nn.Sequential(
            conv_bn(3, 32, 2),
            conv_dw(32, self.dim_list[1], 1),
            conv_dw(self.dim_list[1], self.dim_list[2], 2),
            conv_dw(self.dim_list[2], self.dim_list[2], 1),
            conv_dw(self.dim_list[2], self.dim_list[3], 2),
            conv_dw(self.dim_list[3], self.dim_list[3], 1),
            conv_dw(self.dim_list[3], self.dim_list[4], 2),
            conv_dw(self.dim_list[4], self.dim_list[4], 1),
            conv_dw(self.dim_list[4], self.dim_list[4], 1),
            conv_dw(self.dim_list[4], self.dim_list[4], 1),
            conv_dw(self.dim_list[4], self.dim_list[4], 1),
            conv_dw(self.dim_list[4], self.dim_list[4], 1),
            conv_dw(self.dim_list[4], self.dim_list[5], 2),
            conv_dw(self.dim_list[5], self.dim_list[5], 1),
        )
        self.fc = nn.Linear(self.dim_list[5], num_classes)
        self.apply(nu.init_weights)
    def forward(self, x):
        x = self.model(x)
        # 7x7 average pool -- matches a 7x7 final feature map (e.g. from a
        # 224x224 input); confirm for other resolutions.
        x = F.avg_pool2d(x, 7)
        x = x.view(-1, self.dim_list[5])
        x = self.fc(x)
        return x
| 3,404 | 35.223404 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/optimizer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Optimizer."""
import torch
from pycls.config import cfg
import pycls.utils.lr_policy as lr_policy
def construct_optimizer(model):
    """Constructs the optimizer (plain SGD).

    All hyper-parameters -- base lr, momentum, weight decay, dampening,
    nesterov -- are read from cfg.OPTIM.

    Note that the momentum update in PyTorch differs from the one in Caffe2.
    In particular,

        Caffe2:
            V := mu * V + lr * g
            p := p - V

        PyTorch:
            V := mu * V + g
            p := p - lr * V

    where V is the velocity, mu is the momentum factor, lr is the learning rate,
    g is the gradient and p are the parameters.

    Since V is defined independently of the learning rate in PyTorch,
    when the learning rate is changed there is no need to perform the
    momentum correction by scaling V (unlike in the Caffe2 case).
    """
    return torch.optim.SGD(
        model.parameters(),
        lr=cfg.OPTIM.BASE_LR,
        momentum=cfg.OPTIM.MOMENTUM,
        weight_decay=cfg.OPTIM.WEIGHT_DECAY,
        dampening=cfg.OPTIM.DAMPENING,
        nesterov=cfg.OPTIM.NESTEROV
    )
def get_epoch_lr(cur_epoch):
    """Look up the learning rate scheduled for `cur_epoch` by the lr policy."""
    epoch_lr = lr_policy.get_epoch_lr(cur_epoch)
    return epoch_lr
def set_lr(optimizer, new_lr):
    """Overwrite the learning rate of every parameter group with `new_lr`."""
    for group in optimizer.param_groups:
        group['lr'] = new_lr
| 1,678 | 27.457627 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/relation_graph.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Relational graph modules"""
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import torch.nn.init as init
import networkx as nx
import numpy as np
from torch.nn.modules.utils import _pair
from torch.nn.modules.conv import _ConvNd
from torch.autograd import Function
from itertools import repeat
from networkx.utils import py_random_state
from pycls.datasets.load_graph import load_graph
import pdb
import time
import random
def compute_count(channel, group):
    """Split `channel` items into `group` buckets as evenly as possible.

    Returns an int array of length `group`; the first `channel % group`
    buckets receive one extra item.
    """
    base, extra = divmod(channel, group)
    sizes = np.full(group, base, dtype=int)
    sizes[:extra] += 1
    return sizes
@py_random_state(3)
def ws_graph(n, k, p, seed=1):
    """Returns a ws-flex graph, k can be real number in [2,n].

    A ring lattice with round(k*n/2) total edges (so average degree ~k
    even for fractional k) is built, then each lattice edge is rewired
    with probability p. `seed` arrives as an RNG instance courtesy of the
    py_random_state decorator.
    """
    assert k >= 2 and k <= n
    # compute number of edges, then spread them as per-node neighbor counts
    edge_num = int(round(k * n / 2))
    count = compute_count(edge_num, n)
    # print(count)
    G = nx.Graph()
    for i in range(n):
        # node i links to its next count[i] clockwise ring neighbors (mod n)
        source = [i] * count[i]
        target = range(i + 1, i + count[i] + 1)
        target = [node % n for node in target]
        # print(source, target)
        G.add_edges_from(zip(source, target))
    # rewire edges from each node
    nodes = list(G.nodes())
    for i in range(n):
        u = i
        target = range(i + 1, i + count[i] + 1)
        target = [node % n for node in target]
        for v in target:
            if seed.random() < p:
                w = seed.choice(nodes)
                # Enforce no self-loops or multiple edges
                while w == u or G.has_edge(u, w):
                    w = seed.choice(nodes)
                    if G.degree(u) >= n - 1:
                        break # skip this rewiring
                # while-else: rewire only when the loop found a valid w
                # (no break), mirroring networkx's watts_strogatz_graph.
                else:
                    G.remove_edge(u, v)
                    G.add_edge(u, w)
    return G
@py_random_state(4)
def connected_ws_graph(n, k, p, tries=100, seed=1):
    """Sample ws-flex graphs until a connected one appears.

    Retries up to `tries` times. `seed` is an RNG instance (via the
    py_random_state decorator), so every attempt draws a fresh graph.
    """
    for _ in range(tries):
        candidate = ws_graph(n, k, p, seed)
        if nx.is_connected(candidate):
            return candidate
    raise nx.NetworkXError('Maximum number of tries exceeded')
def nx_to_edge(graph, directed=False, add_self_loops=True,
               shuffle_id=False, seed=1):
    '''Convert a networkx graph to an (E, 2) integer edge-index array.

    Nodes are relabeled to 0..n-1 (optionally in shuffled order); for
    undirected output each edge is emitted in both directions; a self-loop
    row is appended for every node when add_self_loops is True. Rows come
    back sorted by source node.
    '''
    # drop any pre-existing self-loops before relabeling
    graph.remove_edges_from(graph.selfloop_edges())
    # relabel graphs
    keys = list(graph.nodes)
    vals = list(range(graph.number_of_nodes()))
    # shuffle node id assignment
    if shuffle_id:
        random.seed(seed)
        random.shuffle(vals)
    mapping = dict(zip(keys, vals))
    graph = nx.relabel_nodes(graph, mapping, copy=True)
    # get edges
    edge_index = np.array(list(graph.edges))
    if not directed:
        # mirror each (u, v) as (v, u) to make the index symmetric
        edge_index = np.concatenate((edge_index, edge_index[:, ::-1]), axis=0)
    if add_self_loops:
        edge_self = np.arange(graph.number_of_nodes())[:, np.newaxis]
        edge_self = np.tile(edge_self, (1, 2))
        edge_index = np.concatenate((edge_index, edge_self), axis=0)
    # sort edges by source node
    idx = np.argsort(edge_index[:, 0])
    edge_index = edge_index[idx, :]
    return edge_index
# edge index generator
def generate_index(message_type='ba', n=16, sparsity=0.5, p=0.2,
                   directed=False, seed=123):
    """Generate an (E, 2) edge-index array for a relational graph.

    message_type selects the generator family; n is the node count;
    sparsity fixes the average degree (n * sparsity); p is the rewiring
    probability for 'ws'; seed makes sampling reproducible (except for
    'random', which draws from numpy's global RNG state).

    Fix: `degree` is a float (n * sparsity), but several networkx
    generators require integer arguments; previously 'er'/'ba'/'grid'/
    'regular' raised TypeError whenever they received a float. The values
    are unchanged when degree is integral.
    """
    degree = n * sparsity
    known_names = ['mcwhole', 'mcwholeraw', 'mcvisual', 'mcvisualraw', 'cat', 'catraw']
    if message_type == 'er':
        # networkx requires an integer edge count m.
        graph = nx.gnm_random_graph(n=n, m=int(n * degree // 2), seed=seed)
    elif message_type == 'random':
        # Sample edge slots uniformly without replacement from the n*n grid.
        edge_num = int(n * n * sparsity)
        edge_id = np.random.choice(n * n, edge_num, replace=False)
        edge_index = np.zeros((edge_num, 2), dtype=int)
        for i in range(edge_num):
            edge_index[i, 0] = edge_id[i] // n
            edge_index[i, 1] = edge_id[i] % n
    elif message_type == 'ws':
        # ws-flex explicitly supports a real-valued degree; keep it float.
        graph = connected_ws_graph(n=n, k=degree, p=p, seed=seed)
    elif message_type == 'ba':
        # Attachment count m must be an int.
        graph = nx.barabasi_albert_graph(n=n, m=int(degree // 2), seed=seed)
    elif message_type == 'hypercube':
        graph = nx.hypercube_graph(n=int(np.log2(n)))
    elif message_type == 'grid':
        # Grid dimensions must be integers.
        m = int(degree)
        n = n // m
        graph = nx.grid_2d_graph(m=m, n=n)
    elif message_type == 'cycle':
        graph = nx.cycle_graph(n=n)
    elif message_type == 'tree':
        graph = nx.random_tree(n=n, seed=seed)
    elif message_type == 'regular':
        # k (neighbors per node) must be an int; p=0 keeps the ring lattice.
        graph = nx.connected_watts_strogatz_graph(n=n, k=int(degree), p=0, seed=seed)
    elif message_type in known_names:
        # Biological connectomes loaded from disk; always treated as directed.
        graph = load_graph(message_type)
        edge_index = nx_to_edge(graph, directed=True, seed=seed)
    else:
        raise NotImplementedError
    if message_type != 'random' and message_type not in known_names:
        edge_index = nx_to_edge(graph, directed=directed, seed=seed)
    return edge_index
def compute_size(channel, group, seed=1):
    """Evenly partition `channel` across `group` buckets, then shuffle.

    Seeds numpy's GLOBAL RNG (side effect!) so the permutation is
    reproducible for a fixed `seed`.
    """
    np.random.seed(seed)
    base, extra = divmod(channel, group)
    sizes = np.full(group, base, dtype=int)
    sizes[:extra] += 1
    return np.random.permutation(sizes)
def compute_densemask(in_channels, out_channels, group_num, edge_index):
    """Expand a group-level edge list into a dense channel-level 0/1 mask.

    Each high-level node is blown up to the per-group channel counts drawn
    by compute_size, yielding an (out_channels, in_channels) array.
    """
    in_repeats = compute_size(in_channels, group_num)
    out_repeats = compute_size(out_channels, group_num)
    adjacency = np.zeros((group_num, group_num))
    adjacency[edge_index[:, 0], edge_index[:, 1]] = 1
    expanded = np.repeat(adjacency, out_repeats, axis=0)
    return np.repeat(expanded, in_repeats, axis=1)
def get_mask(in_channels, out_channels, group_num,
             message_type='ba', directed=False, sparsity=0.5, p=0.2, talk_mode='dense', seed=123):
    """Build a dense (out_channels, in_channels) 0/1 mask from a relational graph.

    A `group_num`-node graph is sampled, then expanded so each graph node
    owns a block of input/output channels. `talk_mode` is accepted but not
    used here.
    """
    assert group_num <= in_channels and group_num <= out_channels
    # high-level graph edge index
    edge_index_high = generate_index(message_type=message_type,
                                     n=group_num, sparsity=sparsity, p=p, directed=directed, seed=seed)
    # get in/out size for each high-level node
    # NOTE(review): in_sizes/out_sizes/group_num_low are never used below;
    # dead computation kept as-is (compute_size reseeds numpy's global RNG).
    in_sizes = compute_size(in_channels, group_num)
    out_sizes = compute_size(out_channels, group_num)
    # decide low-level node num
    group_num_low = int(min(np.min(in_sizes), np.min(out_sizes)))
    # decide how to fill each node
    mask_high = compute_densemask(in_channels, out_channels, group_num, edge_index_high)
    return mask_high
############## Linear model
class TalkLinear(nn.Linear):
    '''Relational graph version of Linear. Neurons "talk" according to the graph structure'''
    def __init__(self, in_channels, out_channels, group_num, bias=False,
                 message_type='ba', directed=False,
                 sparsity=0.5, p=0.2, talk_mode='dense', seed=None):
        # A group cannot exceed the smaller of the two channel dimensions.
        group_num_max = min(in_channels, out_channels)
        if group_num > group_num_max:
            group_num = group_num_max
        # print(group_num, in_channels, out_channels, kernel_size, stride)
        super(TalkLinear, self).__init__(
            in_channels, out_channels, bias)
        # 0/1 connectivity mask derived from the sampled relational graph.
        self.mask = get_mask(in_channels, out_channels, group_num,
                             message_type, directed, sparsity, p, talk_mode, seed)
        nonzero = np.sum(self.mask)
        # NOTE: .cuda() hard-requires a GPU at construction time.
        self.mask = torch.from_numpy(self.mask).float().cuda()
        # Fraction of dense connections kept; used for FLOP/param accounting.
        self.flops_scale = nonzero / (in_channels * out_channels)
        self.params_scale = self.flops_scale
        # Per-column rescale factor; presumably consumed by the weight-init
        # routine elsewhere -- TODO confirm.
        self.init_scale = torch.sqrt(out_channels / torch.sum(self.mask.cpu(), dim=0, keepdim=True))
    def forward(self, x):
        # Zero out masked connections on every forward pass.
        weight = self.weight * self.mask
        # pdb.set_trace()
        return F.linear(x, weight, self.bias)
class SymLinear(nn.Module):
    '''Linear layer whose effective kernel is the symmetrized weight W + W^T.

    Requires in_features == out_features for the transpose sum to be valid.
    '''

    def __init__(self, in_features, out_features, bias=True):
        super(SymLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        # Register the (possibly absent) bias under the standard name.
        self.register_parameter(
            'bias', Parameter(torch.Tensor(out_features)) if bias else None)
        self.reset_parameters()

    def reset_parameters(self):
        # Same scheme nn.Linear uses: Kaiming-uniform weights, fan-in bias.
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)

    def forward(self, input):
        symmetric = self.weight + self.weight.t()
        return F.linear(input, symmetric, self.bias)

    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None
        )
############## Conv model
class TalkConv2d(_ConvNd):
    '''Relational graph version of Conv2d. Neurons "talk" according to the graph structure'''
    def __init__(self, in_channels, out_channels, group_num, kernel_size, stride=1,
                 padding=0, dilation=1, bias=False, message_type='ba', directed=False, agg='sum',
                 sparsity=0.5, p=0.2, talk_mode='dense', seed=None):
        # Cap the group count at the smaller channel dimension.
        group_num_max = min(in_channels, out_channels)
        if group_num > group_num_max:
            group_num = group_num_max
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        super(TalkConv2d, self).__init__(
            in_channels, out_channels,
            kernel_size, stride, padding, dilation,
            False, _pair(0), 1, bias, 'zeros')
        # 0/1 channel-to-channel connectivity mask from the sampled graph.
        self.mask = get_mask(in_channels, out_channels, group_num,
                             message_type, directed, sparsity, p, talk_mode, seed)
        nonzero = np.sum(self.mask)
        # Singleton spatial dims let the mask broadcast over the kernel.
        # NOTE: .cuda() hard-requires a GPU at construction time.
        self.mask = torch.from_numpy(self.mask[:, :, np.newaxis, np.newaxis]).float().cuda()
        # Per-output rescale factor; presumably used by weight init -- TODO confirm.
        self.init_scale = torch.sqrt(out_channels / torch.sum(self.mask.cpu(), dim=0, keepdim=True))
        # Fraction of dense connections kept; used for FLOP/param accounting.
        self.flops_scale = nonzero / (in_channels * out_channels)
        self.params_scale = self.flops_scale
    def forward(self, input):
        # Zero out masked channel connections on every forward pass.
        weight = self.weight * self.mask
        return F.conv2d(input, weight, self.bias, self.stride, self.padding, self.dilation, 1)
class SymConv2d(_ConvNd):
    '''Conv2d with symmetric weight matrices'''
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1,
                 bias=True, padding_mode='zeros'):
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        super(SymConv2d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _pair(0), groups, bias, padding_mode)
    def forward(self, input):
        # Symmetrize channel connectivity: W + W^T over the first two dims.
        # The permute only type-checks when the weight is square in its
        # first two dims (in_channels == out_channels, groups == 1).
        weight = self.weight + self.weight.permute(1, 0, 2, 3)
        if self.padding_mode == 'circular':
            # Manual wrap-around padding followed by an unpadded conv.
            expanded_padding = ((self.padding[1] + 1) // 2, self.padding[1] // 2,
                                (self.padding[0] + 1) // 2, self.padding[0] // 2)
            return F.conv2d(F.pad(input, expanded_padding, mode='circular'),
                            weight, self.bias, self.stride,
                            _pair(0), self.dilation, self.groups)
        return F.conv2d(input, weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
########### Other OPs
class Swish(nn.Module):
    """Swish activation: multiplies the input elementwise by its sigmoid."""

    def __init__(self):
        super(Swish, self).__init__()

    def forward(self, x):
        return torch.sigmoid(x) * x
class SE(nn.Module):
    """Squeeze-and-Excitation (SE) block w/ Swish activation fun.

    in_w: input channel count; se_w: bottleneck width of the excitation
    MLP; act_fun: activation class instantiated between the two 1x1 convs.
    """
    def __init__(self, in_w, se_w, act_fun):
        super(SE, self).__init__()
        self._construct_class(in_w, se_w, act_fun)
    def _construct_class(self, in_w, se_w, act_fun):
        # AvgPool: squeeze spatial dims down to 1x1
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        # FC, Swish, FC, Sigmoid (the FCs are implemented as 1x1 convs)
        self.f_ex = nn.Sequential(
            nn.Conv2d(in_w, se_w, kernel_size=1, bias=True),
            act_fun(),
            nn.Conv2d(se_w, in_w, kernel_size=1, bias=True),
            nn.Sigmoid()
        )
    def forward(self, x):
        # Rescale each channel by its learned gate in [0, 1].
        return x * self.f_ex(self.avg_pool(x))
class SparseLinear(nn.Linear):
    '''Sparse Linear layer.

    Mask convention is INVERTED relative to TalkLinear: entries are True
    where the weight must be zeroed and False where `edge_index` has an
    edge between the corresponding groups.
    '''
    def __init__(self, group_num, in_scale, out_scale, bias=False,
                 edge_index=None, flops_scale=0.5, params_scale=0.5):
        # mask is used for reset to zero
        mask_one = np.ones((out_scale, in_scale), dtype=bool)
        mask_zero = np.zeros((out_scale, in_scale), dtype=bool)
        # Start fully masked; clear the (out, in) group blocks with an edge.
        mask_list = [[mask_one for i in range(group_num)] for j in range(group_num)]
        for i in range(edge_index.shape[0]):
            mask_list[edge_index[i, 0]][edge_index[i, 1]] = mask_zero
        self.mask = np.block(mask_list)
        self.edge_index = edge_index
        # todo: update to pytorch 1.2.0, then use bool() dtype
        # NOTE: attributes are assigned before super().__init__, and
        # .cuda() hard-requires a GPU at construction time.
        self.mask = torch.from_numpy(self.mask).byte().cuda()
        self.flops_scale = flops_scale
        self.params_scale = params_scale
        super(SparseLinear, self).__init__(
            group_num * in_scale, group_num * out_scale, bias)
    def forward(self, x):
        # Zero the masked weights on every forward pass (clone keeps the
        # stored parameter intact).
        weight = self.weight.clone().masked_fill_(self.mask, 0)
        # pdb.set_trace()
        return F.linear(x, weight, self.bias)
class GroupLinear(nn.Module):
    '''Group conv style linear layer.

    Equivalent to grouped 1x1 conv on vectors: the channels are split into
    in_channels // group_size groups and each group only connects to its
    own counterpart (diagonal edge index) through a SparseLinear.
    '''
    def __init__(self, in_channels, out_channels, bias=False, group_size=1):
        super(GroupLinear, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.group_size = group_size
        self.group_num = in_channels // group_size
        self.in_scale = in_channels // self.group_num
        self.out_scale = out_channels // self.group_num
        assert in_channels % self.group_num == 0
        assert out_channels % self.group_num == 0
        assert in_channels % self.group_size == 0
        # Note: agg_fun is always sum
        # Diagonal edge list: group i connects only to group i.
        self.edge_index = np.arange(self.group_num)[:, np.newaxis].repeat(2, axis=1)
        self.edge_num = self.edge_index.shape[0]
        # Density of the kept connections, for FLOP/param accounting.
        flops_scale = self.edge_num / (self.group_num * self.group_num)
        params_scale = self.edge_num / (self.group_num * self.group_num)
        self.linear = SparseLinear(self.group_num, self.in_scale, self.out_scale, bias,
                                   edge_index=self.edge_index, flops_scale=flops_scale, params_scale=params_scale)
    def forward(self, x):
        x = self.linear(x)
        return x
| 15,045 | 35.877451 | 114 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/cifar100.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""CIFAR100 dataset."""
import numpy as np
import os
import pickle
import torch
import torch.utils.data
import pycls.datasets.transforms as transforms
from torchvision import datasets
import pycls.utils.logging as lu
logger = lu.get_logger(__name__)
# Per-channel mean and SD values in BGR order
_MEAN = [129.3, 124.1, 112.4]
_SD = [68.2, 65.4, 70.4]
class Cifar100(torch.utils.data.Dataset):
    """CIFAR-100 dataset.

    Loads the pickled 'train'/'test' batches fully into memory once, then
    serves (image, label) pairs with on-the-fly augmentation.
    """
    def __init__(self, data_path, split, batch_size):
        # batch_size only gates input normalization in _transform_image.
        assert os.path.exists(data_path), \
            'Data path \'{}\' not found'.format(data_path)
        assert split in ['train', 'test'], \
            'Split \'{}\' not supported for cifar'.format(split)
        logger.info('Constructing CIFAR-100 {}...'.format(split))
        self._data_path = data_path
        self._split = split
        self._batch_size = batch_size
        # Data format:
        #   self._inputs - (split_size, 3, 32, 32) ndarray
        #   self._labels - split_size list
        self._inputs, self._labels = self._load_data()
    def _load_batch(self, batch_path):
        """Unpickle one batch file; returns (raw rows, fine labels)."""
        with open(batch_path, 'rb') as f:
            d = pickle.load(f, encoding='bytes')
        # CIFAR-100 stores the 100-way targets under 'fine_labels'.
        return d[b'data'], d[b'fine_labels']
        # return d[b'data'], d[b'labels']
    def _load_data(self):
        """Loads data in memory."""
        logger.info('{} data path: {}'.format(self._split, self._data_path))
        # Compute data batch names
        if self._split == 'train':
            batch_names = ['train']
            # datasets.CIFAR100(self._data_path, train=True)
            # batch_names = ['data_batch_{}'.format(i) for i in range(1, 6)]
        else:
            batch_names = ['test']
        # Load data batches
        inputs, labels = [], []
        for batch_name in batch_names:
            batch_path = os.path.join(self._data_path, batch_name)
            inputs_batch, labels_batch = self._load_batch(batch_path)
            inputs.append(inputs_batch)
            labels += labels_batch
        # Combine and reshape the inputs
        inputs = np.vstack(inputs).astype(np.float32)
        inputs = inputs.reshape((-1, 3, 32, 32))
        return inputs, labels
    def _transform_image(self, image):
        """Transforms the image for network input."""
        # batch_size == 1 skips normalization; presumably so per-sample
        # (e.g. adversarial) evaluation sees raw pixels -- TODO confirm.
        if self._batch_size != 1:
            image = transforms.color_normalization(image, _MEAN, _SD)
        if self._split == 'train':
            # Training-only augmentation: flip + padded random crop.
            image = transforms.horizontal_flip(image=image, prob=0.5)
            image = transforms.random_crop(image=image, size=32, pad_size=4)
        return image
    def __getitem__(self, index):
        image, label = self._inputs[index, ...], self._labels[index]
        image = self._transform_image(image)
        return image, label
    def __len__(self):
        return self._inputs.shape[0]
| 3,163 | 34.155556 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/cifar10.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""CIFAR10 dataset."""
import numpy as np
import os
import pickle
import torch
import torch.utils.data
import pycls.datasets.transforms as transforms
import pycls.utils.logging as lu
from pycls.config import cfg
logger = lu.get_logger(__name__)
# Per-channel mean and SD values in BGR order
_MEAN = [125.3, 123.0, 113.9]
_SD = [63.0, 62.1, 66.7]
class Cifar10(torch.utils.data.Dataset):
    """CIFAR-10 dataset.

    Loads the pickled python-version batches fully into memory once, then
    serves (image, label) pairs with on-the-fly augmentation.
    """
    def __init__(self, data_path, split, batch_size):
        # batch_size only gates input normalization in _transform_image.
        assert os.path.exists(data_path), \
            'Data path \'{}\' not found'.format(data_path)
        assert split in ['train', 'test'], \
            'Split \'{}\' not supported for cifar'.format(split)
        logger.info('Constructing CIFAR-10 {}...'.format(split))
        self._data_path = data_path
        self._split = split
        self._batch_size = batch_size
        # Data format:
        #   self._inputs - (split_size, 3, 32, 32) ndarray
        #   self._labels - split_size list
        self._inputs, self._labels = self._load_data()
    def _load_batch(self, batch_path):
        """Unpickle one batch file; returns (raw rows, labels)."""
        with open(batch_path, 'rb') as f:
            d = pickle.load(f, encoding='bytes')
        return d[b'data'], d[b'labels']
    def _load_data(self):
        """Loads data in memory."""
        logger.info('{} data path: {}'.format(self._split, self._data_path))
        # Compute data batch names
        if self._split == 'train':
            batch_names = ['data_batch_{}'.format(i) for i in range(1, 6)]
        else:
            batch_names = ['test_batch']
        # Load data batches
        inputs, labels = [], []
        for batch_name in batch_names:
            batch_path = os.path.join(self._data_path, batch_name)
            inputs_batch, labels_batch = self._load_batch(batch_path)
            inputs.append(inputs_batch)
            labels += labels_batch
        # Combine and reshape the inputs
        inputs = np.vstack(inputs).astype(np.float32)
        inputs = inputs.reshape((-1, 3, 32, 32))
        return inputs, labels
    def _transform_image(self, image):
        """Transforms the image for network input."""
        # batch_size == 1 skips normalization; presumably so per-sample
        # (e.g. adversarial) evaluation sees raw pixels -- TODO confirm.
        if self._batch_size != 1:
            # Normalizing input images
            image = transforms.color_normalization(image, _MEAN, _SD)
        if self._split == 'train':
            # Training-only augmentation: flip + padded random crop.
            image = transforms.horizontal_flip(image=image, prob=0.5)
            image = transforms.random_crop(image=image, size=32, pad_size=4)
        return image
    def __getitem__(self, index):
        image, label = self._inputs[index, ...], self._labels[index]
        image = self._transform_image(image)
        return image, label
    def __len__(self):
        return self._inputs.shape[0]
| 3,048 | 33.647727 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/loader.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Data loader."""
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
import torch
from pycls.config import cfg
from pycls.datasets.cifar10 import Cifar10
from pycls.datasets.cifar100 import Cifar100
from pycls.datasets.tinyimagenet200 import TinyImageNet200
from pycls.datasets.imagenet import ImageNet
import pycls.datasets.paths as dp
# Supported datasets
_DATASET_CATALOG = {
'cifar10': Cifar10,
'cifar100': Cifar100,
'tinyimagenet200': TinyImageNet200,
'imagenet': ImageNet
}
def _construct_loader(dataset_name, split, batch_size, shuffle, drop_last):
    """Constructs the data loader for the given dataset.

    batch_size is per-process (callers divide by cfg.NUM_GPUS). In the
    multi-GPU setting a DistributedSampler shards the data, and shuffling
    is delegated to the sampler (DataLoader shuffle is forced off).
    """
    assert dataset_name in _DATASET_CATALOG.keys(), \
        'Dataset \'{}\' not supported'.format(dataset_name)
    assert dp.has_data_path(dataset_name), \
        'Dataset \'{}\' has no data path'.format(dataset_name)
    # Retrieve the data path for the dataset
    data_path = dp.get_data_path(dataset_name)
    # Construct the dataset
    dataset = _DATASET_CATALOG[dataset_name](data_path, split, batch_size)
    # Create a sampler for multi-process training
    sampler = DistributedSampler(dataset) if cfg.NUM_GPUS > 1 else None
    # Create a loader
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=(False if sampler else shuffle),
        sampler=sampler,
        num_workers=cfg.DATA_LOADER.NUM_WORKERS,
        pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
        drop_last=drop_last
    )
    return loader
def construct_train_loader():
    """Build the training data loader from the TRAIN config section."""
    per_gpu_batch = int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS)
    return _construct_loader(
        dataset_name=cfg.TRAIN.DATASET,
        split=cfg.TRAIN.SPLIT,
        batch_size=per_gpu_batch,
        shuffle=True,
        drop_last=True,
    )
def construct_test_loader():
    """Build the evaluation data loader from the TEST config section."""
    per_gpu_batch = int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS)
    return _construct_loader(
        dataset_name=cfg.TEST.DATASET,
        split=cfg.TEST.SPLIT,
        batch_size=per_gpu_batch,
        shuffle=False,
        drop_last=False,
    )
def construct_test_loader_adv():
    """Build a per-sample (batch_size=1) test loader.

    Presumably used for adversarial evaluation; the datasets skip input
    normalization when batch_size == 1 -- confirm against callers.
    """
    return _construct_loader(
        dataset_name=cfg.TEST.DATASET,
        split=cfg.TEST.SPLIT,
        batch_size=1,
        shuffle=False,
        drop_last=False,
    )
def shuffle(loader, cur_epoch):
    """Re-shuffle `loader`'s data for a new epoch.

    RandomSampler reshuffles on its own every iteration, so it needs no
    action; DistributedSampler derives its shuffle from the epoch number.
    """
    sampler = loader.sampler
    assert isinstance(sampler, (RandomSampler, DistributedSampler)), \
        'Sampler type \'{}\' not supported'.format(type(sampler))
    if isinstance(sampler, DistributedSampler):
        sampler.set_epoch(cur_epoch)
| 3,131 | 30.009901 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/imagenet.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""ImageNet dataset."""
import cv2
import numpy as np
import os
import torch
import torch.utils.data
import pycls.datasets.transforms as transforms
import pycls.utils.logging as lu
logger = lu.get_logger(__name__)
# Per-channel mean and SD values in BGR order
_MEAN = [0.406, 0.456, 0.485]
_SD = [0.225, 0.224, 0.229]
# Eig vals and vecs of the cov mat
_EIG_VALS = [0.2175, 0.0188, 0.0045]
_EIG_VECS = np.array([
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]
])
class ImageNet(torch.utils.data.Dataset):
    """ImageNet dataset.

    Walks <data_path>/<split>/<class_id>/ directories to build an image
    database of {im_path, class} records, then loads and preprocesses each
    image lazily in __getitem__.
    """
    def __init__(self, data_path, split, batch_size):
        # batch_size only gates color normalization in _prepare_im.
        assert os.path.exists(data_path), \
            'Data path \'{}\' not found'.format(data_path)
        assert split in ['train', 'val'], \
            'Split \'{}\' not supported for ImageNet'.format(split)
        logger.info('Constructing ImageNet {}...'.format(split))
        self._data_path = data_path
        self._split = split
        self._batch_size = batch_size
        self._construct_imdb()
    def _construct_imdb(self):
        """Constructs the imdb (list of {im_path, class} records)."""
        # Compile the split data path
        split_path = os.path.join(self._data_path, self._split)
        logger.info('{} data path: {}'.format(self._split, split_path))
        # Map ImageNet class ids to contiguous ids
        self._class_ids = os.listdir(split_path)
        self._class_id_cont_id = {v: i for i, v in enumerate(self._class_ids)}
        # Construct the image db
        self._imdb = []
        for class_id in self._class_ids:
            cont_id = self._class_id_cont_id[class_id]
            im_dir = os.path.join(split_path, class_id)
            for im_name in os.listdir(im_dir):
                self._imdb.append({
                    'im_path': os.path.join(im_dir, im_name),
                    'class': cont_id,
                })
        logger.info('Number of images: {}'.format(len(self._imdb)))
        logger.info('Number of classes: {}'.format(len(self._class_ids)))
    def _prepare_im(self, im):
        """Prepares the image for network input."""
        # Train and test setups differ
        if self._split == 'train':
            # Scale and aspect ratio
            im = transforms.random_sized_crop(
                image=im, size=224, area_frac=0.08
            )
            # Horizontal flip
            im = transforms.horizontal_flip(image=im, prob=0.5, order='HWC')
        else:
            # Scale and center crop
            im = transforms.scale(256, im)
            im = transforms.center_crop(224, im)
        # HWC -> CHW
        im = transforms.HWC2CHW(im)
        # [0, 255] -> [0, 1]
        im = im / 255.0
        # PCA jitter (train only)
        if self._split == 'train':
            im = transforms.lighting(im, 0.1, _EIG_VALS, _EIG_VECS)
        # Color normalization; skipped for batch_size == 1, presumably so
        # per-sample (adversarial) evaluation sees unnormalized pixels --
        # TODO confirm.
        if self._batch_size != 1:
            im = transforms.color_normalization(im, _MEAN, _SD)
        return im
    def __getitem__(self, index):
        # Load the image (cv2 reads BGR, matching the BGR channel stats)
        im = cv2.imread(self._imdb[index]['im_path'])
        im = im.astype(np.float32, copy=False)
        # Prepare the image for training / testing
        im = self._prepare_im(im)
        # Retrieve the label
        label = self._imdb[index]['class']
        return im, label
    def __len__(self):
        return len(self._imdb)
# class ImageNet(torch.utils.data.Dataset):
# """ImageNet dataset."""
# def __init__(self, data_path, split):
# assert os.path.exists(data_path), \
# 'Data path \'{}\' not found'.format(data_path)
# assert split in ['train', 'val'], \
# 'Split \'{}\' not supported for ImageNet'.format(split)
# logger.info('Constructing ImageNet {}...'.format(split))
# self._data_path = data_path
# self._split = split
# self._construct_imdb()
# def _construct_imdb(self):
# """Constructs the imdb."""
# # Compile the split data path
# split_path = os.path.join(self._data_path, self._split)
# logger.info('{} data path: {}'.format(self._split, split_path))
# # Map ImageNet class ids to contiguous ids
# self._class_ids = os.listdir(split_path)
# self._class_id_cont_id = {v: i for i, v in enumerate(self._class_ids)}
# # Construct the image db
# self._imdb = []
# counter = 1
# for class_id in self._class_ids:
# print('progress: {}/{}'.format(counter,len(self._class_ids)))
# counter += 1
# cont_id = self._class_id_cont_id[class_id]
# im_dir = os.path.join(split_path, class_id)
# for im_name in os.listdir(im_dir):
# self._imdb.append({
# 'im_path': os.path.join(im_dir, im_name),
# 'class': cont_id,
# 'img': cv2.imread(os.path.join(im_dir, im_name)).astype(np.float32, copy=False)
# })
# logger.info('Number of images: {}'.format(len(self._imdb)))
# logger.info('Number of classes: {}'.format(len(self._class_ids)))
# def _prepare_im(self, im):
# """Prepares the image for network input."""
# # Train and test setups differ
# if self._split == 'train':
# # Scale and aspect ratio
# im = transforms.random_sized_crop(
# image=im, size=224, area_frac=0.08
# )
# # Horizontal flip
# im = transforms.horizontal_flip(image=im, prob=0.5, order='HWC')
# else:
# # Scale and center crop
# im = transforms.scale(256, im)
# im = transforms.center_crop(224, im)
# # HWC -> CHW
# im = transforms.HWC2CHW(im)
# # [0, 255] -> [0, 1]
# im = im / 255.0
# # PCA jitter
# if self._split == 'train':
# im = transforms.lighting(im, 0.1, _EIG_VALS, _EIG_VECS)
# # Color normalization
# im = transforms.color_normalization(im, _MEAN, _SD)
# return im
# def __getitem__(self, index):
# # Load the image
# im = self._imdb[index]['img']
# # Prepare the image for training / testing
# im = self._prepare_im(im)
# # Retrieve the label
# label = self._imdb[index]['class']
# return im, label
# def __len__(self):
# return len(self._imdb)
| 6,759 | 35.344086 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/checkpoint.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Functions that handle saving and loading of checkpoints."""
import os
import torch
from collections import OrderedDict
from pycls.config import cfg
import pycls.utils.distributed as du
# Common prefix for checkpoint file names
_NAME_PREFIX = 'model_epoch_'
# Checkpoints directory name
_DIR_NAME = 'checkpoints'
def get_checkpoint_dir():
    """Return the directory used for storing training checkpoints."""
    checkpoint_dir = os.path.join(cfg.OUT_DIR, _DIR_NAME)
    return checkpoint_dir
def got_checkpoint_dir():
    """Return the checkpoint directory used at inference time."""
    checkpoint_dir = os.path.join(cfg.CHECKPT_DIR, _DIR_NAME)
    return checkpoint_dir
def get_checkpoint(epoch):
    """Return the full checkpoint file path for the given epoch."""
    filename = '{}{:04d}.pyth'.format(_NAME_PREFIX, epoch)
    return os.path.join(get_checkpoint_dir(), filename)
def got_checkpoint(epoch):
    """Return the full checkpoint file path for the given epoch (inference)."""
    filename = '{}{:04d}.pyth'.format(_NAME_PREFIX, epoch)
    return os.path.join(got_checkpoint_dir(), filename)
def get_checkpoint_last():
    """Return the lexicographically last checkpoint path; assert if none."""
    ckpt_dir = get_checkpoint_dir()
    candidates = []
    if os.path.exists(ckpt_dir):
        candidates = [f for f in os.listdir(ckpt_dir) if _NAME_PREFIX in f]
    assert len(candidates), 'No checkpoints found in \'{}\'.'.format(ckpt_dir)
    return os.path.join(ckpt_dir, max(candidates))
def got_checkpoint_last():
    """Return the lexicographically last inference checkpoint; assert if none."""
    ckpt_dir = got_checkpoint_dir()
    candidates = []
    if os.path.exists(ckpt_dir):
        candidates = [f for f in os.listdir(ckpt_dir) if _NAME_PREFIX in f]
    assert len(candidates), 'No checkpoints found in \'{}\'.'.format(ckpt_dir)
    return os.path.join(ckpt_dir, max(candidates))
def has_checkpoint():
    """Determines if the given directory contains a checkpoint."""
    ckpt_dir = get_checkpoint_dir()
    print("checkpoint directory =", ckpt_dir)
    if not os.path.exists(ckpt_dir):
        return False
    return any(_NAME_PREFIX in name for name in os.listdir(ckpt_dir))
def had_checkpoint():
    """Determines if the given directory contains a checkpoint for inference time."""
    ckpt_dir = got_checkpoint_dir()
    print("checkpoint directory =", ckpt_dir)
    if not os.path.exists(ckpt_dir):
        return False
    return any(_NAME_PREFIX in name for name in os.listdir(ckpt_dir))
def is_checkpoint_epoch(cur_epoch):
    """True when a checkpoint should be written after this (0-based) epoch."""
    next_epoch = cur_epoch + 1
    return next_epoch % cfg.TRAIN.CHECKPOINT_PERIOD == 0
def save_checkpoint(model, optimizer, epoch):
    """Saves a checkpoint for `epoch`; returns its path (master proc only)."""
    # Only the master process writes checkpoints
    if not du.is_master_proc():
        return
    os.makedirs(get_checkpoint_dir(), exist_ok=True)
    state = {
        'epoch': epoch,
        'model_state': model.state_dict(),
        'optimizer_state': optimizer.state_dict(),
        'cfg': cfg.dump()
    }
    checkpoint_file = get_checkpoint(epoch + 1)
    torch.save(state, checkpoint_file)
    return checkpoint_file
def load_checkpoint(checkpoint_file, model, optimizer=None):
    """Loads the checkpoint from the given file.

    Restores model weights (strict=False, so extra/missing keys are
    tolerated) and, when given, the optimizer state. Returns the epoch
    stored in the checkpoint. In inference mode the checkpoint is mapped
    onto the CPU so GPU-trained weights load on CPU-only machines.
    """
    assert os.path.exists(checkpoint_file), \
        'Checkpoint \'{}\' not found'.format(checkpoint_file)
    # Disabled DDP loading path kept for reference: strips the 'module.'
    # prefix that DistributedDataParallel adds to state-dict keys.
    # if cfg.IS_INFERENCE and cfg.IS_DDP:
    #     state_dict = torch.load(checkpoint_file, map_location='cpu')
    #     new_state_dict = OrderedDict()
    #     print("state_dict.items() :", state_dict)
    #     for k, v in state_dict.items():
    #         name = k[7:] # remove `module.`
    #         new_state_dict[name] = v
    #     # load params
    #     epoch = state_dict['epoch']
    #     model.load_state_dict(new_state_dict['model_state'])
    #     if optimizer:
    #         optimizer.load_state_dict(new_state_dict['optimizer_state'])
    if cfg.IS_INFERENCE:
        print("Mapping model to CPU")
        checkpoint = torch.load(checkpoint_file, map_location='cpu')
        # print(checkpoint)
    else:
        checkpoint = torch.load(checkpoint_file)
    epoch = checkpoint['epoch']
    print("Epochs from checkpoint = ", epoch)
    model.load_state_dict(checkpoint['model_state'], strict=False)
    if optimizer:
        optimizer.load_state_dict(checkpoint['optimizer_state'])
    return epoch
| 4,392 | 31.540741 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/net.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Functions for manipulating networks."""
import itertools
import math
import torch
import torch.nn as nn
from pycls.config import cfg
from ..models.relation_graph import *
def init_weights(m):
    """Performs ResNet style weight initialization.

    Intended for use with model.apply(init_weights). Conv layers get
    He/Kaiming-style normal init with std sqrt(2 / fan_out); BN layers get
    gamma=1 (or 0 for the final BN when ZERO_INIT_FINAL_GAMMA is set) and
    beta=0; linear layers get N(0, 0.01) weights and zero bias.
    NOTE(review): branch order matters if the Talk*/Sym* layers subclass the
    torch.nn classes -- confirm against relation_graph.
    """
    if isinstance(m, nn.Conv2d) or isinstance(m, SymConv2d):
        # Note that there is no bias due to BN
        fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))
    elif isinstance(m, TalkConv2d):
        # Note that there is no bias due to BN
        ### uniform init
        # params_scale shrinks the effective fan-out for masked layers
        fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels * m.params_scale
        ### node specific init
        # fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))
        # m.weight.data = m.weight.data*m.init_scale
    elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
        zero_init_gamma = (
            hasattr(m, 'final_bn') and m.final_bn and
            cfg.BN.ZERO_INIT_FINAL_GAMMA
        )
        m.weight.data.fill_(0.0 if zero_init_gamma else 1.0)
        m.bias.data.zero_()
    elif isinstance(m, nn.Linear) or isinstance(m, TalkLinear) or isinstance(m, SymLinear):
        m.weight.data.normal_(mean=0.0, std=0.01)
        if m.bias is not None:
            m.bias.data.zero_()
@torch.no_grad()
def compute_precise_bn_stats(model, loader):
    """Computes precise BN stats on training data.

    Recomputes running_mean/running_var of every BatchNorm2d layer by
    averaging per-batch statistics over up to BN.NUM_SAMPLES_PRECISE
    samples, using Var[x] = E[x^2] - E[x]^2 across batches. Momentum is
    temporarily set to 1.0 so running stats equal each batch's stats, and
    restored afterwards.
    """
    # Compute the number of minibatches to use
    num_iter = min(cfg.BN.NUM_SAMPLES_PRECISE // loader.batch_size, len(loader))
    # Retrieve the BN layers
    bns = [m for m in model.modules() if isinstance(m, torch.nn.BatchNorm2d)]
    # Initialize stats storage
    mus = [torch.zeros_like(bn.running_mean) for bn in bns]
    sqs = [torch.zeros_like(bn.running_var) for bn in bns]
    # Remember momentum values
    moms = [bn.momentum for bn in bns]
    # Disable momentum
    for bn in bns:
        bn.momentum = 1.0
    # Accumulate the stats across the data samples
    for inputs, _labels in itertools.islice(loader, num_iter):
        model(inputs.cuda())
        # Accumulate the stats for each BN layer
        for i, bn in enumerate(bns):
            m, v = bn.running_mean, bn.running_var
            sqs[i] += (v + m * m) / num_iter
            mus[i] += m / num_iter
    # Set the stats and restore momentum values
    for i, bn in enumerate(bns):
        bn.running_var = sqs[i] - mus[i] * mus[i]
        bn.running_mean = mus[i]
        bn.momentum = moms[i]
def get_flat_weights(model):
    """Gets all model weights as a single flat (N, 1) vector."""
    columns = [param.data.view(-1, 1) for param in model.parameters()]
    return torch.cat(columns, 0)
def set_flat_weights(model, flat_weights):
    """Sets all model weights from a single flat vector (in parameter order)."""
    offset = 0
    for param in model.parameters():
        count = param.data.numel()
        chunk = flat_weights[offset:(offset + count)]
        param.data.copy_(chunk.view_as(param.data))
        offset += count
    # Every element of the flat vector must have been consumed
    assert offset == flat_weights.numel()
def model2adj(model):
    """Export per-layer weight (and mask) matrices as numpy arrays.

    Returns a dict keyed 'weight_i' (and 'mask_i' for Talk* layers) in
    named_modules() order. Sym* layers are symmetrized by adding the
    channel-transposed weight before export.
    """
    adj_dict = {}
    i = 0
    for n, m in model.named_modules():
        if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
            adj_dict['weight_{}'.format(i)] = m.weight.data.squeeze().cpu().numpy()
            i += 1
        elif isinstance(m, SymLinear):
            # Symmetrize: W + W^T
            weight = m.weight.data + m.weight.data.permute(1, 0)
            adj_dict['weight_{}'.format(i)] = weight.squeeze().cpu().numpy()
            i += 1
        elif isinstance(m, SymConv2d):
            # Symmetrize over the in/out channel dims only
            weight = m.weight.data + m.weight.data.permute(1, 0, 2, 3)
            adj_dict['weight_{}'.format(i)] = weight.squeeze().cpu().numpy()
            i += 1
        elif isinstance(m, TalkLinear) or isinstance(m, TalkConv2d):
            adj_dict['weight_{}'.format(i)] = m.weight.data.squeeze().cpu().numpy()
            adj_dict['mask_{}'.format(i)] = m.mask.data.squeeze().cpu().numpy()
            i += 1
    return adj_dict
| 4,360 | 37.59292 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/distributed.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Distributed helpers."""
import torch
from pycls.config import cfg
def is_master_proc():
    """Determines if the current process is the master process.

    The master process (rank 0, or the only process when NUM_GPUS == 1)
    is responsible for logging and checkpoint I/O.
    """
    if cfg.NUM_GPUS == 1:
        return True
    return torch.distributed.get_rank() == 0
def init_process_group(proc_rank, world_size):
    """Initializes the default process group.

    Binds this process to GPU `proc_rank` and joins the torch.distributed
    group via the env:// rendezvous (MASTER_ADDR/MASTER_PORT must be set in
    the environment per the torch.distributed API).
    """
    # Set the GPU to use
    torch.cuda.set_device(proc_rank)
    # Initialize the process group
    # TCP rendezvous kept for reference (uses cfg.HOST/cfg.PORT instead of env vars):
    # print('--rank{},world{}--'.format(proc_rank, world_size))
    # torch.distributed.init_process_group(
    #     backend=cfg.DIST_BACKEND,
    #     init_method="tcp://{}:{}".format(cfg.HOST, cfg.PORT),
    #     world_size=world_size,
    #     rank=proc_rank
    # )
    torch.distributed.init_process_group(
        backend=cfg.DIST_BACKEND,
        init_method='env://',
        world_size=world_size,
        rank=proc_rank
    )
def destroy_process_group():
    """Destroys the default process group (inverse of init_process_group)."""
    torch.distributed.destroy_process_group()
def scaled_all_reduce(tensors):
    """Performs the scaled all_reduce operation on the provided tensors.

    Each tensor is summed across the process group in-place and then scaled
    by 1 / cfg.NUM_GPUS (the group size). The (modified) tensors are also
    returned.
    """
    # Launch every reduction asynchronously before waiting on any of them
    handles = [torch.distributed.all_reduce(t, async_op=True) for t in tensors]
    # Block until all reductions have completed
    for handle in handles:
        handle.wait()
    # all_reduce sums over the group, so rescale to an average
    for t in tensors:
        t.mul_(1.0 / cfg.NUM_GPUS)
    return tensors
| 2,323 | 31.277778 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/metrics.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Functions for computing metrics."""
import numpy as np
import torch
import torch.nn as nn
import pdb
from pycls.config import cfg
from functools import reduce
import operator
from ..models.relation_graph import *
# Number of bytes in a megabyte
_B_IN_MB = 1024 * 1024
def topks_correct(preds, labels, ks):
"""Computes the number of top-k correct predictions for each k."""
assert preds.size(0) == labels.size(0), \
'Batch dim of predictions and labels must match'
# Find the top max_k predictions for each sample
_top_max_k_vals, top_max_k_inds = torch.topk(
preds, max(ks), dim=1, largest=True, sorted=True
)
# (batch_size, max_k) -> (max_k, batch_size)
top_max_k_inds = top_max_k_inds.t()
# (batch_size, ) -> (max_k, batch_size)
rep_max_k_labels = labels.view(1, -1).expand_as(top_max_k_inds)
# (i, j) = 1 if top i-th prediction for the j-th sample is correct
top_max_k_correct = top_max_k_inds.eq(rep_max_k_labels)
# Compute the number of topk correct predictions for each k
topks_correct = [
top_max_k_correct[:k, :].view(-1).float().sum() for k in ks
]
return topks_correct
def topk_errors(preds, labels, ks):
    """Computes the top-k error (percent) for each k in ks."""
    correct = topks_correct(preds, labels, ks)
    batch_size = preds.size(0)
    return [(1.0 - c / batch_size) * 100.0 for c in correct]
def topk_accuracies(preds, labels, ks):
    """Computes the top-k accuracy (percent) for each k in ks."""
    correct = topks_correct(preds, labels, ks)
    batch_size = preds.size(0)
    return [(c / batch_size) * 100.0 for c in correct]
def params_count(model):
    """Computes the number of parameters.

    Talk* layers are counted as numel * params_scale (presumably to
    discount masked/shared weights -- confirm in relation_graph); all other
    modules contribute their raw parameter counts. recurse=False keeps each
    parameter attributed to exactly one module.
    """
    count = 0
    for n,m in model.named_modules():
        if isinstance(m, TalkConv2d) or isinstance(m, TalkLinear):
            count += np.sum([p.numel()*m.params_scale for p in m.parameters(recurse=False)]).item()
        else:
            count += np.sum([p.numel() for p in m.parameters(recurse=False)]).item()
    return int(count)
def flops_count(model):
    """Computes the number of flops (multiply-adds counted once).

    The input size is fixed per dataset. The spatial size (h, w) is
    threaded through the walk over named_modules(), so the count relies on
    modules being yielded in forward order; 'proj' (and 'pool' for Talk
    layers) shortcut branches do not advance h/w.
    """
    assert cfg.TRAIN.DATASET in ['cifar10', 'cifar100', 'tinyimagenet200', 'imagenet'], \
        'Computing flops for {} is not supported'.format(cfg.TRAIN.DATASET)
    # im_size = 32 if cfg.TRAIN.DATASET == 'cifar10' else 224
    if cfg.TRAIN.DATASET == 'cifar10':
        im_size = 32
    elif cfg.TRAIN.DATASET == 'cifar100':
        im_size = 32
    elif cfg.TRAIN.DATASET == 'tinyimagenet200':
        im_size = 64
    else:
        im_size = 224
    h, w = im_size, im_size
    count = 0
    for n, m in model.named_modules():
        if isinstance(m, nn.Conv2d):
            if '.se' in n:
                # Squeeze-and-excitation convs act on pooled 1x1 maps:
                # counted as a plain linear map plus bias
                count += m.in_channels * m.out_channels + m.bias.numel()
                continue
            h_out = (h + 2 * m.padding[0] - m.kernel_size[0]) // m.stride[0] + 1
            w_out = (w + 2 * m.padding[1] - m.kernel_size[1]) // m.stride[1] + 1
            count += np.prod([
                m.weight.numel(),
                h_out, w_out
            ])
            if 'proj' not in n:
                h, w = h_out, w_out
        elif isinstance(m, TalkConv2d):
            h_out = (h + 2 * m.padding[0] - m.kernel_size[0]) // m.stride[0] + 1
            w_out = (w + 2 * m.padding[1] - m.kernel_size[1]) // m.stride[1] + 1
            # flops_scale discounts connections removed by the layer's mask
            count += int(np.prod([
                m.weight.numel()*m.flops_scale,
                h_out, w_out
            ]))
            if 'proj' not in n and 'pool' not in n:
                h, w = h_out, w_out
        elif isinstance(m, nn.MaxPool2d):
            h = (h + 2 * m.padding - m.kernel_size) // m.stride + 1
            w = (w + 2 * m.padding - m.kernel_size) // m.stride + 1
        elif isinstance(m, TalkLinear):
            count += int(m.in_features * m.out_features * m.flops_scale)
        elif isinstance(m, nn.Linear):
            count += m.in_features * m.out_features
    return count
def gpu_mem_usage():
    """Computes the peak GPU memory usage for the current device (MB)."""
    peak_bytes = torch.cuda.max_memory_allocated()
    return peak_bytes / _B_IN_MB
# Online FLOPs/Params calculation from CondenseNet codebase
# Module-level accumulators: reset by measure_model() and incremented by
# measure_layer() during the instrumented forward pass.
count_ops = 0
count_params = 0
def get_num_gen(gen):
    """Counts the items yielded by an iterable (consumes it)."""
    count = 0
    for _ in gen:
        count += 1
    return count
def is_pruned(layer):
    """Returns True if the layer carries a pruning 'mask' attribute."""
    # hasattr swallows only AttributeError, matching the original try/except
    return hasattr(layer, 'mask')
def is_leaf(model):
    """Returns True if the module has no child modules."""
    return sum(1 for _ in model.children()) == 0
def get_layer_info(layer):
    """Returns the layer's type name, parsed from its repr (text before '(')."""
    repr_str = str(layer)
    type_name = repr_str[:repr_str.find('(')].strip()
    return type_name
def get_layer_param(model):
    """Returns the total number of parameter elements in the module."""
    return sum(param.numel() for param in model.parameters())
### The input batch size should be 1 to call this function
def measure_layer(layer, x):
    """Adds `layer`'s FLOPs and params to the global count_ops/count_params.

    `x` is the layer's input tensor; the layer type is dispatched on the
    class name parsed from its repr. Raises TypeError for unknown types.
    """
    global count_ops, count_params
    delta_ops = 0
    delta_params = 0
    multi_add = 1
    type_name = get_layer_info(layer)
    ### ops_conv
    if type_name in ['Conv2d']:
        out_h = int((x.size()[2] + 2 * layer.padding[0] - layer.kernel_size[0]) /
                    layer.stride[0] + 1)
        out_w = int((x.size()[3] + 2 * layer.padding[1] - layer.kernel_size[1]) /
                    layer.stride[1] + 1)
        delta_ops = layer.in_channels * layer.out_channels * layer.kernel_size[0] * \
                layer.kernel_size[1] * out_h * out_w / layer.groups * multi_add
        print(layer)
        print('out_h: ', out_h, 'out_w:', out_w)
        delta_params = get_layer_param(layer)
    ### ops_nonlinearity
    elif type_name in ['ReLU']:
        delta_ops = x.numel()
        delta_params = get_layer_param(layer)
    ### ops_pooling
    elif type_name in ['AvgPool2d', 'MaxPool2d']:
        in_w = x.size()[2]
        kernel_ops = layer.kernel_size * layer.kernel_size
        out_w = int((in_w + 2 * layer.padding - layer.kernel_size) / layer.stride + 1)
        out_h = int((in_w + 2 * layer.padding - layer.kernel_size) / layer.stride + 1)
        delta_ops = x.size()[0] * x.size()[1] * out_w * out_h * kernel_ops
        delta_params = get_layer_param(layer)
    elif type_name in ['AdaptiveAvgPool2d']:
        delta_ops = x.size()[0] * x.size()[1] * x.size()[2] * x.size()[3]
        delta_params = get_layer_param(layer)
    ### ops_linear
    elif type_name in ['Linear']:
        weight_ops = layer.weight.numel() * multi_add
        bias_ops = layer.bias.numel()
        delta_ops = x.size()[0] * (weight_ops + bias_ops)
        delta_params = get_layer_param(layer)
    elif type_name in ['WeightedSumTransform']:
        weight_ops = layer.weight.numel() * multi_add
        delta_ops = x.size()[0] * (weight_ops)
        delta_params = get_layer_param(layer)
    ### ops_nothing -- parameter-only layers contribute no ops here
    elif type_name in ['BatchNorm2d', 'Dropout2d', 'DropChannel', 'Dropout', 'Sigmoid', 'DirichletWeightedSumTransform', 'Softmax', 'Identity', 'Sequential']:
        delta_params = get_layer_param(layer)
    ### unknown layer type
    else:
        raise TypeError('unknown layer type: %s' % type_name)
    count_ops += delta_ops
    count_params += delta_params
    return
def measure_model(model, H, W):
    """Measures (count_ops, count_params) of `model` for one 1x3xHxW input.

    Temporarily monkey-patches every leaf (or pruned) module's forward() so
    that measure_layer records its cost, runs a single dummy forward pass
    on CUDA, then restores the original forward methods.
    """
    global count_ops, count_params
    count_ops = 0
    count_params = 0
    data = torch.zeros(1, 3, H, W).cuda()
    def should_measure(x):
        # Measure leaves and pruned layers; recurse into containers
        return is_leaf(x) or is_pruned(x)
    def modify_forward(model):
        for child in model.children():
            if should_measure(child):
                def new_forward(m):
                    # Factory binds `m` so each child gets its own hook
                    def lambda_forward(x):
                        measure_layer(m, x)
                        return m.old_forward(x)
                    return lambda_forward
                child.old_forward = child.forward
                child.forward = new_forward(child)
            else:
                modify_forward(child)
    def restore_forward(model):
        for child in model.children():
            # leaf node
            if is_leaf(child) and hasattr(child, 'old_forward'):
                child.forward = child.old_forward
                child.old_forward = None
            else:
                restore_forward(child)
    modify_forward(model)
    model.forward(data)
    restore_forward(model)
    return count_ops, count_params
| 8,557 | 33.095618 | 158 | py |
kge_ecotox_regression | kge_ecotox_regression-main/main.py |
"""
TODO:
- Train embedding model.
- Apply embeddings to data.
- Encode data.
- Train,valid,test model
"""
from autoencoder import create_auto_encoder
from model import create_model, CorrelelatedFeatures, ApproxKerasSVM, coeff_determination
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV
from sklearn.preprocessing import OneHotEncoder
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import KFold
from random import shuffle
from collections import defaultdict
import tensorflow as tf
from sklearn.svm import SVR
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression, LinearRegression, HuberRegressor, BayesianRidge
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, VotingRegressor, BaggingRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.compose import TransformedTargetRegressor
from sklearn.preprocessing import QuantileTransformer, RobustScaler
from sklearn.tree import DecisionTreeRegressor
from itertools import product
from random import choice, choices
from sklearn.pipeline import Pipeline
from tqdm import tqdm
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA,FastICA
from sklearn.cluster import FeatureAgglomeration
from sklearn.feature_selection import RFE
from sklearn.metrics import r2_score
from sklearn.isotonic import IsotonicRegression
from sklearn.feature_selection import VarianceThreshold
from sklearn.dummy import DummyRegressor
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import cross_val_score, LeaveOneOut
MAX_ENCODER_EPOCHS = 1000  # upper bound for the autoencoder; EarlyStopping usually halts sooner
MAX_EPOCHS = 1000
EPSILON = 1e-10  # added before log() to avoid log(0) on zero concentrations
MODEL = 'ComplEx'  # embedding model identifier (not referenced in this part of the file)
hidden_dim = (128,)
SEED = 42
np.random.seed(SEED)
# Silence TensorFlow logging and all Python warnings for batch runs.
import tensorflow as tf
tf.get_logger().setLevel('ERROR')
import warnings
warnings.filterwarnings('ignore')
def load_fingerprints(filename):
    """Load binary chemical fingerprints from a CSV file.

    The file must have a 'chemical' index column and a 'fingerprint' column
    holding one character per bit (e.g. '0110...').

    Returns:
        dict mapping chemical -> np.ndarray of 0/1 ints.

    NOTE(review): assumes pandas reads the fingerprint column as strings;
    purely numeric short fingerprints could be inferred as integers and
    break the per-character iteration -- confirm against the data files.
    (Fix: removed the unused local `l`.)
    """
    df = pd.read_csv(filename, index_col='chemical')
    out = {}
    for c in df.index:
        fp = df.loc[c]['fingerprint']
        # One array entry per fingerprint character
        out[c] = np.asarray([int(bit) for bit in fp])
    return out
def load_features(filename):
    """Load per-chemical numeric feature vectors from a CSV file.

    The file must have a 'chemical' index column; rows containing missing
    values are dropped. Returns {chemical: np.ndarray of features}.
    """
    df = pd.read_csv(filename, index_col='chemical').dropna()
    feature_cols = df.columns
    result = {}
    for chem in df.index:
        result[chem] = np.asarray([df.loc[chem][col] for col in feature_cols])
    return result
def load_one_hot(entities):
    """Build a one-hot vector for each entity.

    Vector length equals the number of distinct entities; the component
    order depends on set iteration and is therefore not deterministic
    across runs.
    """
    unique = list(set(entities))
    encoding = {}
    for entity in entities:
        vec = np.zeros((len(unique),))
        vec[unique.index(entity)] = 1
        encoding[entity] = np.asarray(vec)
    return encoding
def load_embeddings(filename, filename_ids):
    """Load an embedding matrix (.npy) and an entity->row index map (.npz).

    Returns {entity: embedding row} for every entity in the id file.
    """
    matrix = np.load(filename)
    id_map = dict(np.load(filename_ids))
    embeddings = {}
    for entity, row in id_map.items():
        embeddings[entity] = matrix[int(row)]
    return embeddings
def load_data(filename,filter_chemicals=None, filter_species=None):
    """Load toxicity experiments from CSV.

    Optionally keeps only the given chemical/species sets, drops studies
    longer than two weeks, and averages replicate (chemical, species)
    pairs. Returns:
        X: list of (chemical, species) keys,
        y: (N, 1) array of log-concentrations,
        experimental_features: {key: standardized study_duration (1-vector)}.
    """
    df = pd.read_csv(filename)
    X,y = [],[]
    # Restrict to the requested chemicals/species (drop everything else)
    if filter_chemicals:
        to_drop = set(df.chemical) - filter_chemicals
        for c in to_drop:
            df = df.drop(df[df.chemical == c].index)
    if filter_species:
        to_drop = set(df.species) - filter_species
        for s in to_drop:
            df = df.drop(df[df.species == s].index)
    # Drop experiments running longer than 14 days (duration is in hours)
    df = df.drop(df[df.study_duration > 24*14].index)
    # Average replicates of the same (chemical, species) pair
    df = df.groupby(['chemical','species'],as_index=False).mean()
    X = list(zip(df['chemical'],df['species']))
    # EPSILON avoids log(0) for zero concentrations
    y = np.log(df.concentration+EPSILON)
    tmp = np.asarray(df.study_duration).reshape((-1,1))
    mms = StandardScaler()
    tmp = mms.fit_transform(tmp)
    experimental_features = dict(zip(X,tmp.reshape(-1,1)))
    y = np.asarray(y).reshape((-1,1))
    #y = MinMaxScaler().fit_transform(y)
    return X, y, experimental_features
def data_split(X,Y,restrictions=None,method = 1, variant = 1, prop=0.33):
    """Split (chemical, species) samples into train/test sets.

    Notation: C_x - chemical set, S_x - species set, t/v - train/validation.

    Methods:
        1. C_t and C_v disjoint; species may overlap.
        2. chemicals AND species disjoint.
        3. both may overlap (plain random split of samples).
        4. S_t and S_v disjoint; chemicals may overlap.
    Variants (for the overlapping side in methods 1 and 4):
        1. the overlapping sets are identical.
        2. the overlapping sets only partially intersect.

    restrictions: optional per-set constraints, e.g.
        {'S_v': {'content': {s_1}, 'max_len': 1}} forces s_1 into S_v and
        caps its size; 'not content' excludes entities.

    Returns:
        Xtr, Xte, ytr, yte -- sample/label lists filtered to the selected
        chemical/species sets.

    Fix: the default restrictions=None previously crashed on the
    `**restrictions` merge below.
    """
    if restrictions is None:
        restrictions = {}
    C_t,C_v,S_t,S_v=map(set,[[]]*4)
    restrictions = {**{'C_t':{},'C_v':{},'S_t':{},'S_v':{}},**restrictions}
    def filter_restrictions(C_t,C_v,S_t,S_v):
        # Apply content / not-content / max_len constraints to each set
        for _set,_inv_set,k in zip([C_t,C_v,S_t,S_v],[C_v,C_t,S_v,S_t],['C_t','C_v','S_t','S_v']):
            if k in restrictions:
                if 'content' in restrictions[k]:
                    _set |= restrictions[k]['content']
                if 'not content' in restrictions[k]:
                    _set -= restrictions[k]['not content']
                if 'max_len' in restrictions[k]:
                    # Randomly evict non-required entities until under the cap
                    while restrictions[k]['max_len'] < len(_set):
                        entity = choice(list(_set))
                        if not ('content' in restrictions[k] and entity in restrictions[k]['content']):
                            _set.remove(entity)
        return C_t,C_v,S_t,S_v
    def check_restrictions(C_t,C_v,S_t,S_v):
        # Validate that the restrictions are internally consistent and
        # compatible with the chosen split method
        for _set,k,inv_k in zip([C_t,C_v,S_t,S_v],['C_t','C_v','S_t','S_v'],['C_v','C_t','S_v','S_t']):
            if k in restrictions:
                if 'content' in restrictions[k] and 'not content' in restrictions[k]:
                    try:
                        assert len(restrictions[k]['content'].intersection(restrictions[k]['not content'])) < 1
                    except AssertionError:
                        raise AssertionError('Set %s content conflict.' % k)
                if 'content' in restrictions[k] and 'max_len' in restrictions[k]:
                    try:
                        assert len(restrictions[k]['content']) <= restrictions[k]['max_len']
                    except AssertionError:
                        raise AssertionError('Set %s content is longer than max length' % k)
                if ((method == 1 and 'C' in k) or (method == 4 and 'S' in k) or method == 2) and 'content' in restrictions[inv_k]:
                    try:
                        assert restrictions[k]['content'].intersection(restrictions[inv_k]['content']) == set()
                    except AssertionError:
                        raise AssertionError('Intersection in %s content is not allowed in method %s.' % ('chemical' if method==1 else 'species',str(method)))
                if method == 3 and 'content' in restrictions[inv_k]:
                    try:
                        assert restrictions[k]['content'].intersection(restrictions[inv_k]['content']) == set()
                    except AssertionError:
                        raise AssertionError('Intersection in set content is not allowed in method 3.')
    C,S = map(set,zip(*X))
    if method == 1:
        C_t,C_v = train_test_split(list(C),test_size=prop)
        if variant == 1:
            S_t,S_v = S, S
        else:
            S_t = choices(list(S),k=int((1-prop)*len(S)))
            S_v = choices(list(S),k=int(prop*len(S)))
    if method == 2:
        S_t,S_v = train_test_split(list(S),test_size=prop)
        C_t,C_v = train_test_split(list(C),test_size=prop)
    if method == 3:
        X_t, X_v = train_test_split(X,test_size=prop)
        C_t,S_t = map(set,zip(*X_t))
        C_v,S_v = map(set,zip(*X_v))
    if method == 4:
        S_t,S_v = train_test_split(list(S),test_size=prop)
        if variant == 1:
            C_t,C_v = C, C
        else:
            C_t = choices(list(C),k=int((1-prop)*len(C)))
            C_v = choices(list(C),k=int(prop*len(C)))
    C_t,C_v,S_t,S_v = map(set,[C_t,C_v,S_t,S_v])
    C_t,C_v,S_t,S_v = filter_restrictions(C_t,C_v,S_t,S_v)
    # Re-establish the disjointness that restriction filling may have broken
    if method == 1: C_t -= C_v
    if method == 2:
        C_t -= C_v
        S_t -= S_v
    if method == 4: S_t -= S_v
    if method == 1:
        assert C_t.intersection(C_v) == set()
        if variant == 1:
            S_t = S_v
            assert S_t == S_v
        else:
            assert len(S_t.intersection(S_v)) < len(S_t.union(S_v))
    if method == 2:
        assert C_t.intersection(C_v) == set() and S_t.intersection(S_v) == set()
    if method == 3:
        assert len(C_t.intersection(C_v)) > 0 and len(S_t.intersection(S_v)) > 0
    if method == 4:
        assert S_t.intersection(S_v) == set()
        if variant == 1:
            C_t = C_v
            assert C_t == C_v
        else:
            assert len(C_t.intersection(C_v)) < len(C_t.union(C_v))
    check_restrictions(C_t,C_v,S_t,S_v)
    # Materialize the sample splits from the selected chemical/species sets
    Xtr = []
    Xte = []
    ytr = []
    yte = []
    for x,y in zip(X,Y):
        c,s = x
        if c in C_t and s in S_t:
            Xtr.append(x)
            ytr.append(y)
        if c in C_v and s in S_v:
            Xte.append(x)
            yte.append(y)
    return Xtr,Xte,ytr,yte
class FilterFingerprints:
    """Drops constant columns: keeps only columns with more than one
    distinct value (a minimal variance filter for fingerprint bits)."""
    def __init__(self):
        pass
    def fit(self, X):
        # Record the indices of all non-constant columns
        keep = []
        for col, values in enumerate(X.T):
            if len(np.unique(values)) > 1:
                keep.append(col)
        self.idx = keep
    def transform(self, X):
        # Support both a 2-D matrix and a single 1-D sample
        if len(X.shape) > 1:
            return X[:, self.idx]
        return X[self.idx]
    def fit_transform(self, X):
        self.fit(X)
        return self.transform(X)
def compile_model(model):
    """Compile a Keras model: adagrad optimizer, log-cosh loss, mae/mse/R2.

    NOTE(review): `R2` is not defined in this module (only
    `coeff_determination` is imported from model.py), so calling this
    likely raises NameError -- confirm where R2 should come from.
    """
    model.compile(optimizer='adagrad',loss='log_cosh',metrics=['mae','mse',R2(name='r2')])
import math
def lcm(a, b):
    """Least common multiple of two integers."""
    g = math.gcd(a, b)
    return abs(a * b) // g
def combine(Xs):
    """Row-align several feature matrices and concatenate them column-wise.

    Each matrix is repeated row-wise up to the LCM of all row counts, then
    all matrices are concatenated along axis 1.

    Args:
        Xs: list of 2-D arrays (possibly different row counts).
    Returns:
        np.ndarray of shape (lcm(row counts), sum of column counts).

    Fix: the original computed the LCM via max(*map(...)) over
    product(Xs, Xs), which raised TypeError when Xs held a single matrix.
    """
    lengths = [len(X) for X in Xs]
    # LCM of all row counts, folded pairwise via gcd
    total = 1
    for a in lengths:
        total = total * a // math.gcd(total, a)
    repeats = [total // a for a in lengths]
    aligned = [np.repeat(X, rep, axis=0) for X, rep in zip(Xs, repeats)]
    return np.concatenate(aligned, axis=1)
def list_duplicates(seq):
    """Yield (item, [indices]) for every item occurring more than once."""
    positions = defaultdict(list)
    for idx, item in enumerate(seq):
        positions[item].append(idx)
    return ((item, locs) for item, locs in positions.items() if len(locs) > 1)
def run_model(C_t,C_v,S_t,S_v,y,
              experimental_features,
              fingerprints,
              chemical_embedding,
              species_embedding,
              chemical_features,
              merge_species=False):
    """
    Take four classes of chemicals, two pairs of siblings, test these on one-two species, combine siblings, combine cusins, see performance drop. Repeat on species side.
    Repeat with embeddings for chemicals and species and see the same performance on lower levels, but imporved over baseline on higher levels.
    """
    """
    5-fold validation
    + 1-fold test set
    """
    # Train/test keys: (chemical, species) pairs with a known label that
    # fall in the requested chemical x species sets
    keys = set(y.keys())
    keys_t = keys.intersection(set(product(C_t,S_t)))
    keys_v = keys.intersection(set(product(C_v,S_v)))
    ytr,yte = map(lambda x:np.asarray([y[i] for i in x]),[keys_t,keys_v])
    # Bail out when either split is empty
    if len(yte) < 1 or len(ytr) < 1:
        return None,None,None
    # Assemble per-split feature arrays keyed on the chemical (i,_) or
    # species (_,i) element of each key
    fingerprints_train,fingerprints_test = map(lambda x:np.asarray([fingerprints[i] for i,_ in x]),[keys_t,keys_v])
    chemical_embedding_train,chemical_embedding_test = map(lambda x:np.asarray([chemical_embedding[i] for i,_ in x]),[keys_t,keys_v])
    chemical_features_train,chemical_features_test = map(lambda x:np.asarray([chemical_features[i] for i,_ in x]),[keys_t,keys_v])
    species_embedding_train,species_embedding_test = map(lambda x:np.asarray([species_embedding[i] for _,i in x]),[keys_t,keys_v])
    experimental_features_train,experimental_features_test = map(lambda x:np.asarray([experimental_features[i] for i in x]),[keys_t,keys_v])
    # One-hot species encoding fit on the union of train and test species
    species_one_hot_encoder = OneHotEncoder(sparse=False)
    sp_t = set(list(zip(*keys_t))[1])
    sp_v = set(list(zip(*keys_v))[1])
    sp = np.asarray(list(sp_t|sp_v)).reshape((-1,1))
    species_one_hot_encoder.fit(sp)
    species_one_hot_train,species_one_hot_test = map(lambda x:species_one_hot_encoder.transform(np.asarray(list(zip(*x))[1]).reshape((-1,1))),[keys_t,keys_v])
    if merge_species:
        # Replace species-dependent rows by their per-chemical mean
        for array in [species_embedding_train,species_one_hot_train,ytr]:
            for elem,loc in list_duplicates([c for c,_ in keys_t]): #i.e. mean where c is the same
                array[loc] = np.mean(array[loc])
        for array in [species_embedding_test,species_one_hot_test,yte]:
            for elem,loc in list_duplicates([c for c,_ in keys_v]):
                array[loc] = np.mean(array[loc])
    n_tr = ytr.shape[1]
    n_te = yte.shape[1]
    # Variant 1: one-hot species; variant 2: species + chemical embeddings.
    # Labels are appended as the last column(s) and split off again below.
    train_1 = combine([fingerprints_train,chemical_features_train,species_one_hot_train,experimental_features_train,ytr])
    train_2 = combine([fingerprints_train,chemical_features_train,species_embedding_train,chemical_embedding_train,experimental_features_train,ytr])
    test_1 = combine([fingerprints_test,chemical_features_test,species_one_hot_test,experimental_features_test,yte])
    test_2 = combine([fingerprints_test,chemical_features_test,species_embedding_test,chemical_embedding_test,experimental_features_test,yte])
    Xtr_1,ytr = train_1[:,:-n_tr],train_1[:,-n_tr:]
    Xtr_2,ytr = train_2[:,:-n_tr],train_2[:,-n_tr:]
    Xte_1,yte = test_1[:,:-n_te],test_1[:,-n_te:]
    Xte_2,yte = test_2[:,:-n_te],test_2[:,-n_te:]
    res1 = np.zeros(yte.ravel().shape)
    res2 = np.zeros(yte.ravel().shape)
    # NOTE(review): `params` and `n` are prepared but unused below --
    # presumably left over from a KNN grid-search variant.
    params = {'n_neighbors':[2,5,10,25,50,100],
              'weights':['uniform','distance']}
    n = min(len(ytr),5)
    FOLDS = 10
    # Average FOLDS AdaBoost fits per feature variant
    for Xtr,Xte,res in zip([Xtr_1,Xtr_2],[Xte_1,Xte_2],[res1,res2]):
        for _ in range(FOLDS):
            regr = AdaBoostRegressor(n_estimators=10,loss='square')
            regr.fit(Xtr,ytr.ravel())
            res += regr.predict(Xte)/FOLDS
    return res1,res2,yte
from SPARQLWrapper import SPARQLWrapper, JSON

# Module-level Wikidata SPARQL client shared by the query helpers below;
# all query results are requested in JSON form.
sparql = SPARQLWrapper("https://query.wikidata.org/sparql")
sparql.setReturnFormat(JSON)
def get_species_name(ncbi_id):
    """Resolve an NCBI taxonomy id to its taxon name via Wikidata.

    Looks up the entity whose NCBI taxonomy id (P685) equals `ncbi_id` and
    returns its taxon name (P225). Falls back to returning the input id when
    the query fails or yields no binding, so callers always get a printable
    string (the old version returned None on "no binding", which leaked
    'None' into plot filenames downstream).

    Requires the module-level `sparql` client and network access.
    """
    q = """
    select ?label where {
        ?s wdt:P685 "%s" ;
           wdt:P225 ?label .
    }
    """ % ncbi_id
    sparql.setQuery(q)
    try:
        results = sparql.query().convert()
        # Return the first binding, if any.
        for result in results["results"]["bindings"]:
            return result["label"]["value"]
    except Exception:
        # Network/endpoint failure: degrade gracefully to the raw id.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        return ncbi_id
    # Query succeeded but no binding was found.
    return ncbi_id
def encode_fingerprints(fingerprints_all):
    """Compress binary fingerprints with a denoising auto-encoder.

    Trains the auto-encoder to reconstruct `fingerprints_all` (early-stopped
    on training loss; MAX_ENCODER_EPOCHS is a module-level constant not
    visible in this chunk — confirm its value) and returns the encoder's
    output for every input row.
    """
    # Single 128-unit hidden layer; 0.1 Gaussian input noise (denoising AE).
    fingerprint_encoder, fingerprint_ae = create_auto_encoder(input_size=len(fingerprints_all[0]), dense_layers=(128,), noise=0.1)
    fingerprint_ae.compile(optimizer='adagrad', loss='binary_crossentropy')
    fingerprint_ae.fit(fingerprints_all, fingerprints_all,
                       epochs=MAX_ENCODER_EPOCHS,
                       callbacks=[EarlyStopping('loss', min_delta=1e-5)],
                       verbose=0)
    return fingerprint_encoder.predict(fingerprints_all)
from sklearn.cluster import KMeans
# function returns WSS score for k values from 1 to kmax
def calculate_WSS(points, kmax):
    """Within-cluster sum of squares (elbow curve) for k = 1..kmax.

    Fits one KMeans model per k and accumulates the squared Euclidean
    distance of every point to its assigned centroid.

    Bug fixed: the previous version summed only the first two coordinates
    of each point/centroid, silently under-counting the error whenever
    `points` has more than two dimensions (which happens when the caller
    skips the PCA-to-2D step).

    @param points: array-like of shape [n_samples, n_features]
    @param kmax: largest number of clusters to evaluate
    @return: list of kmax SSE values, one per k
    """
    points = np.asarray(points)
    sse = []
    for k in range(1, kmax + 1):
        kmeans = KMeans(n_clusters=k).fit(points)
        # Centroid assigned to each point, aligned row-by-row with `points`.
        assigned_centers = kmeans.cluster_centers_[kmeans.predict(points)]
        # Full-dimensional squared distances, vectorised over all points.
        sse.append(float(np.sum((points - assigned_centers) ** 2)))
    return sse
def define_chemical_clusters(fingerprints, k=15, use_pca=True):
    """Cluster chemicals on their (concatenated) fingerprint vectors.

    `fingerprints` is one dict or a list of dicts {chemical: vector}; only
    chemicals present in every dict contribute. With use_pca the features
    are first reduced to 2 dimensions. A negative `k` triggers elbow-based
    selection of the cluster count (and shows the SSE curve).

    @return: (defaultdict {cluster label: set of chemicals}, center array)
    """
    if not isinstance(fingerprints, list):
        fingerprints = [fingerprints]
    shared_keys = set.intersection(*[set(f.keys()) for f in fingerprints])
    features = np.concatenate(
        [np.asarray([fp[key] for key in shared_keys]) for fp in fingerprints],
        axis=1)
    if use_pca:
        features = PCA(2).fit_transform(features)
    if k < 0:
        # Pick k at the minimum of the elbow curve (up to 25 clusters).
        sse = calculate_WSS(features, 25)
        k = np.argmin(sse) + 1
        plt.plot(sse)
        plt.show()
    kmeans = KMeans(n_clusters=k).fit(features)
    labels = kmeans.predict(features)
    clusters = defaultdict(set)
    for chemical, label in zip(shared_keys, labels):
        clusters[label].add(chemical)
    return clusters, kmeans.cluster_centers_
def merge_closest(clusters, cluster_centers, ord=2):
    """Merge the two clusters whose centers are closest (by `ord`-norm).

    Returns the updated (clusters, cluster_centers) pair with one fewer
    cluster. Keys are re-labelled to 0..len-1 after the merge so that the
    dict keys stay aligned with the row indices of `cluster_centers`
    (bug fixed: the old version deleted a row from the center array without
    re-labelling the dict keys, so keys and row indices drifted apart after
    any merge where the kept key was larger than the deleted one).

    With a single remaining cluster the degenerate self-merge empties both
    structures, which callers use as a termination signal.
    """
    # All pairwise center distances.
    dist = {}
    for i, cc1 in enumerate(cluster_centers):
        for j, cc2 in enumerate(cluster_centers):
            if i == j:
                continue
            dist[(i, j)] = np.linalg.norm(cc1 - cc2, ord=ord)
    if dist:
        # Closest pair; ties resolve to the first-seen pair, as before.
        merge, _ = min(dist.items(), key=lambda x: x[1])
    else:
        # Single cluster left: self-merge (0, 0) empties the result.
        merge = (0, 0)
    k1, k2 = merge
    cluster_centers[k1] = np.mean([cluster_centers[k1], cluster_centers[k2]], axis=0)
    cluster_centers = np.delete(cluster_centers, k2, axis=0)
    clusters[k1] |= clusters[k2]
    clusters.pop(k2, None)
    # Re-label keys consecutively to match np.delete's row shifting.
    relabelled = defaultdict(set)
    for new_key, old_key in enumerate(sorted(clusters)):
        relabelled[new_key] = clusters[old_key]
    return relabelled, cluster_centers
def filter_data(X, Y, C_t, C_v, S_t, S_v):
    """Split (chemical, species) samples into train/test by set membership.

    A sample goes to the training split when its chemical is in C_t and its
    species in S_t, and (independently) to the test split when its chemical
    is in C_v and species in S_v — a sample may land in both splits.

    @return: (Xtr, Xte, ytr, yte) lists, input order preserved
    """
    paired = list(zip(X, Y))
    train = [(x, y) for x, y in paired if x[0] in C_t and x[1] in S_t]
    test = [(x, y) for x, y in paired if x[0] in C_v and x[1] in S_v]
    Xtr = [x for x, _ in train]
    ytr = [y for _, y in train]
    Xte = [x for x, _ in test]
    yte = [y for _, y in test]
    return Xtr, Xte, ytr, yte
import sys
# insert at 1, 0 is the script path (or '' in REPL)
# Machine-specific path to a local pySMIfp checkout providing
# `smiles_fingerprint`; this breaks on other machines — consider packaging.
sys.path.insert(1, '/media/erik/Mass/Dropbox/NIVA_GITLAB/pySMIfp')
from smiles_fingerprints import smiles_fingerprint
def load_smiles_fingerprints():
    """Fetch SMILES fingerprints for all ChEMBL chemicals known to Wikidata.

    Queries Wikidata for every entity with both a canonical SMILES (P233)
    and a ChEMBL id (P592), computes a fingerprint per SMILES string with
    the external `smiles_fingerprint` helper, and returns a dict keyed by
    the full ChEMBL molecule IRI. Requires network access through the
    module-level `sparql` client.
    """
    q = """
    select ?chembl ?smiles where {
        ?c wdt:P233 ?smiles ;
           wdt:P592 ?chembl .
    }
    """
    converter = {}
    sparql.setQuery(q)
    results = sparql.query().convert()
    for result in results["results"]["bindings"]:
        ch = result["chembl"]["value"]
        smi = result['smiles']['value']
        smifp = smiles_fingerprint(smi)
        # Key by the full ChEMBL molecule IRI used elsewhere in the data.
        converter['http://rdf.ebi.ac.uk/resource/chembl/molecule/'+ch] = smifp
    return converter
def save_smiles_fingerprints(fp, filename='data/smiles_fingerprints.csv'):
    """Persist a {chemical: fingerprint vector} mapping as a CSV file.

    Writes one 'sig<i>' column per fingerprint position plus a 'chemical'
    identifier column, in the dict's iteration order (the layout read back
    by read_smiles_fingerprints).

    Improvements: the signature length is now taken from the mapping itself
    instead of calling smiles_fingerprint('C') — which both recomputed a
    constant and made this writer depend on the external SMILES module —
    and an empty mapping now produces a CSV with just the 'chemical'
    column instead of raising.
    """
    chemicals = list(fp.keys())
    vectors = [fp[c] for c in chemicals]
    n_sig = len(vectors[0]) if vectors else 0
    columns = {'sig%s' % str(i): [vec[i] for vec in vectors] for i in range(n_sig)}
    df = pd.DataFrame(data={'chemical': chemicals, **columns})
    df.to_csv(filename)
def read_smiles_fingerprints(filename):
    """Load fingerprints previously written by save_smiles_fingerprints.

    @param filename: CSV path with a 'chemical' column plus 'sig<i>' columns
    @return: dict mapping chemical identifier -> fingerprint vector
    """
    frame = pd.read_csv(filename)
    signature_columns = [name for name in frame.columns if 'sig' in name]
    return dict(zip(frame['chemical'].values,
                    np.asarray(frame[signature_columns].values)))
def chemical_similarities(fingerprints):
    """Map each chemical to its Dice similarity against every chemical.

    For binary fingerprint vectors a and b the score is 2*(a·b)/(|a|+|b|)
    (Sørensen–Dice). Each value in the returned dict is the vector of
    similarities of that chemical to all chemicals, in dict iteration order.
    """
    names = list(fingerprints.keys())
    matrix = np.asarray([fingerprints[name] for name in names])
    bit_counts = np.sum(matrix, axis=1)
    out = {}
    for name, row, count in zip(names, matrix, bit_counts):
        out[name] = 2 * (row @ matrix.T) / (count + bit_counts)
    return out
def main():
    """
    organic = obo['CHEBI_50860']
    inorganic = obo['CHEBI_24835']
    """
    # --- Resolve pretrained embedding file names. Must mirror the naming
    # scheme of the pretraining script: model + kind + hash((lit, *parts)).
    model = 'ComplEx'
    g1_parts = [[0],[0,1],[0,1,2]]  # chemical-KG fragment combinations
    g2_parts = [[0],[0,1]]          # taxonomy-KG fragment combinations
    p = list(product(g1_parts,g2_parts))
    p += [p[-1]]  # last config duplicated: the literal-enabled variant
    ul = (False,False)
    f1,f2=[],[]
    for g1p,g2p,in p:
        for lit,gp,fs,name in zip([*ul],[g1p,g2p],[f1,f2],['_chemical_','_taxonomy_']):
            fs.append(model+name+str(hash((lit,*gp))))
        if (g1p,g2p) == p[-1]:
            ul = (True,True)
    # --- Load CHEBI chemical groups (organic / inorganic / salts). ---
    organic_chemicals = set()
    inorganic_chemicals = set()
    salts = set()
    for i in range(1,10):
        df = pd.read_csv('./data/chemical_group_%s.csv' % str(i),index_col='parent')
        # Each group file may lack any of the three parents; ignore misses.
        try:
            organic_chemicals |= set(df.loc['http://purl.obolibrary.org/obo/CHEBI_50860','children'].split(','))
        except:
            pass
        try:
            inorganic_chemicals |= set(df.loc['http://purl.obolibrary.org/obo/CHEBI_24835','children'].split(','))
        except:
            pass
        try:
            salts |= set(df.loc['http://purl.obolibrary.org/obo/CHEBI_24866','children'].split(','))
        except:
            pass
    print('Num organic chemicals',len(organic_chemicals))
    print('Num inorganic chemicals',len(inorganic_chemicals))
    print('Num salts',len(salts))
    C = organic_chemicals
    # --- Chemical/species feature tables and pretrained embeddings. ---
    try:
        smiles_fingerprints = read_smiles_fingerprints('./data/smiles_fingerprints.csv')
    except FileNotFoundError:
        # First run: fetch from Wikidata and cache to disk.
        smiles_fingerprints = load_smiles_fingerprints()
        save_smiles_fingerprints(smiles_fingerprints,'./data/smiles_fingerprints.csv')
    mms = MinMaxScaler().fit_transform(np.asarray([smiles_fingerprints[k] for k in smiles_fingerprints]))
    smiles_fingerprints = dict(zip(smiles_fingerprints,mms))
    X,Y,experimental_features = load_data('./data/experiments.csv',filter_chemicals=None, filter_species=None)
    pubchem_fingerprints = load_fingerprints('./data/chemicals_fingerprints.csv')
    Y = {k:y for k,y in zip(X,Y)}
    # Replace raw fingerprints with pairwise Dice-similarity vectors.
    pubchem_fingerprints = chemical_similarities(pubchem_fingerprints)
    chemical_embedding = load_embeddings('./data/embeddings/%s_entity_embeddings.npy' % f1[0],
                                         './data/embeddings/%s_entity_ids.npy' % f1[0])
    species_embedding = load_embeddings('./data/embeddings/%s_entity_embeddings.npy' % f2[0],
                                        './data/embeddings/%s_entity_ids.npy' % f2[0])
    chemical_features = load_features('./data/chemicals_features.csv')
    chemical_features = dict(zip(chemical_features,MinMaxScaler().fit_transform(np.asarray([chemical_features[k] for k in chemical_features]))))
    # Gaussian-quantile normalisation of both embedding tables.
    for cf in [QuantileTransformer(n_quantiles=100,output_distribution='normal')]:
        chemical_embedding = dict(zip(chemical_embedding,cf.fit_transform(np.asarray([chemical_embedding[k] for k in chemical_embedding]))))
    for cf in [QuantileTransformer(n_quantiles=100,output_distribution='normal')]:
        species_embedding = dict(zip(species_embedding,cf.fit_transform(np.asarray([species_embedding[k] for k in species_embedding]))))
    # --- Species taxonomy divisions; keep groups with > 5 members. ---
    species_divisions = defaultdict(set)
    for k in range(1,2):
        df = pd.read_csv('./data/species_groups_%s.csv' % str(k), index_col='parent')
        for s in df.index:
            species_divisions[s] |= set(df.loc[s,'children'].split(','))
    species_divisions = dict(filter(lambda x:len(x[1])>5,species_divisions.items()))
    #for k in species_divisions:
        #print(get_species_name(k.split('/')[-1]))
    #species_divisions = defaultdict(set)
    #df = pd.read_csv('./data/species_divisions.csv', index_col='parent')
    #for s in df.index:
        #species_divisions[s] |= set(df.loc[s,'children'].split(','))
    # Keep only chemicals present in every feature source.
    C = set.intersection(*map(lambda k:set(k.keys()),[smiles_fingerprints,pubchem_fingerprints,chemical_features,chemical_embedding]))
    for d in [smiles_fingerprints,pubchem_fingerprints,chemical_embedding,chemical_features]:
        for c in set(d.keys()):
            if not c in C:
                d.pop(c,None)
    # --- Cluster chemicals on SMILES fingerprints, then repeatedly run a
    # leave-one-out model per (cluster, species division), merging the two
    # closest clusters between rounds until one remains. ---
    n = 7
    clusters, cluster_centers = define_chemical_clusters([smiles_fingerprints],k=max(-1,n),use_pca=False)
    print(*map(lambda x:len(x[1]),clusters.items()))
    data = {}
    all_runs = {}
    TOP_K = 10
    while True:
        for C,S in tqdm(product(clusters,species_divisions),total=len(clusters)*len(species_divisions)):
            k = [C,S]
            C = list(clusters[C])
            S = species_divisions[S]
            k[1] = get_species_name(k[1].split('/')[-1])
            loo = LeaveOneOut()
            predictions = []
            y_true = []
            # Leave-one-chemical-out evaluation inside the cluster.
            for train_index, test_index in loo.split(C):
                C_t = [C[i] for i in train_index]
                C_v = [C[i] for i in test_index]
                r1,r2,yte = run_model(C_t,C_v,S,S,Y,
                                      experimental_features,
                                      pubchem_fingerprints,
                                      chemical_embedding,
                                      species_embedding,
                                      chemical_features,
                                      merge_species=True)
                if r1 is None and r2 is None: continue
                r1 = np.mean(r1)
                r2 = np.mean(r2)
                y_true.append(np.mean(yte))
                predictions.append((r1,r2))
            y_true, predictions = map(np.asarray,[y_true,predictions])
            if len(predictions) < 10: continue  # skip clusters with few folds
            try:
                if len(predictions.shape) < 2:
                    predictions = np.expand_dims(predictions,axis=1)
                rsq_1 = r2_score(y_true,predictions[:,0])  # one-hot variant
                rsq_2 = r2_score(y_true,predictions[:,1])  # embedding variant
                all_runs[tuple(k)] = (rsq_1,rsq_2)
            except ValueError:
                pass
        # Rank runs by the mean R^2 of the two model variants.
        all_runs = dict(sorted(all_runs.items(),key=lambda x: sum(x[1])/2,reverse=True))
        print(all_runs)
        data[len(cluster_centers)] = all_runs
        if len(cluster_centers) > 0:
            clusters, cluster_centers = merge_closest(clusters,cluster_centers)
            # NOTE(review): all_runs keys are (cluster, species-name) tuples
            # while species_divisions is keyed by IRI strings — this pop
            # appears to never remove anything; confirm intent.
            for k in list(all_runs.keys())[:TOP_K]:
                _,s = k
                species_divisions.pop(k,None)
        else:
            break
    pd.to_pickle(data,'chemical_cluster_merging.pkl')
    exit()
    # ------------------------------------------------------------------
    # Everything below is unreachable (exit() above); kept verbatim.
    # ------------------------------------------------------------------
    ks = set()
    for k in species_divisions:
        S = species_divisions[k]
        still_true = True
        for k_c in clusters:
            C = clusters[k_c]
            Xtr,Xte,ytr,yte = filter_data(X,Y,C,C,S,S)
            if count(Xtr,Xte) > 100: ks.add(k)
    for k in tqdm(ks):
        n=6
        clusters, cluster_centers = define_chemical_clusters([smiles_fingerprints],k=max(-1,n))
        S = species_divisions[k]
        sn = get_species_name(k.split('/')[-1])
        results = defaultdict(list)
        i = 0
        while True:
            # Largest remaining cluster first.
            k_c = sorted(clusters,key=lambda x:len(clusters[x]),reverse=True)[0]
            C_t = clusters[k_c]
            if len(C_t) < 1: continue
            C_t,C_v = train_test_split(list(C_t),test_size=0.25)
            S_t = S
            S_v = S
            Xtr,Xte,ytr,yte = filter_data(X,Y,C_t,C_v,S_t,S_v)
            try:
                assert count(Xtr,Xte) > 20
                r1,r2 = run_model(Xtr,
                                  Xte,
                                  ytr,
                                  yte,
                                  experimental_features,
                                  pubchem_fingerprints,
                                  chemical_embedding,
                                  species_embedding,
                                  chemical_features,
                                  merge_species=True)
            except AssertionError:
                r1,r2 = float('nan'), float('nan')
            except np.AxisError:
                r1,r2 = float('nan'), float('nan')
            results[i].append((r1,r2))
            clusters, cluster_centers = merge_closest(clusters,cluster_centers)
            if len(cluster_centers) < 1:
                break
            i += 1
        v0 = [[v[0] for v in results[k]] for k in results]
        v1 = [[v[1] for v in results[k]] for k in results]
        fig, ax = plt.subplots()
        # Red = variant 1, green = variant 2, interleaved bar positions.
        for x,color,ran in zip([v0,v1],['red','green'],[np.arange(0,len(v0)*2,2),np.arange(1,len(v1)*2,2)]):
            mins = [np.nanmin(a) for a in x]
            maxes = [np.nanmax(a) for a in x]
            means = [np.nanmean(a) for a in x]
            std = [np.nanstd(a) for a in x]
            mins,maxes,means,std = map(np.asarray,[mins,maxes,means,std])
            ax.bar(ran,maxes,width=0.5,color=color)
        #plt.ylim(-1,1)
        ax.set_xticks(np.arange(0.5,len(v0)*2,2))
        ax.set_xticklabels(('%s Clusters' % str(abs(i)) for i in range(-n,0)))
        plt.savefig('./plots/chemical_clusters_taxon_%s.png' % sn)
    exit()
    #def tqdm(x,**params):
        #return x
    # Per-division comparison of model configurations (also unreachable).
    for filter_chemicals,string,TOP_K in tqdm(zip([inorganic_chemicals | salts],['organic'],[4]),total=1,desc='Chemical Groups'):
        #if string=='organic': continue
        for division in tqdm(S_v,total=len(S_v),desc='Divisions'):
            if not len(S_v[division]) > 1: continue
            model_params={'encode':False,'train_ae_fingerprints':False,'train_ae_species':False}
            results = [[]]*TOP_K
            # Rank species in the division by number of usable samples.
            f = lambda _s: sum([1 for c,s in X if (s == _s and c in C-filter_chemicals)])
            tmp_division = list(sorted(S_v[division],key=f,reverse=True))[:TOP_K]
            for i,s_v in tqdm(enumerate(tmp_division),desc='Species in division %s' % division,leave=False,total=len(tmp_division)):
                C_restriction = {'C_v':{'not content':filter_chemicals},'C_t':{'not content':filter_chemicals}}
                configs = []
                #Method 1
                configs.append((1, 1, {'S_v':{'content':set([s_v]),'max_len':1}}))
                configs.append((1, 2, {'S_v':{'content':set([s_v]),'max_len':1}}))
                #Method 2
                configs.append((2, 1, {'S_v':{'content':set([s_v]),'max_len':1}}))
                #Method 3
                configs.append((3, 1, {'S_v':{'content':set([s_v]),'max_len':1}}))
                configs.append((3, 2, {'S_v':{'content':set([s_v]),'max_len':1}}))
                #Method 4
                configs.append((4, 1, {'S_v':{'content':set([s_v]),'max_len':1}}))
                configs.append((4, 2, {'S_v':{'content':set([s_v]),'max_len':1}}))
                tmp_res = np.zeros((len(configs),2))
                for j,config in tqdm(enumerate(configs),total=len(configs),leave=False,desc='Configs'):
                    m,v,res = config
                    r1_tmp = []
                    r2_tmp = []
                    # Average each config over 10 random splits.
                    for _ in range(10):
                        tf.keras.backend.clear_session()
                        prop = 0.3
                        Xtr,Xte,ytr,yte = data_split(X,Y,restrictions={**res,**C_restriction},method=m,variant=v,prop=prop)
                        try:
                            r1,r2 = run_model(Xtr,
                                              Xte,
                                              ytr,
                                              yte,
                                              experimental_features,
                                              fingerprints,
                                              chemical_embedding,
                                              species_embedding,
                                              model_params=model_params)
                        except:
                            r1,r2=0,0
                        r1_tmp.append(r1)
                        r2_tmp.append(r2)
                    tmp_res[j,0] = np.mean(r1_tmp)
                    tmp_res[j,1] = np.mean(r2_tmp)
                results[i] = tmp_res
            # One bar-plot panel per species in the division.
            fig, axs = plt.subplots(1,len(results),figsize=(40, 10))
            for i,ax in enumerate(axs):
                ms = results[i]
                baseline = ms[:,0]
                over = ms[:,1]
                baseline = np.nan_to_num(baseline, nan=0.0,posinf=0.0, neginf=0.0)
                over = np.nan_to_num(over, nan=0.0,posinf=0.0, neginf=0.0)
                width = 0.4
                ax.bar(np.arange(0,len(baseline)*2,2),baseline,width,color='red')
                ax.bar(np.arange(1,len(baseline)*2,2),over,width,color='green')
                ax.set_title(get_species_name(tmp_division[i].split('/')[-1]))
                ax.set_xticks(np.arange(0.5,len(baseline)*2,2))
                ax.set_xticklabels((str(i) for i in range(len(configs))))
                ax.set_ylim(0,max(*over,*baseline)+0.1)
            plt.savefig('plots/division_%s_%s.png' % (division,string))

if __name__ == '__main__':
    main()
| 32,681 | 35.394209 | 169 | py |
kge_ecotox_regression | kge_ecotox_regression-main/embedding_model.py | from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Input, Embedding, Dense, Dropout, Conv2D, Flatten, Concatenate, Multiply
import tensorflow as tf
def min_distance_loss(w, epsilon=1.0):
    """Penalty counting embedding rows that lie closer than `epsilon`.

    Builds the pairwise squared-Euclidean distance matrix of the rows of
    `w` via ||a||^2 - 2ab + ||b||^2, shifts the diagonal by epsilon so
    self-distances never count, then counts sub-epsilon pairs normalised
    by the embedding width.
    """
    r = tf.reduce_sum(w*w, 1)
    r = tf.reshape(r, [-1, 1])
    # Pairwise squared distances between all rows.
    D = r - 2*tf.matmul(w, tf.transpose(w)) + tf.transpose(r)
    D = D + tf.linalg.diag(epsilon * tf.ones(D.shape[0]))
    return tf.reduce_sum(tf.where(D<epsilon,1.0,0.0))/tf.cast(w.shape[1],tf.float32)
def TransE(entities, relations, dim=200, bias=1, lamb=1, norm_size=0.0, mdl=0.0):
    """TransE scorer with a margin (hinge) loss attached via add_loss.

    Score: bias - ||h + r - t||_2. Labels arrive as a second input
    (+1 true / -1 corrupted triples). Optional regularisers: L2 on the
    entity table (`norm_size`) and the min-distance penalty (`mdl`).
    Compiled with loss=None because the loss is attached to the graph.
    """
    inp = Input((3,))      # (head, relation, tail) integer ids
    inp_label = Input(())  # +1 / -1 triple label
    s,p,o = tf.unstack(inp,axis=-1)
    entity_embedding = Embedding(len(entities),dim,name='entity_embedding')
    relation_embedding = Embedding(len(relations),dim,name='relation_embedding')
    h,r,t = entity_embedding(s),relation_embedding(p),entity_embedding(o)
    score = bias - tf.norm(h+r-t, ord=2, axis=-1)
    # Hinge: max(0, lamb - label*score), plus regularisation terms.
    loss = lamb - inp_label * score
    loss = tf.where(loss>0,loss,0) + \
           norm_size * tf.norm(entity_embedding.weights[0],ord=2)**2 + \
           min_distance_loss(entity_embedding.weights[0]) * mdl
    model = Model(inputs=[inp,inp_label],outputs=score)
    model.add_loss(loss)
    model.compile(optimizer='adam',loss=None)
    return model
def DistMult(entities, relations, dim=200, norm_size=0.0, mdl=0.0):
    """DistMult scorer compiled with a logistic (softplus) ranking loss.

    Score: sum(h * r * t) over the embedding axis. The loss receives
    (+1/-1) triple labels and adds an L2 penalty on the entity table
    (`norm_size`) plus the min-distance penalty scaled by `mdl`.

    Consistency fix: `mdl` was previously also passed as the *epsilon*
    argument of min_distance_loss, unlike TransE/ComplEx which use the
    default epsilon; the term is now min_distance_loss(W) * mdl in all
    three models (identical when mdl == 0, the default).
    """
    inp = Input((3,))      # (head, relation, tail) integer ids
    inp_label = Input(())  # +1 / -1 triple label (consumed by the loss)
    s, p, o = tf.unstack(inp, axis=-1)
    entity_embedding = Embedding(len(entities), dim, name='entity_embedding')
    relation_embedding = Embedding(len(relations), dim, name='relation_embedding')
    h, r, t = entity_embedding(s), relation_embedding(p), entity_embedding(o)
    score = tf.keras.layers.Activation('linear')(tf.reduce_sum(h*r*t, axis=-1))
    model = Model(inputs=[inp, inp_label], outputs=score)
    # Softplus logistic loss: sum(log(1 + exp(-label * score))).
    loss = lambda true, pred: tf.reduce_sum(tf.math.log(1+tf.math.exp(-true*pred))) + \
        norm_size * tf.norm(entity_embedding.weights[0], ord=2)**2 + \
        min_distance_loss(entity_embedding.weights[0]) * mdl
    model.compile(optimizer='adam', loss=loss)
    return model
def ComplEx(entities, relations, dim=200, norm_size=0.0, mdl=0.0):
    """ComplEx scorer compiled with a logistic (softplus) ranking loss.

    Embeddings of width `dim` are split in half into real/imaginary parts;
    the score is the standard ComplEx trilinear form
    Re(<r, h, conj(t)>). Regularised like DistMult (L2 on the entity table
    plus the min-distance penalty scaled by `mdl`).
    """
    inp = Input((3,))      # (head, relation, tail) integer ids
    inp_label = Input(())  # +1 / -1 triple label (consumed by the loss)
    s,p,o = tf.unstack(inp,axis=-1)
    entity_embedding = Embedding(len(entities),dim,name='entity_embedding')
    relation_embedding = Embedding(len(relations),dim,name='relation_embedding')
    h,r,t = entity_embedding(s),relation_embedding(p),entity_embedding(o)
    # First half = real component, second half = imaginary component.
    h_real,h_img = tf.split(h,2,axis=-1)
    r_real,r_img = tf.split(r,2,axis=-1)
    t_real,t_img = tf.split(t,2,axis=-1)
    score = tf.reduce_sum(r_real*h_real*t_real,axis=-1) + \
            tf.reduce_sum(r_real*h_img*t_img,axis=-1) + \
            tf.reduce_sum(r_img*h_real*t_img,axis=-1) - \
            tf.reduce_sum(r_img*h_img*t_real,axis=-1)
    model = Model(inputs=[inp,inp_label],outputs=score)
    loss = lambda true,pred: tf.reduce_sum(tf.math.log(1+tf.math.exp(-true*pred))) + \
        norm_size * tf.norm(entity_embedding.weights[0],ord=2)**2 + \
        min_distance_loss(entity_embedding.weights[0]) * mdl
    model.compile(optimizer='adam',loss=loss)
    return model
def ConvE(entities, relations):
    """ConvE-style scorer: 2D convolution over stacked (head, relation) maps.

    Head and relation embeddings (fixed dim=200) are reshaped to 20x10
    single-channel images, concatenated, passed through two conv+dropout
    stages, projected back to `dim`, multiplied with the tail embedding and
    squashed to a probability; trained with label-smoothed binary
    cross-entropy (so unlike the other models here, labels must be 0/1).
    """
    dim = 200
    inp = Input((3,))      # (head, relation, tail) integer ids
    inp_label = Input(())  # 0/1 triple label (consumed by the BCE loss)
    s,p,o = tf.unstack(inp,axis=-1)
    entity_embedding = Embedding(len(entities),dim,name='entity_embedding')
    relation_embedding = Embedding(len(relations),dim,name='relation_embedding')
    h,r,t = entity_embedding(s),relation_embedding(p),entity_embedding(o)
    # 200-d vectors viewed as 20x10 "images".
    h = tf.reshape(h,(-1,20,10,1))
    r = tf.reshape(r,(-1,20,10,1))
    x = Concatenate(axis=2)([h,r])
    x = Conv2D(16,(5,5),activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Conv2D(16,(3,3),activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Flatten()(x)
    x = Dense(dim)(x)
    # Interaction with the tail embedding, then binary classification.
    x = Multiply()([x,t])
    x = Dense(1,activation='sigmoid')(x)
    model = Model(inputs=[inp,inp_label],outputs=x)
    model.compile(optimizer='adam',loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=0.05))
    return model
| 4,177 | 31.897638 | 108 | py |
kge_ecotox_regression | kge_ecotox_regression-main/pretrained_embedding_models.py |
import sys
import os
from itertools import product
from KGEkeras import DistMult, HolE, TransE, HAKE, ConvE, ComplEx, ConvR, RotatE, pRotatE, ConvKB, CosinE
from kerastuner import RandomSearch, HyperParameters, Objective, Hyperband, BayesianOptimization
from random import choice
from collections import defaultdict
from tensorflow.keras.losses import binary_crossentropy,hinge,mean_squared_error
from tensorflow.keras import Input
from tensorflow.keras import Model
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, Callback, TerminateOnNaN, ReduceLROnPlateau
from sklearn.metrics.cluster import completeness_score
from tensorflow.keras.optimizers import Adam
import json
import tensorflow as tf
from tensorflow.keras.optimizers.schedules import ExponentialDecay
from KGEkeras import loss_function_lookup
from lib.utils import generate_negative, oversample_data, load_data
from tqdm import tqdm
import string
import random
from random import choices
from lib.hptuner import HPTuner
import pickle
try:
from tensorflow_addons.callbacks import TimeStopping
except:
pass
from rdflib import Graph, URIRef, Literal, Namespace
from KGEkeras import LiteralConverter
from sklearn.decomposition import PCA
# Training / hyper-parameter-search budget constants.
SECONDS_PER_TRAIL = 600       # per-trial wall-clock budget (sic: "TRAIL")
SECONDS_TO_TERMINATE = 3600   # overall time budget
SEARCH_MAX_EPOCHS = 10        # epochs per search trial
MAX_EPOCHS = 200              # epochs for the final training run
MIN_EPOCHS = 50
MAX_TRIALS = 20               # random-search trials
PATIENCE = 10                 # early-stopping patience (final run)
EPSILON = 10e-7               # note: equals 1e-6

# Candidate KGE scoring models; all but ComplEx are disabled for this run.
models = {
    #'DistMult':DistMult,
    #'TransE':TransE,
    #'HolE':HolE,
    'ComplEx':ComplEx,
    #'HAKE':HAKE,
    #'pRotatE':pRotatE,
    #'RotatE':RotatE,
    #'ConvE':ConvE,
    #'ConvKB':ConvKB,
    }
class DataGenerator(tf.keras.utils.Sequence):
    """Keras sequence yielding batches of positive KG triples plus negatives.

    Each batch takes `batch_size` true triples from `kg` and, per positive,
    `ns` negatives produced by the project helper `generate_negative`
    (presumably by corrupting entities — confirm in lib.utils). The model
    attaches its own loss, so the target `y` is always None.
    """
    def __init__(self, kg, ns=10, batch_size=32, shuffle=True):
        # kg: sequence of (head, relation, tail) integer-id triples.
        self.batch_size = min(batch_size,len(kg))
        self.kg = kg
        self.ns = ns
        # Number of distinct entities; passed to the negative sampler as
        # its id range.
        self.num_e = len(set([s for s,_,_ in kg])|set([o for _,_,o in kg]))
        self.shuffle = shuffle
        self.indices = list(range(len(kg)))
        self.on_epoch_end()
    def __len__(self):
        # Number of full batches per epoch (final partial batch dropped).
        return len(self.kg) // self.batch_size
    def __getitem__(self, index):
        # Slice the (possibly shuffled) read order for this batch.
        index = self.index[index * self.batch_size:(index + 1) * self.batch_size]
        batch = [self.indices[k] for k in index]
        X, y = self.__get_data(batch)
        return X, y
    def on_epoch_end(self):
        # Re-shuffle the read order between epochs when requested.
        self.index = np.arange(len(self.indices))
        if self.shuffle == True:
            np.random.shuffle(self.index)
    def __get_data(self, batch):
        tmp_kg = np.asarray([self.kg[i] for i in batch])
        negative_kg = generate_negative(tmp_kg,N=self.num_e,negative=self.ns)
        # Oversample the positives so both triple arrays align for the loss.
        X = oversample_data(kgs=[tmp_kg,negative_kg])
        return X, None
def build_model(hp):
    """Build a compiled, trainable KGE model from a hyper-parameter dict.

    `hp` must provide the keys used below (dim, embedding_model, margin,
    loss_function, learning_rate, ...). The scorer from `models` is applied
    to true and corrupted triple inputs; the pairwise ranking loss over
    (score, fscore) is attached with add_loss, so the Model compiles with
    loss=None.
    """
    params = hp.copy()
    # Entity and relation embeddings share the same dimensionality.
    params['e_dim'] = params['dim']
    params['r_dim'] = params['dim']
    params['name'] = 'embedding_model'
    embedding_model = models[params['embedding_model']]
    embedding_model = embedding_model(**params)
    triple = Input((3,))   # true triples
    ftriple = Input((3,))  # corrupted (false) triples
    inputs = [triple, ftriple]
    score = embedding_model(triple)
    fscore = embedding_model(ftriple)
    loss_function = loss_function_lookup(params['loss_function'])
    # `or 1` guards against a missing/zero margin.
    loss = loss_function(score,fscore,params['margin'] or 1, 1)
    model = Model(inputs=inputs, outputs=loss)
    model.add_loss(loss)
    # Exponentially decaying Adam learning rate.
    model.compile(optimizer=Adam(learning_rate=ExponentialDecay(params['learning_rate'],decay_steps=100000,decay_rate=0.96)),
                  loss=None)
    return model
def optimize_model(model, kg, lit=False, name='name', hp=None):
    """Tune (optionally), train, and export one KGE model for a graph.

    @param model: key into `models` (e.g. 'ComplEx')
    @param kg: rdflib Graph to embed
    @param lit: if True, extract literal features and feed them to the model
    @param name: output path prefix for the saved weights/embedding files
    @param hp: fixed hyper-parameters; when given, the random search is
        skipped and these values are used directly
    Side effects: saves model weights plus entity/relation embedding arrays
    and id maps as .npy files prefixed by `name`.
    """
    if lit:
        # Pull literal values into a per-entity feature matrix, reduced to
        # at most 100 dimensions.
        lc = LiteralConverter(kg)
        literals = lc.fit_transform()
        kg = lc.g
        literals = PCA(min(len(literals[0]),100)).fit_transform(literals)
    else:
        literals = None
    # Drop remaining literal triples; only entity-entity edges are embedded.
    kg -= [(s,p,o) for s,p,o in kg if isinstance(o,Literal)]
    entities = set(kg.subjects()) | set(kg.objects())
    relations = set(kg.predicates())
    # Integer id maps for entities and relations.
    me = {k:i for i,k in enumerate(entities)}
    mr = {k:i for i,k in enumerate(relations)}
    kg = list(map(lambda x: (me[x[0]],mr[x[1]],me[x[2]]), kg))
    bs = 512
    kg = np.asarray(kg)
    model_name = model
    N = len(me)
    M = len(mr)
    # Hyper-parameter search space (minimising relative final loss).
    hptuner = HPTuner(runs=MAX_TRIALS, objectiv_direction='min')
    hptuner.add_value_hp('gamma',0,21)
    hptuner.add_value_hp('dim',100,401,dtype=int)
    hptuner.add_value_hp('negative_samples',10,101,dtype=int)
    hptuner.add_value_hp('margin',1,11,dtype=int)
    hptuner.add_list_hp('loss_function',['pairwize_hinge','pairwize_logistic','pointwize_hinge','pointwize_logistic'],exhaustive=True)
    hptuner.add_fixed_hp('embedding_model',model)
    hptuner.add_fixed_hp('dp',0.2)
    hptuner.add_fixed_hp('hidden_dp',0.2)
    hptuner.add_fixed_hp('num_entities',N)
    hptuner.add_fixed_hp('num_relations',M)
    if hp:
        # Caller-supplied values override the search space.
        for k,i in hp.items():
            hptuner.add_fixed_hp(k,i)
        hptuner.add_fixed_hp('num_entities',N)
        hptuner.add_fixed_hp('num_relations',M)
    hptuner.add_fixed_hp('learning_rate',0.001)
    hptuner.add_fixed_hp('regularization',0.001)
    if lit:
        hptuner.add_fixed_hp('literals',literals)
        hptuner.add_fixed_hp('literal_activation','tanh')
    if hp:
        # Register a dummy trial so best_config() yields the fixed values.
        hptuner.next_hp_config()
        hptuner.add_result(0.0)
    with tqdm(total=hptuner.runs, desc='Trials') as pbar:
        # NOTE(review): `hp` is reassigned inside the loop body, so the
        # `hp is None` guard fails after the first iteration and at most
        # one search trial ever runs — confirm whether the guard should
        # test the original argument instead.
        while hptuner.is_active and hp is None:
            hp = hptuner.next_hp_config()
            model = build_model(hp)
            tr_gen = DataGenerator(kg, batch_size=bs, shuffle=True, ns=hp['negative_samples'])
            hist = model.fit(tr_gen,epochs=SEARCH_MAX_EPOCHS,verbose=2, callbacks=[EarlyStopping('loss'),TerminateOnNaN()])
            # Relative loss drop: final loss / initial loss (lower = better).
            score = hist.history['loss'][-1]/hist.history['loss'][0]
            hptuner.add_result(score)
            tf.keras.backend.clear_session()
            pbar.update(1)
    hp = hptuner.best_config()
    #if hp is None:
        #with open('./pretrained_hp/%s%s_kg.json' % (model_name,name), 'w') as fp:
            #json.dump(hp, fp)
    # Final training run with the best configuration.
    model = build_model(hp)
    tr_gen = DataGenerator(kg, batch_size=bs, shuffle=True, ns=hp['negative_samples'])
    hist = model.fit(tr_gen,epochs=MAX_EPOCHS, verbose=2, callbacks=[EarlyStopping('loss',patience=PATIENCE), TerminateOnNaN()])
    if np.isnan(hist.history['loss'][-1]):
        # NOTE(review): this retry passes the already-converted triple
        # array as `kg`, but the function expects an rdflib Graph at the
        # top — the recursion would fail at kg.subjects(); confirm.
        print(model_name,'nan loss.')
        return optimize_model(model_name,kg,lit,name,None)
    # Locate the embedding layer by type and export its weight matrices.
    for l in model.layers:
        if isinstance(l,models[model_name]):
            m = l.name
    m, W1, W2 = model, model.get_layer(m).entity_embedding.get_weights()[0], model.get_layer(m).relational_embedding.get_weights()[0]
    m.save_weights('pretrained_models/model/'+name)
    np.save(name+'_entity_embeddings.npy', W1)
    np.save(name+'_entity_ids.npy',np.asarray(list(zip(entities,range(len(entities))))))
    np.save(name+'_relational_embeddings.npy', W2)
    np.save(name+'_relation_ids.npy',np.asarray(list(zip(relations,range(len(relations))))))
def main():
    """Pretrain ComplEx embeddings for every chemical/taxonomy KG variant.

    Iterates over combinations of chemical-KG fragments (g1_parts) and
    taxonomy-KG fragments (g2_parts); the last combination is repeated with
    literals enabled. Output file names encode the configuration as
    model + kind + hash((use_literals, *parts)), matching the consumer
    script's naming scheme.
    """
    d = './data/embeddings/'
    # NOTE(review): `use_literals` is assigned but never used below —
    # `ul` carries the literal flags instead; confirm it can be removed.
    use_literals = product([False,True],[False,True])
    g1_parts = [[0],[0,1],[0,1,2]]
    g2_parts = [[0],[0,1]]
    p = list(product(g1_parts,g2_parts))
    p += [p[-1]]  # repeat the last config for the literal-enabled run
    ul = (False,False)
    for g1p,g2p in tqdm(p):
        # Union the requested Turtle fragments into one graph per KG.
        g1,g2 = Graph(),Graph()
        for i in g1p:
            g = Graph()
            g.load('./data/chemicals_%s.ttl' % str(i),format='ttl')
            g1 += g
        for i in g2p:
            g = Graph()
            g.load('./data/taxonomy_%s.ttl' % str(i),format='ttl')
            g2 += g
        for lit,gp,kg,name in zip([*ul],[g1p,g2p],[g1,g2],['_chemical_','_taxonomy_']):
            #hp_file = '../KGE-CEP/pretrained_hp/%s%s_kg.json' % (model,name)
            # Fixed hyper-parameters: search is skipped in this run.
            hp = {'e_dim':100,
                  'negative_samples':10,
                  'loss_function':'pairwize_logistic'}
            model = 'ComplEx'
            f = d+model+name+str(hash((lit,*gp)))
            optimize_model(model,kg,lit,name=f,hp=hp)
            tf.keras.backend.clear_session()
        # Flip literals on for the repeated final configuration.
        if (g1p,g2p) == p[-1]:
            ul = (True,True)

if __name__ == '__main__':
    main()
| 8,625 | 30.140794 | 134 | py |
kge_ecotox_regression | kge_ecotox_regression-main/autoencoder.py |
from tensorflow.keras.layers import Dense, GaussianNoise, Input, LayerNormalization
from tensorflow.keras.models import Model
from tensorflow import keras
def create_auto_encoder(input_size, dense_layers=(10,), noise=0):
    """Build an (encoder, auto-encoder) pair sharing the same weights.

    `dense_layers` gives the encoder layer widths (last entry = latent
    size); the decoder mirrors them and ends in a sigmoid reconstruction
    layer of `input_size` units. With noise > 0 a GaussianNoise layer
    corrupts the input (denoising auto-encoder). Train the returned
    auto-encoder; the encoder then shares its learned weights.

    Bug fixed: the previous version did `encoder = autoencoder`, a plain
    alias of the same Sequential object, so the returned "encoder" was the
    full auto-encoder and encoder.predict() yielded reconstructions instead
    of latent codes. The functional API is used so the encoder is a genuine
    sub-model ending at the bottleneck.
    """
    inputs = Input((input_size,))
    x = inputs
    if noise > 0:
        x = GaussianNoise(noise)(x)
    for width in dense_layers:
        x = Dense(width, activation='relu')(x)
    encoder = Model(inputs, x)  # ends at the bottleneck
    y = x
    # Mirrored decoder (includes the bottleneck width again, as before).
    for width in dense_layers[::-1]:
        y = Dense(width, activation='relu')(y)
    y = Dense(input_size, activation='sigmoid')(y)
    autoencoder = Model(inputs, y)
    return encoder, autoencoder
| 613 | 33.111111 | 83 | py |
lepard | lepard-main/main.py | import os, torch, json, argparse, shutil
from easydict import EasyDict as edict
import yaml
from datasets.dataloader import get_dataloader, get_datasets
from models.pipeline import Pipeline
from lib.utils import setup_seed
from lib.tester import get_trainer
from models.loss import MatchMotionLoss
from lib.tictok import Timers
from configs.models import architectures
from torch import optim
setup_seed(0)  # deterministic runs across numpy/torch/random

def join(loader, node):
    """PyYAML constructor for the custom '!join' tag: renders a YAML
    sequence as its items stringified and joined with underscores."""
    seq = loader.construct_sequence(node)
    return '_'.join([str(i) for i in seq])

# Register '!join' so config files can compose strings from lists.
yaml.add_constructor('!join', join)
if __name__ == '__main__':
    # load configs
    parser = argparse.ArgumentParser()
    parser.add_argument('config', type=str, help= 'Path to the config file.')
    args = parser.parse_args()
    with open(args.config,'r') as f:
        config = yaml.load(f, Loader=yaml.Loader)

    # Derive the experiment directories from dataset/folder/exp_dir fields.
    config['snapshot_dir'] = 'snapshot/%s/%s' % (config['dataset']+config['folder'], config['exp_dir'])
    config['tboard_dir'] = 'snapshot/%s/%s/tensorboard' % (config['dataset']+config['folder'], config['exp_dir'])
    config['save_dir'] = 'snapshot/%s/%s/checkpoints' % (config['dataset']+config['folder'], config['exp_dir'])
    config = edict(config)

    os.makedirs(config.snapshot_dir, exist_ok=True)
    os.makedirs(config.save_dir, exist_ok=True)
    os.makedirs(config.tboard_dir, exist_ok=True)

    if config.gpu_mode:
        config.device = torch.device("cuda:0")
    else:
        config.device = torch.device('cpu')

    # backup the source tree into the snapshot dir for reproducibility
    # (training runs only).
    if config.mode == 'train':
        os.system(f'cp -r models {config.snapshot_dir}')
        os.system(f'cp -r configs {config.snapshot_dir}')
        os.system(f'cp -r cpp_wrappers {config.snapshot_dir}')
        os.system(f'cp -r datasets {config.snapshot_dir}')
        os.system(f'cp -r kernels {config.snapshot_dir}')
        os.system(f'cp -r lib {config.snapshot_dir}')
        shutil.copy2('main.py',config.snapshot_dir)

    # model initialization
    config.kpfcn_config.architecture = architectures[config.dataset]
    config.model = Pipeline(config)
    # config.model = KPFCNN(config)

    # create optimizer
    if config.optimizer == 'SGD':
        config.optimizer = optim.SGD(
            config.model.parameters(),
            lr=config.lr,
            momentum=config.momentum,
            weight_decay=config.weight_decay,
            )
    elif config.optimizer == 'ADAM':
        config.optimizer = optim.Adam(
            config.model.parameters(),
            lr=config.lr,
            betas=(0.9, 0.999),
            weight_decay=config.weight_decay,
            )

    #create learning rate scheduler
    if 'overfit' in config.exp_dir :
        config.scheduler = optim.lr_scheduler.MultiStepLR(
            config.optimizer,
            milestones=[config.max_epoch-1], # fix lr during overfitting
            gamma=0.1,
            last_epoch=-1)
    else:
        config.scheduler = optim.lr_scheduler.ExponentialLR(
            config.optimizer,
            gamma=config.scheduler_gamma,
            )

    config.timers = Timers()

    # create dataset and dataloader; the train loader's neighborhood limits
    # are reused for val/test so KPConv neighborhoods stay consistent.
    train_set, val_set, test_set = get_datasets(config)
    config.train_loader, neighborhood_limits = get_dataloader(train_set,config,shuffle=True)
    config.val_loader, _ = get_dataloader(val_set, config, shuffle=False, neighborhood_limits=neighborhood_limits)
    config.test_loader, _ = get_dataloader(test_set, config, shuffle=False, neighborhood_limits=neighborhood_limits)

    # config.desc_loss = MetricLoss(config)
    config.desc_loss = MatchMotionLoss (config['train_loss'])

    trainer = get_trainer(config)
    if(config.mode=='train'):
        trainer.train()
    else:
        trainer.test()
| 3,723 | 32.54955 | 116 | py |
lepard | lepard-main/models/matching.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from models.position_encoding import VolumetricPositionEncoding as VolPE
def log_optimal_transport(scores, alpha, iters, src_mask, tgt_mask ):
b, m, n = scores.shape
if src_mask is None:
ms = m
ns = n
else :
ms = src_mask.sum(dim=1, keepdim=True)
ns = tgt_mask.sum(dim=1, keepdim=True)
bins0 = alpha.expand(b, m, 1)
bins1 = alpha.expand(b, 1, n)
alpha = alpha.expand(b, 1, 1)
Z = torch.cat([torch.cat([scores, bins0], -1),
torch.cat([bins1, alpha], -1)], 1)
norm = - (ms + ns).log() # [b, 1]
log_mu = torch.cat([norm .repeat(1, m), ns.log() + norm], dim=1)
log_nu = torch.cat([norm.repeat(1, n), ms.log() + norm], dim=1)
u, v = torch.zeros_like(log_mu), torch.zeros_like(log_nu)
for _ in range(iters):
u = log_mu - torch.logsumexp( Z + v.unsqueeze(1), dim=2)
v = log_nu - torch.logsumexp(Z + u.unsqueeze(2), dim=1)
Z= Z + u.unsqueeze(2) + v.unsqueeze(1)
Z = Z - norm.view(-1,1,1)
return Z
class Matching(nn.Module):
    """Coarse feature-matching head.

    Builds a soft correspondence (confidence) matrix between projected
    source and target point features using either dual-softmax or Sinkhorn
    optimal transport, then extracts confident mutual-max matches.
    """
    def __init__(self, config):
        super().__init__()
        self.match_type = config['match_type']
        self.confidence_threshold = config['confidence_threshold']
        d_model = config['feature_dim']
        # Linear projections applied before the similarity computation.
        # NOTE(review): forward() projects the target features with
        # src_proj as well; tgt_proj is created but never used — confirm
        # whether the weight sharing is intentional.
        self.src_proj = nn.Linear(d_model, d_model, bias=False)
        self.tgt_proj = nn.Linear(d_model, d_model, bias=False)
        self.entangled= config['entangled']
        if self.match_type == "dual_softmax":
            self.temperature = config['dsmax_temperature']
        elif self.match_type == 'sinkhorn':
            #sinkhorn algorithm
            self.skh_init_bin_score = config['skh_init_bin_score']
            self.skh_iters = config['skh_iters']
            self.skh_prefilter = config['skh_prefilter']
            # Learnable dustbin score for unmatched points.
            self.bin_score = nn.Parameter(
                torch.tensor( self.skh_init_bin_score, requires_grad=True))
        else:
            raise NotImplementedError()

    @staticmethod
    @torch.no_grad()
    def get_match( conf_matrix, thr, mutual=True):
        """Extract matches whose confidence exceeds `thr`.

        With mutual=True a match must be the maximum of both its row and
        its column. Returns (index [K, 3] of (batch, src, tgt) indices,
        mconf [K] confidences, mask [B, S, T] boolean selection).
        """
        mask = conf_matrix > thr
        #mutual nearest
        if mutual:
            mask = mask \
                   * (conf_matrix == conf_matrix.max(dim=2, keepdim=True)[0]) \
                   * (conf_matrix == conf_matrix.max(dim=1, keepdim=True)[0])
        #find all valid coarse matches
        index = (mask==True).nonzero()
        b_ind, src_ind, tgt_ind = index[:,0], index[:,1], index[:,2]
        mconf = conf_matrix[b_ind, src_ind, tgt_ind]
        return index, mconf, mask

    @staticmethod
    @torch.no_grad()
    def get_topk_match( conf_matrix, thr, mutual=True):
        """Same contract as get_match.

        NOTE(review): the body is currently identical to get_match — no
        top-k selection is implemented; confirm whether this is a stub.
        """
        mask = conf_matrix > thr
        #mutual nearest
        if mutual:
            mask = mask \
                   * (conf_matrix == conf_matrix.max(dim=2, keepdim=True)[0]) \
                   * (conf_matrix == conf_matrix.max(dim=1, keepdim=True)[0])
        #find all valid coarse matches
        index = (mask==True).nonzero()
        b_ind, src_ind, tgt_ind = index[:,0], index[:,1], index[:,2]
        mconf = conf_matrix[b_ind, src_ind, tgt_ind]
        return index, mconf, mask

    def forward(self, src_feats, tgt_feats, src_pe, tgt_pe, src_mask, tgt_mask, data, pe_type="rotary"):
        '''
        @param src_feats: [B, S, C]
        @param tgt_feats: [B, T, C]
        @param src_pe / tgt_pe: positional encodings for the two point sets
        @param src_mask: [B, S] validity mask (True = real point), or None
        @param tgt_mask: [B, T]
        @param data: dict updated in place with intermediate feature maps
        @param pe_type: positional-encoding scheme passed to VolPE
        @return: (conf_matrix [B, S, T], coarse_match index tensor)
        '''
        src_feats = self.src_proj(src_feats)
        # NOTE(review): target features also pass through src_proj; see
        # the class-level note about the unused tgt_proj.
        tgt_feats = self.src_proj(tgt_feats)
        data["src_feats_nopos"] = src_feats
        data["tgt_feats_nopos"] = tgt_feats
        if not self.entangled :
            # Inject positional encoding unless the features already
            # carry it (the "entangled" configuration).
            src_feats = VolPE.embed_pos(pe_type, src_feats, src_pe)
            tgt_feats = VolPE.embed_pos(pe_type, tgt_feats, tgt_pe)
        data["src_feats"] = src_feats
        data["tgt_feats"] = tgt_feats
        # Scale by sqrt(C) before the dot-product similarity.
        src_feats, tgt_feats = map(lambda feat: feat / feat.shape[-1] ** .5,
                                   [src_feats, tgt_feats])
        if self.match_type == "dual_softmax":
            # dual softmax matching
            sim_matrix_1 = torch.einsum("bsc,btc->bst", src_feats, tgt_feats) / self.temperature
            if src_mask is not None:
                # Mask invalid rows/cols before each softmax direction.
                sim_matrix_2 = sim_matrix_1.clone()
                sim_matrix_1.masked_fill_(~src_mask[:, :, None], float('-inf'))
                sim_matrix_2.masked_fill_(~tgt_mask[:, None, :], float('-inf'))
                conf_matrix = F.softmax(sim_matrix_1, 1) * F.softmax(sim_matrix_2, 2)
            else :
                conf_matrix = F.softmax(sim_matrix_1, 1) * F.softmax(sim_matrix_1, 2)
        elif self.match_type == "sinkhorn" :
            # optimal transport (Sinkhorn)
            sim_matrix = torch.einsum("bsc,btc->bst", src_feats, tgt_feats)
            if src_mask is not None:
                sim_matrix.masked_fill_(
                    ~(src_mask[..., None] * tgt_mask[:, None]).bool(), float('-inf'))
            log_assign_matrix = log_optimal_transport( sim_matrix, self.bin_score, self.skh_iters, src_mask, tgt_mask)
            assign_matrix = log_assign_matrix.exp()
            # Drop the dustbin row/column from the assignment.
            conf_matrix = assign_matrix[:, :-1, :-1].contiguous()
        coarse_match, _, _ = self.get_match(conf_matrix, self.confidence_threshold)
        return conf_matrix, coarse_match
| 5,412 | 29.931429 | 118 | py |
lepard | lepard-main/models/loss.py | import torch
import torch.nn as nn
import numpy as np
import open3d as o3d
from lib.benchmark_utils import to_o3d_pcd
from lib.visualization import *
import nibabel.quaternions as nq
from sklearn.metrics import precision_recall_fscore_support
from datasets.utils import blend_scene_flow, multual_nn_correspondence, knn_point_np
from models.matching import Matching as CM
def ransac_pose_estimation(src_pcd, tgt_pcd, corrs, distance_threshold=0.05, ransac_n=3):
    """Estimate a rigid pose from given correspondences with Open3D RANSAC.

    Args:
        src_pcd: [N, 3] source points (numpy).
        tgt_pcd: [M, 3] target points (numpy).
        corrs: pair (src_indices, tgt_indices) of matched point indices;
            stacked into Open3D's [K, 2] correspondence format.
        distance_threshold: inlier distance for RANSAC.
        ransac_n: number of correspondences sampled per RANSAC iteration.

    Returns:
        [4, 4] homogeneous transformation mapping src to tgt.

    NOTE(review): uses the legacy ``o3d.registration`` namespace — newer Open3D
    versions moved this to ``o3d.pipelines.registration``; confirm the pinned
    Open3D version.
    """
    src_pcd = to_o3d_pcd(src_pcd)
    tgt_pcd = to_o3d_pcd(tgt_pcd)
    corrs = o3d.utility.Vector2iVector(np.array(corrs).T)
    result_ransac = o3d.registration.registration_ransac_based_on_correspondence(
        source=src_pcd, target=tgt_pcd, corres=corrs,
        max_correspondence_distance=distance_threshold,
        estimation_method=o3d.registration.TransformationEstimationPointToPoint(False),
        ransac_n=ransac_n,
        criteria=o3d.registration.RANSACConvergenceCriteria(50000, 1000))
    return result_ransac.transformation
def computeTransformationErr(trans, info):
    """
    Computer the transformation error as an approximation of the RMSE of corresponding points.
    More informaiton at http://redwood-data.org/indoor/registration.html

    Args:
        trans (numpy array): transformation matrices [n,4,4]
        info (numpy array): covariance matrices of the gt transformation paramaters [n,4,4]

    Returns:
        p (float): transformation error
    """
    # decompose the (residual) transform into translation + quaternion rotation
    t = trans[:3, 3]
    r = trans[:3, :3]
    q = nq.mat2quat(r)
    # 6-DoF error vector: translation plus the quaternion's vector part
    # (w is dropped; for small rotations the vector part ~ half the rotation angle)
    er = np.concatenate([t, q[1:]], axis=0)
    # Mahalanobis-style error, normalized by info[0, 0]
    p = er.reshape(1, 6) @ info @ er.reshape(6, 1) / info[0, 0]
    return p.item()
class MatchMotionLoss(nn.Module):
def __init__(self, config):
super().__init__()
self.focal_alpha = config['focal_alpha']
self.focal_gamma = config['focal_gamma']
self.pos_w = config['pos_weight']
self.neg_w = config['neg_weight']
self.mot_w = config['motion_weight']
self.mat_w = config['match_weight']
self.motion_loss_type = config['motion_loss_type']
self.match_type = config['match_type']
self.positioning_type = config['positioning_type']
self.registration_threshold = config['registration_threshold']
self.confidence_threshold_metric = config['confidence_threshold_metric']
self.inlier_thr = config['inlier_thr']
self.fmr_thr = config['fmr_thr']
self.mutual_nearest = config['mutual_nearest']
self.dataset = config['dataset']
def forward(self, data):
loss_info = {}
loss = self.ge_coarse_loss(data, loss_info)
loss_info.update({ 'loss': loss })
return loss_info
    def ge_coarse_loss(self, data, loss_info, eval_metric=False):
        """Coarse-level loss: focal correspondence loss (+ optional motion loss).

        Reads predictions and ground truth from *data*, writes the GT confidence
        matrix back into it, and fills *loss_info* with per-term diagnostics.

        Args:
            data: batch dict with masks, predicted/GT matches and GT rigid motion.
            loss_info: dict, mutated in place with diagnostic entries.
            eval_metric: unused here (the metric block below is commented out).

        Returns:
            Scalar total loss tensor.
        """
        if self.dataset == "4dmatch":
            # pad the per-sample ground-truth scene flow to the batched [B,N,3] layout
            s2t_flow = torch.zeros_like(data['s_pcd'])
            for i, cflow in enumerate(data['coarse_flow']):
                s2t_flow[i][: len(cflow)] = cflow
        loss = 0.
        src_mask = data['src_mask']
        tgt_mask = data['tgt_mask']
        conf_matrix_pred = data['conf_matrix_pred']
        match_gt = data['coarse_matches']
        R_s2t_gt = data['batched_rot']
        t_s2t_gt = data['batched_trn']
        #get the overlap mask, for dense motion loss
        s_overlap_mask = torch.zeros_like(src_mask).bool()
        for bi, corr in enumerate (match_gt):
            s_overlap_mask[bi][ corr[0] ] = True
        # compute focal loss (weighted by the valid src x tgt region)
        c_weight = (src_mask[:, :, None] * tgt_mask[:, None, :]).float()
        conf_matrix_gt = self.match_2_conf_matrix(match_gt, conf_matrix_pred)
        data['conf_matrix_gt'] = conf_matrix_gt
        focal_coarse = self.compute_correspondence_loss(conf_matrix_pred, conf_matrix_gt, weight=c_weight)
        recall, precision = self.compute_match_recall( conf_matrix_gt, data['coarse_match_pred'])
        loss_info.update( { "focal_coarse": focal_coarse, "recall_coarse": recall, "precision_coarse": precision } )
        loss = loss + self.mat_w * focal_coarse
        # motion (L1 scene-flow) loss, only once matching is minimally reliable
        if recall > 0.01 and self.mot_w > 0:
            R_s2t_pred = data["R_s2t_pred"]
            t_s2t_pred = data["t_s2t_pred"]
            #compute predicted flow. Note, if 4dmatch, the R_pred,t_pred try to find the best rigid fit of deformation
            src_pcd_wrapped_pred = (torch.matmul(R_s2t_pred, data['s_pcd'].transpose(1, 2)) + t_s2t_pred).transpose(1, 2)
            sflow_pred = src_pcd_wrapped_pred - data['s_pcd']
            if self.dataset == '4dmatch':
                spcd_deformed = data['s_pcd'] + s2t_flow
                src_pcd_wrapped_gt = (torch.matmul(R_s2t_gt, spcd_deformed.transpose(1, 2)) + t_s2t_gt).transpose(1, 2)
            else : # 3dmatch
                src_pcd_wrapped_gt = (torch.matmul(R_s2t_gt, data['s_pcd'].transpose(1, 2)) + t_s2t_gt).transpose(1, 2)
            sflow_gt = src_pcd_wrapped_gt - data['s_pcd']
            e1 = torch.sum(torch.abs(sflow_pred - sflow_gt), 2)
            # only supervise flow on points that actually overlap the target
            e1 = e1[s_overlap_mask] # [data['src_mask']]
            l1_loss = torch.mean(e1)
            loss = loss + self.mot_w * l1_loss
        #
        # if eval_metric :
        #
        #     match_pred, _, _ = CM.get_match(data['conf_matrix_pred'], thr=self.confidence_threshold_metric, mutual=self.mutual_nearest)
        #
        #     '''Inlier Ratio (IR)'''
        #     ir = self.compute_inlier_ratio(match_pred, data, self.inlier_thr,
        #                                    s2t_flow=s2t_flow if self.dataset == "4dmatch" else None)
        #     loss_info.update({"Inlier Ratio": ir.mean()})
        #
        #     if self.dataset == '3dmatch':
        #
        #         '''Feature Matching Recall (FMR)'''
        #         fmr = (ir > self.fmr_thr).float().sum() / len(ir)
        #         loss_info.update({"Feature Matching Recall": fmr})
        #
        #         '''Registration Recall (RR)'''
        #         rot_, trn_ = self.ransac_regist_coarse(data['s_pcd'], data['t_pcd'], src_mask, tgt_mask , match_pred)
        #         rot, trn = rot_.to(data['s_pcd']) , trn_.to(data['s_pcd'])
        #         rr = self.compute_registration_recall(rot, trn, data, self.registration_threshold)
        #         loss_info.update({'Registration_Recall': rr})
        # with procrustes repositioning, each intermediate layer's matching is
        # supervised with the same focal (and optionally motion) losses
        if self.positioning_type == "procrustes":
            for layer_ind in data["position_layers"]:
                # compute focal loss
                rpe_conf_matrix = data["position_layers"][layer_ind]["conf_matrix"]
                focal_rpe = self.compute_correspondence_loss(rpe_conf_matrix, conf_matrix_gt, weight=c_weight)
                recall, precision = self.compute_match_recall(conf_matrix_gt,
                                                              data["position_layers"][layer_ind]['match_pred'])
                # loss_info.update({'focal_layer_%d' % layer_ind: focal_rpe, 'recall_layer_%d' % layer_ind: recall,
                #                   'precision_layer_%d' % layer_ind: precision})
                loss = loss + self.mat_w * focal_rpe
                if recall >0.01 and self.mot_w > 0:
                    R_s2t_pred = data["position_layers"][layer_ind]["R_s2t_pred"]
                    t_s2t_pred = data["position_layers"][layer_ind]["t_s2t_pred"]
                    src_pcd_wrapped_pred = (torch.matmul(R_s2t_pred, data['s_pcd'].transpose(1, 2)) + t_s2t_pred).transpose(1, 2)
                    sflow_pred = src_pcd_wrapped_pred - data['s_pcd']
                    if self.dataset == '4dmatch':
                        spcd_deformed = data['s_pcd'] + s2t_flow
                        src_pcd_wrapped_gt = ( torch.matmul(R_s2t_gt, spcd_deformed.transpose(1, 2)) + t_s2t_gt).transpose(1, 2)
                    else: # 3dmatch
                        src_pcd_wrapped_gt = ( torch.matmul(R_s2t_gt, data['s_pcd'].transpose(1, 2)) + t_s2t_gt).transpose(1, 2)
                    sflow_gt = src_pcd_wrapped_gt - data['s_pcd']
                    e1 = torch.sum(torch.abs(sflow_pred - sflow_gt), 2) #[data['src_mask']]
                    e1 = e1[s_overlap_mask] # [data['src_mask']]
                    l1_loss = torch.mean(e1)
                    loss = loss + self.mot_w * l1_loss
        return loss
@staticmethod
def compute_nrfmr(match_pred, data, recall_thr=0.04):
s_pcd, t_pcd = data['s_pcd'], data['t_pcd']
s_pcd_raw = data['src_pcd_list']
sflow_list = data['sflow_list']
metric_index_list = data['metric_index_list']
batched_rot = data['batched_rot'] # B,3,3
batched_trn = data['batched_trn']
nrfmr = 0.
for i in range(len(s_pcd_raw)):
# use the match prediction as the motion anchor
match_pred_i = match_pred[match_pred[:, 0] == i]
s_id, t_id = match_pred_i[:, 1], match_pred_i[:, 2]
s_pcd_matched = s_pcd[i][s_id]
t_pcd_matched = t_pcd[i][t_id]
motion_pred = t_pcd_matched - s_pcd_matched
if len(s_pcd_matched) >= 3 :
# get the wrapped metric points
metric_index = metric_index_list[i]
sflow = sflow_list[i]
s_pcd_raw_i = s_pcd_raw[i]
metric_pcd = s_pcd_raw_i[metric_index]
metric_sflow = sflow[metric_index]
metric_pcd_deformed = metric_pcd + metric_sflow
metric_pcd_wrapped_gt = (torch.matmul(batched_rot[i], metric_pcd_deformed.T) + batched_trn[i]).T
# blend the motion for metric points
try:
metric_motion_pred, valid_mask = MatchMotionLoss.blend_anchor_motion(
metric_pcd.cpu().numpy(), s_pcd_matched.cpu().numpy(), motion_pred.cpu().numpy(), knn=3,
search_radius=0.1)
metric_pcd_wrapped_pred = metric_pcd + torch.from_numpy(metric_motion_pred).to(metric_pcd)
dist = torch.sqrt(torch.sum((metric_pcd_wrapped_pred - metric_pcd_wrapped_gt) ** 2, dim=1))
r = (dist < recall_thr).float().sum() / len(dist)
except :
r = 0
nrfmr = nrfmr + r
debug = False
if debug:
import mayavi.mlab as mlab
c_red = (224. / 255., 0 / 255., 125 / 255.)
c_pink = (224. / 255., 75. / 255., 232. / 255.)
c_blue = (0. / 255., 0. / 255., 255. / 255.)
scale_factor = 0.013
metric_pcd_wrapped_gt = metric_pcd_wrapped_gt.cpu()
metric_pcd_wrapped_pred = metric_pcd_wrapped_pred.cpu()
err = metric_pcd_wrapped_pred - metric_pcd_wrapped_gt
mlab.points3d(metric_pcd_wrapped_gt[:, 0], metric_pcd_wrapped_gt[:, 1], metric_pcd_wrapped_gt[:, 2],
scale_factor=scale_factor, color=c_pink)
mlab.points3d(metric_pcd_wrapped_pred[:, 0], metric_pcd_wrapped_pred[:, 1],
metric_pcd_wrapped_pred[:, 2], scale_factor=scale_factor, color=c_blue)
mlab.quiver3d(metric_pcd_wrapped_gt[:, 0], metric_pcd_wrapped_gt[:, 1], metric_pcd_wrapped_gt[:, 2],
err[:, 0], err[:, 1], err[:, 2],
scale_factor=1, mode='2ddash', line_width=1.)
mlab.show()
nrfmr = nrfmr / len(s_pcd_raw)
return nrfmr
    @staticmethod
    def blend_anchor_motion(query_loc, reference_loc, reference_flow, knn=3, search_radius=0.1):
        '''approximate flow on query points
        this function assume query points are sub- or un-sampled from reference locations

        Inverse-distance-weighted blending of the flow of each query point's
        knn reference neighbours; neighbours beyond search_radius are ignored.

        @param query_loc:[m,3] points to estimate flow for
        @param reference_loc:[n,3] anchor locations
        @param reference_flow:[n,3] anchor flow vectors
        @param knn: number of nearest anchors to blend
        @return:
            blended_flow:[m,3]
            mask:[m] True where fewer than 3 of the knn anchors were in radius
                (i.e. the blended estimate is unreliable)
        '''
        dists, idx = knn_point_np(knn, reference_loc, query_loc)
        # avoid division by zero for exactly coincident points
        dists[dists < 1e-10] = 1e-10
        mask = dists > search_radius
        # out-of-radius anchors get a huge distance -> near-zero weight
        dists[mask] = 1e+10
        weight = 1.0 / dists
        weight = weight / np.sum(weight, -1, keepdims=True)  # [B,N,3]
        blended_flow = np.sum(reference_flow[idx] * weight.reshape([-1, knn, 1]), axis=1, keepdims=False)
        mask = mask.sum(axis=1) < 3
        return blended_flow, mask
def compute_correspondence_loss(self, conf, conf_gt, weight=None):
'''
@param conf: [B, L, S]
@param conf_gt: [B, L, S]
@param weight: [B, L, S]
@return:
'''
pos_mask = conf_gt == 1
neg_mask = conf_gt == 0
pos_w, neg_w = self.pos_w, self.neg_w
#corner case assign a wrong gt
if not pos_mask.any():
pos_mask[0, 0, 0] = True
if weight is not None:
weight[0, 0, 0] = 0.
pos_w = 0.
if not neg_mask.any():
neg_mask[0, 0, 0] = True
if weight is not None:
weight[0, 0, 0] = 0.
neg_w = 0.
# focal loss
conf = torch.clamp(conf, 1e-6, 1 - 1e-6)
alpha = self.focal_alpha
gamma = self.focal_gamma
if self.match_type == "dual_softmax":
pos_conf = conf[pos_mask]
loss_pos = - alpha * torch.pow(1 - pos_conf, gamma) * pos_conf.log()
if weight is not None:
loss_pos = loss_pos * weight[pos_mask]
loss = pos_w * loss_pos.mean()
return loss
elif self.match_type == "sinkhorn":
# no supervision on dustbin row & column.
loss_pos = - alpha * torch.pow(1 - conf[pos_mask], gamma) * (conf[pos_mask]).log()
loss_neg = - alpha * torch.pow(conf[neg_mask], gamma) * (1 - conf[neg_mask]).log()
loss = pos_w * loss_pos.mean() + neg_w * loss_neg.mean()
return loss
def match_2_conf_matrix(self, matches_gt, matrix_pred):
matrix_gt = torch.zeros_like(matrix_pred)
for b, match in enumerate (matches_gt) :
matrix_gt [ b][ match[0], match[1] ] = 1
return matrix_gt
@staticmethod
def compute_match_recall(conf_matrix_gt, match_pred) : #, s_pcd, t_pcd, search_radius=0.3):
'''
@param conf_matrix_gt:
@param match_pred:
@return:
'''
pred_matrix = torch.zeros_like(conf_matrix_gt)
b_ind, src_ind, tgt_ind = match_pred[:, 0], match_pred[:, 1], match_pred[:, 2]
pred_matrix[b_ind, src_ind, tgt_ind] = 1.
true_positive = (pred_matrix == conf_matrix_gt) * conf_matrix_gt
recall = true_positive.sum() / conf_matrix_gt.sum()
precision = true_positive.sum() / max(len(match_pred), 1)
return recall, precision
    @staticmethod
    def ransac_regist_coarse(batched_src_pcd, batched_tgt_pcd, src_mask, tgt_mask, match_pred ):
        """Per-sample RANSAC registration from predicted coarse matches.

        Args:
            batched_src_pcd: [B, N, 3] padded source clouds (tensor).
            batched_tgt_pcd: [B, M, 3] padded target clouds (tensor).
            src_mask: [B, N] validity mask (padding removed via its sum).
            tgt_mask: [B, M] validity mask.
            match_pred: [K, 3] predicted (batch, src, tgt) match indices.

        Returns:
            (rot [B, 3, 3], trn [B, 3, 1]) stacked CPU tensors; identity/zero
            for samples with fewer than 3 matches.
        """
        s_len = src_mask.sum(dim=1).int()
        t_len = tgt_mask.sum(dim=1).int()
        bsize = len(batched_src_pcd)
        batched_src_pcd = MatchMotionLoss.tensor2numpy( batched_src_pcd)
        batched_tgt_pcd = MatchMotionLoss.tensor2numpy( batched_tgt_pcd)
        match_pred = MatchMotionLoss.tensor2numpy(match_pred)
        rot = []
        trn = []
        for i in range(bsize):
            # strip padding before handing the clouds to Open3D
            s_pcd = batched_src_pcd[i][:s_len[i]]
            t_pcd = batched_tgt_pcd[i][:t_len[i]]
            pair_i = match_pred[:, 0] == i
            n_pts = pair_i.sum()
            if n_pts < 3 :
                # not enough correspondences for a rigid fit: identity fallback
                rot.append(torch.eye(3))
                trn.append(torch.zeros((3,1)))
                continue
            ind = match_pred[pair_i]
            s_ind, t_ind = ind[:, 1], ind[:, 2]
            pose = ransac_pose_estimation(s_pcd, t_pcd, [s_ind, t_ind], distance_threshold=0.05)
            # copy: Open3D returns a read-only transformation array
            pose = pose.copy()
            rot.append(torch.from_numpy(pose[:3,:3]))
            trn.append(torch.from_numpy(pose[:3,3:]))
        return torch.stack(rot, dim=0 ), torch.stack(trn , dim=0)#ndarray
    @staticmethod
    def compute_inlier_ratio(match_pred, data, inlier_thr, s2t_flow=None):
        """Per-sample inlier ratio of predicted matches.

        A match is an inlier when, after applying the GT motion (rigid, plus
        scene flow for 4DMatch) to the source point, it lies within
        *inlier_thr* of its matched target point.

        Args:
            match_pred: [K, 3] predicted (batch, src, tgt) index triples.
            data: batch dict with 's_pcd', 't_pcd' and the GT rigid motion.
            inlier_thr: inlier distance threshold.
            s2t_flow: optional [B, N, 3] GT scene flow (4DMatch only).

        Returns:
            [B] tensor of inlier ratios (0 for samples with < 3 matches).
        """
        s_pcd, t_pcd = data['s_pcd'], data['t_pcd'] #B,N,3
        batched_rot = data['batched_rot'] #B,3,3
        batched_trn = data['batched_trn']
        if s2t_flow is not None: # 4dmatch: deform first, then apply rigid motion
            s_pcd_deformed = s_pcd + s2t_flow
            s_pcd_wrapped = (torch.matmul(batched_rot, s_pcd_deformed.transpose(1, 2)) + batched_trn).transpose(1,2)
        else:  # 3dmatch: rigid motion only
            s_pcd_wrapped = (torch.matmul(batched_rot, s_pcd.transpose(1, 2)) + batched_trn).transpose(1,2)
        s_pcd_matched = s_pcd_wrapped [match_pred[:,0], match_pred[:,1]]
        t_pcd_matched = t_pcd [match_pred[:,0], match_pred[:,2]]
        # squared-distance comparison avoids the sqrt
        inlier = torch.sum( (s_pcd_matched - t_pcd_matched)**2 , dim= 1) < inlier_thr**2
        bsize = len(s_pcd)
        IR=[]
        for i in range(bsize):
            pair_i = match_pred[:, 0] == i
            n_match = pair_i.sum()
            inlier_i = inlier[pair_i]
            n_inlier = inlier_i.sum().float()
            if n_match <3:
                # too few matches to be meaningful: report 0
                IR.append( n_match.float()*0)
            else :
                IR.append(n_inlier/n_match)
        return torch.stack(IR, dim=0)
    @staticmethod
    def compute_registration_recall(R_est, t_est, data, thr=0.2):
        """Fraction of samples whose estimated pose is within *thr* RMSE of GT.

        Uses ``computeTransformationErr`` on the residual transform
        gt^{-1} @ pred together with the benchmark's information matrices.

        Args:
            R_est: [B, 3, 3] estimated rotations.
            t_est: [B, 3, 1] estimated translations.
            data: batch dict with 'batched_rot'/'batched_trn' and 'gt_cov'.
            thr: RMSE threshold (the comparison is against thr**2).

        Returns:
            Registration recall in [0, 1]; 0. when no covariances are provided.
        """
        bs = len(R_est)
        success = 0.
        if data['gt_cov'] is not None:
            err2 = thr ** 2
            # assemble 4x4 homogeneous GT and predicted transforms
            gt = np.zeros( (bs, 4, 4))
            gt[:, -1,-1] = 1
            gt[:, :3, :3] = data['batched_rot'].cpu().numpy()
            gt[:, :3, 3:] = data['batched_trn'].cpu().numpy()
            pred = np.zeros((bs, 4, 4))
            pred[:, -1, -1] = 1
            pred[:, :3, :3] = R_est.detach().cpu().numpy()
            pred[:, :3, 3:] = t_est.detach().cpu().numpy()
            for i in range(bs):
                p = computeTransformationErr( np.linalg.inv(gt[i]) @ pred[i], data['gt_cov'][i])
                if p <= err2:
                    success += 1
            rr = success / bs
            return rr
        else :
            return 0.
@staticmethod
def tensor2numpy(tensor):
if tensor.requires_grad:
tensor=tensor.detach()
return tensor.cpu().numpy() | 18,271 | 38.042735 | 137 | py |
lepard | lepard-main/models/position_encoding.py | import math
import torch
from torch import nn
class VolumetricPositionEncoding(nn.Module):
    def __init__(self, config):
        """Volumetric position encoding (sinusoidal or rotary) over voxel coords.

        Args:
            config: object with
                feature_dim: channel width of the encoded features.
                vol_bnds: volume bounds; vol_bnds[0] is the origin corner.
                voxel_size: edge length of one voxel.
                pe_type: 'sinusoidal' or 'rotary'.
        """
        super().__init__()
        self.feature_dim = config.feature_dim
        self.vol_bnds = config.vol_bnds
        self.voxel_size = config.voxel_size
        # origin corner of the volume; lazily converted to a tensor in voxelize()
        self.vol_origin = self.vol_bnds[0]
        self.pe_type = config.pe_type
def voxelize(self, xyz):
'''
@param xyz: B,N,3
@return: B,N,3
'''
if type ( self.vol_origin ) == list :
self.vol_origin = torch.FloatTensor(self.vol_origin ).view(1, 1, -1).to( xyz.device )
return (xyz - self.vol_origin) / self.voxel_size
@staticmethod
def embed_rotary(x, cos, sin):
'''
@param x: [B,N,d]
@param cos: [B,N,d] [θ0,θ0,θ1,θ1,θ2,θ2......θd/2-1,θd/2-1]
@param sin: [B,N,d] [θ0,θ0,θ1,θ1,θ2,θ2......θd/2-1,θd/2-1]
@return:
'''
x2 = torch.stack([-x[..., 1::2], x[..., ::2]], dim=-1).reshape_as(x).contiguous()
x = x * cos + x2 * sin
return x
@staticmethod
def embed_pos(pe_type, x, pe):
""" combine feature and position code
"""
if pe_type == 'rotary':
return VolumetricPositionEncoding.embed_rotary(x, pe[..., 0], pe[..., 1])
elif pe_type == 'sinusoidal':
return x + pe
else:
raise KeyError()
def forward(self, XYZ):
'''
@param XYZ: [B,N,3]
@return:
'''
bsize, npoint, _ = XYZ.shape
vox = self.voxelize( XYZ)
x_position, y_position, z_position = vox[..., 0:1], vox[...,1:2], vox[...,2:3]
div_term = torch.exp( torch.arange(0, self.feature_dim // 3, 2, dtype=torch.float, device=XYZ.device) * (-math.log(10000.0) / (self.feature_dim // 3)))
div_term = div_term.view( 1,1, -1) # [1, 1, d//6]
sinx = torch.sin(x_position * div_term) # [B, N, d//6]
cosx = torch.cos(x_position * div_term)
siny = torch.sin(y_position * div_term)
cosy = torch.cos(y_position * div_term)
sinz = torch.sin(z_position * div_term)
cosz = torch.cos(z_position * div_term)
if self.pe_type == 'sinusoidal' :
position_code = torch.cat( [ sinx, cosx, siny, cosy, sinz, cosz] , dim=-1 )
elif self.pe_type == "rotary" :
# sin/cos [θ0,θ1,θ2......θd/6-1] -> sin/cos [θ0,θ0,θ1,θ1,θ2,θ2......θd/6-1,θd/6-1]
sinx, cosx, siny, cosy, sinz, cosz = map( lambda feat:torch.stack([feat, feat], dim=-1).view(bsize, npoint, -1),
[ sinx, cosx, siny, cosy, sinz, cosz] )
sin_pos = torch.cat([sinx,siny,sinz], dim=-1)
cos_pos = torch.cat([cosx,cosy,cosz], dim=-1)
position_code = torch.stack( [cos_pos, sin_pos] , dim=-1)
else:
raise KeyError()
if position_code.requires_grad:
position_code = position_code.detach()
return position_code | 2,989 | 33.367816 | 160 | py |
lepard | lepard-main/models/backbone.py | from models.blocks import *
import torch.nn.functional as F
import numpy as np
class KPFCN(nn.Module):
    def __init__(self, config):
        """Build the KPConv encoder/decoder from the architecture string list.

        Args:
            config: object with ``architecture`` (list of block-name strings),
                sampling/radius settings and the feature dimensions below.
                The block names drive the construction: 'pool'/'strided' start
                a coarser layer, 'upsample' switches to the decoder,
                'equivariant' requires out_dim % 3 == 0.
        """
        super(KPFCN, self).__init__()

        ############
        # Parameters
        ############
        layer = 0
        # effective receptive radius of the first layer
        r = config.first_subsampling_dl * config.conv_radius
        in_dim = config.in_feats_dim
        out_dim = config.first_feats_dim

        #####################
        # List Encoder blocks
        #####################
        self.encoder_blocks = nn.ModuleList()
        self.encoder_skip_dims = []
        self.encoder_skips = []

        # Loop over consecutive blocks
        for block_i, block in enumerate(config.architecture):

            # Check equivariance
            if ('equivariant' in block) and (not out_dim % 3 == 0):
                raise ValueError('Equivariant block but features dimension is not a factor of 3')

            # Detect change to next layer for skip connection
            if np.any([tmp in block for tmp in ['pool', 'strided', 'upsample', 'global']]):
                self.encoder_skips.append(block_i)
                self.encoder_skip_dims.append(in_dim)

            # Detect upsampling block to stop
            if 'upsample' in block:
                break

            # Apply the good block function defining tf ops
            self.encoder_blocks.append(block_decider(block,
                                                    r,
                                                    in_dim,
                                                    out_dim,
                                                    layer,
                                                    config))

            # Update dimension of input from output
            if 'simple' in block:
                in_dim = out_dim // 2
            else:
                in_dim = out_dim

            # Detect change to a subsampled layer
            if 'pool' in block or 'strided' in block:
                # Update radius and feature dimension for next layer
                layer += 1
                r *= 2
                out_dim *= 2

        #####################
        # bottleneck output & input layer
        # (1x1 convs projecting bottleneck features to/from coarse_feature_dim)
        self.coarse_out = nn.Conv1d(in_dim//2, config.coarse_feature_dim, kernel_size=1, bias=True)
        coarse_in_dim = config.coarse_feature_dim
        self.coarse_in = nn.Conv1d(coarse_in_dim, in_dim//2, kernel_size=1, bias=True)

        #####################
        # List Decoder blocks
        #####################
        # Save all block operations in a list of modules
        self.decoder_blocks = nn.ModuleList()
        self.decoder_concats = []

        # Find first upsampling block
        start_i = 0
        for block_i, block in enumerate(config.architecture):
            if 'upsample' in block:
                start_i = block_i
                break

        # Loop over consecutive blocks
        for block_i, block in enumerate(config.architecture[start_i:]):

            # Add dimension of skip connection concat
            if block_i > 0 and 'upsample' in config.architecture[start_i + block_i - 1]:
                in_dim += self.encoder_skip_dims[layer]
                self.decoder_concats.append(block_i)

            # Apply the good block function defining tf ops
            self.decoder_blocks.append(block_decider(block,
                                                    r,
                                                    in_dim,
                                                    out_dim,
                                                    layer,
                                                    config))

            # Update dimension of input from output
            in_dim = out_dim

            # Detect change to a subsampled layer
            if 'upsample' in block:
                # Update radius and feature dimension for next layer
                layer -= 1
                r *= 0.5
                out_dim = out_dim // 2

        #####################
        # fine output layer
        #####################
        fine_feature_dim = config.fine_feature_dim
        self.fine_out = nn.Conv1d(out_dim, fine_feature_dim, kernel_size=1, bias=True)
    def forward(self, batch, phase = 'encode'):
        """Run the KPConv backbone.

        Args:
            batch: dict with 'features' plus the neighborhood/pooling indices
                the KPConv blocks read from it.
            phase: only 'coarse' is implemented; any other value returns None.
                (The 'fine' phase below is kept commented out.)

        Returns:
            [N, coarse_feature_dim] coarse features when phase == 'coarse',
            otherwise None (implicit).
        """
        # Get input features
        if phase == 'coarse' :
            x = batch['features'].clone().detach()

            # 1. joint encoder part
            self.skip_x = []
            for block_i, block_op in enumerate(self.encoder_blocks):
                if block_i in self.encoder_skips:
                    self.skip_x.append(x)
                x = block_op(x, batch)  # [N,C]

            # decode only down to the bottleneck (block_i == 1), then project
            for block_i, block_op in enumerate(self.decoder_blocks):
                if block_i in self.decoder_concats:
                    x = torch.cat([x, self.skip_x.pop()], dim=1)
                x = block_op(x, batch)
                if block_i == 1 :
                    coarse_feats = x.transpose(0,1).unsqueeze(0) #[B, C, N]
                    coarse_feats = self.coarse_out(coarse_feats) #[B, C, N]
                    coarse_feats = coarse_feats.transpose(1,2).squeeze(0)
                    return coarse_feats #[N,C2]
        #
        # elif phase == "fine":
        #
        #     coarse_feats = batch['coarse_feats']
        #     coarse_feats = coarse_feats.transpose(0,1).unsqueeze(0)
        #     coarse_feats = self.coarse_in(coarse_feats)
        #     x = coarse_feats.transpose(1,2).squeeze(0)
        #
        #
        #     for block_i, block_op in enumerate(self.decoder_blocks):
        #         if block_i > 1 :
        #             if block_i in self.decoder_concats:
        #                 x = torch.cat([x, self.skip_x.pop()], dim=1)
        #             x = block_op(x, batch)
        #
        #     fine_feats = x.transpose(0, 1).unsqueeze(0)  # [1, C, N]
        #     fine_feats = self.fine_out(fine_feats)  # [1, C, N]
        #     fine_feats = fine_feats.transpose(1, 2).squeeze(0)
        #
        #     return fine_feats
lepard | lepard-main/models/transformer.py | import copy
import math
import torch
from torch import nn
from torch.nn import Module, Dropout
from models.position_encoding import VolumetricPositionEncoding as VolPE
from models.matching import Matching
from models.procrustes import SoftProcrustesLayer
import numpy as np
import random
from scipy.spatial.transform import Rotation
class GeometryAttentionLayer(nn.Module):
    def __init__(self, config):
        """One position-aware multi-head attention layer.

        Args:
            config: dict with
                feature_dim: model width d_model (must be divisible by n_head).
                n_head: number of attention heads.
                pe_type: 'sinusoidal' (additive) or 'rotary' position encoding.
        """
        super(GeometryAttentionLayer, self).__init__()

        d_model = config['feature_dim']
        nhead = config['n_head']

        self.dim = d_model // nhead   # per-head dimension
        self.nhead = nhead
        self.pe_type = config['pe_type']

        # multi-head attention
        self.q_proj = nn.Linear(d_model, d_model, bias=False)
        self.k_proj = nn.Linear(d_model, d_model, bias=False)
        self.v_proj = nn.Linear(d_model, d_model, bias=False)
        # self.attention = Attention() #LinearAttention() if attention == 'linear' else FullAttention()
        self.merge = nn.Linear(d_model, d_model, bias=False)

        # feed-forward network (input is [x ; attention message], hence 2*d_model)
        self.mlp = nn.Sequential(
            nn.Linear(d_model*2, d_model*2, bias=False),
            nn.ReLU(True),
            nn.Linear(d_model*2, d_model, bias=False),
        )

        # norm and dropout
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
    def forward(self, x, source, x_pe, source_pe, x_mask=None, source_mask=None):
        """Attend *x* (queries) over *source* (keys/values) with position codes.

        Self-attention when x is source; cross-attention otherwise.

        Args:
            x: [B, L, C] query-side features (also the residual input).
            source: [B, S, C] key/value-side features.
            x_pe, source_pe: position codes (None in the entangled setting,
                where position is already baked into the features).
            x_mask: [B, L] validity mask of x (or None).
            source_mask: [B, S] validity mask of source (or None).

        Returns:
            [B, L, C] updated query-side features (residual + normed message).
        """
        bs = x.size(0)
        q, k, v = x, source, source
        qp, kvp = x_pe, source_pe
        q_mask, kv_mask = x_mask, source_mask

        if self.pe_type == 'sinusoidal':
            #w(x+p), attention is all you need : https://arxiv.org/abs/1706.03762
            if qp is not None: # disentangeld
                q = q + qp
                k = k + kvp
            qw = self.q_proj(q).view(bs, -1, self.nhead, self.dim) # [N, L, (H, D)]
            kw = self.k_proj(k).view(bs, -1, self.nhead, self.dim) # [N, S, (H, D)]
            vw = self.v_proj(v).view(bs, -1, self.nhead, self.dim)

        elif self.pe_type == 'rotary':
            #Rwx roformer : https://arxiv.org/abs/2104.09864
            qw = self.q_proj(q)
            kw = self.k_proj(k)
            vw = self.v_proj(v)
            if qp is not None: # disentangeld
                q_cos, q_sin = qp[...,0] ,qp[...,1]
                k_cos, k_sin = kvp[...,0],kvp[...,1]
                # rotary encodes relative position through the rotation of q and k
                qw = VolPE.embed_rotary(qw, q_cos, q_sin)
                kw = VolPE.embed_rotary(kw, k_cos, k_sin)
            qw = qw.view(bs, -1, self.nhead, self.dim)
            kw = kw.view(bs, -1, self.nhead, self.dim)
            vw = vw.view(bs, -1, self.nhead, self.dim)

        else:
            raise KeyError()

        # attention: [B, L, S, H] logits, masked entries get -inf before softmax
        a = torch.einsum("nlhd,nshd->nlsh", qw, kw)
        if kv_mask is not None:
            a.masked_fill_( q_mask[:, :, None, None] * (~kv_mask[:, None, :, None]), float('-inf'))
        # scale by sqrt(per-head dim)
        a = a / qw.size(3) **0.5
        a = torch.softmax(a, dim=2)
        o = torch.einsum("nlsh,nshd->nlhd", a, vw).contiguous()  # [N, L, (H, D)]

        message = self.merge(o.view(bs, -1, self.nhead*self.dim))  # [N, L, C]
        message = self.norm1(message)

        # feed-forward network on [x ; message]
        message = self.mlp(torch.cat([x, message], dim=2))
        message = self.norm2(message)

        e = x + message

        return e
class RepositioningTransformer(nn.Module):
    def __init__(self, config):
        """Stack of self/cross attention layers with optional repositioning.

        Args:
            config: dict with 'feature_dim', 'n_head', 'layer_types' (list of
                'self' | 'cross' | 'positioning'), 'positioning_type'
                ('procrustes' | 'oracle' | 'randSO3'), 'pe_type', 'entangled',
                plus sub-configs 'feature_matching' and 'procrustes' used by
                the procrustes positioning layer.

        Raises:
            KeyError: for an unknown layer or positioning type.
        """
        super(RepositioningTransformer, self).__init__()

        self.d_model = config['feature_dim']
        self.nhead = config['n_head']
        self.layer_types = config['layer_types']
        self.positioning_type = config['positioning_type']

        self.pe_type = config['pe_type']
        self.entangled = config['entangled']

        self.positional_encoding = VolPE(config)

        encoder_layer = GeometryAttentionLayer(config)

        self.layers = nn.ModuleList()
        for l_type in self.layer_types:
            if l_type in ['self','cross']:
                # each attention layer is an independent deep copy of the template
                self.layers.append( copy.deepcopy(encoder_layer))
            elif l_type == "positioning":
                if self.positioning_type == 'procrustes':
                    # matching + soft procrustes pair used to re-estimate the pose
                    positioning_layer = nn.ModuleList()
                    positioning_layer.append( Matching(config['feature_matching']))
                    positioning_layer.append( SoftProcrustesLayer(config['procrustes']) )
                    self.layers.append(positioning_layer)
                elif self.positioning_type in ['oracle', 'randSO3']:
                    # these variants need no learned sub-modules
                    self.layers.append( None)
                else :
                    raise KeyError(self.positioning_type + " undefined positional encoding type")
            else:
                raise KeyError()

        self._reset_parameters()
    def forward(self, src_feat, tgt_feat, s_pcd, t_pcd, src_mask, tgt_mask, data, T = None, timers = None):
        """Run the attention stack, recomputing position codes at repositioning layers.

        Args:
            src_feat: [B, S, C] source features; C must equal d_model.
            tgt_feat: [B, T, C] target features.
            s_pcd: [B, S, 3] source points; t_pcd: [B, T, 3] target points.
            src_mask: [B, S] and tgt_mask: [B, T] validity masks.
            data: batch dict; per-positioning-layer outputs are stored under
                data['position_layers'].
            T: optional (R, t) initial transform applied to the source cloud
                before the first position encoding.
            timers: optional profiling helper with tic/toc.

        Returns:
            (src_feat, tgt_feat, src_pe, tgt_pe) after the final layer.
        """
        self.timers = timers

        assert self.d_model == src_feat.size(2), "the feature number of src and transformer must be equal"

        if T is not None:
            R, t = T
            src_pcd_wrapped = (torch.matmul(R, s_pcd.transpose(1, 2)) + t).transpose(1, 2)
            tgt_pcd_wrapped = t_pcd
        else:
            src_pcd_wrapped = s_pcd
            tgt_pcd_wrapped = t_pcd

        src_pe = self.positional_encoding( src_pcd_wrapped)
        tgt_pe = self.positional_encoding( tgt_pcd_wrapped)

        if not self.entangled:
            # disentangled: position codes are handed to each attention layer
            # separately and refreshed after every repositioning step

            position_layer = 0
            data.update({"position_layers":{}})

            for layer, name in zip(self.layers, self.layer_types) :

                if name == 'self':
                    if self.timers: self.timers.tic('self atten')
                    src_feat = layer(src_feat, src_feat, src_pe, src_pe, src_mask, src_mask,)
                    tgt_feat = layer(tgt_feat, tgt_feat, tgt_pe, tgt_pe, tgt_mask, tgt_mask)
                    if self.timers: self.timers.toc('self atten')

                elif name == 'cross':
                    if self.timers: self.timers.tic('cross atten')
                    src_feat = layer(src_feat, tgt_feat, src_pe, tgt_pe, src_mask, tgt_mask)
                    tgt_feat = layer(tgt_feat, src_feat, tgt_pe, src_pe, tgt_mask, src_mask)
                    if self.timers: self.timers.toc('cross atten')

                elif name =='positioning':

                    if self.positioning_type == 'procrustes':
                        # re-estimate the rigid pose from current features, then
                        # recompute the source position code in the warped frame
                        conf_matrix, match_pred = layer[0](src_feat, tgt_feat, src_pe, tgt_pe, src_mask, tgt_mask, data, pe_type=self.pe_type)
                        position_layer += 1
                        data["position_layers"][position_layer] = {"conf_matrix": conf_matrix, "match_pred": match_pred}
                        if self.timers: self.timers.tic('procrustes_layer')
                        R, t, R_forwd, t_forwd, condition, solution_mask = layer[1] (conf_matrix, s_pcd, t_pcd, src_mask, tgt_mask)
                        if self.timers: self.timers.toc('procrustes_layer')
                        data["position_layers"][position_layer].update({
                            "R_s2t_pred": R, "t_s2t_pred": t, "solution_mask": solution_mask, "condition": condition})
                        src_pcd_wrapped = (torch.matmul(R_forwd, s_pcd.transpose(1, 2)) + t_forwd).transpose(1, 2)
                        tgt_pcd_wrapped = t_pcd
                        src_pe = self.positional_encoding(src_pcd_wrapped)
                        tgt_pe = self.positional_encoding(tgt_pcd_wrapped)

                    elif self.positioning_type == 'randSO3':
                        # ablation: reposition the source by a random rotation
                        src_pcd_wrapped = self.rand_rot_pcd( s_pcd, src_mask)
                        tgt_pcd_wrapped = t_pcd
                        src_pe = self.positional_encoding(src_pcd_wrapped)
                        tgt_pe = self.positional_encoding(tgt_pcd_wrapped)

                    elif self.positioning_type == 'oracle':
                        #Note R,t ground truth is only available for computing oracle position encoding
                        rot_gt = data['batched_rot']
                        trn_gt = data['batched_trn']
                        src_pcd_wrapped = (torch.matmul(rot_gt, s_pcd.transpose(1, 2)) + trn_gt).transpose(1, 2)
                        tgt_pcd_wrapped = t_pcd
                        src_pe = self.positional_encoding(src_pcd_wrapped)
                        tgt_pe = self.positional_encoding(tgt_pcd_wrapped)

                    else:
                        raise KeyError(self.positioning_type + " undefined positional encoding type")

                else :
                    raise KeyError

            return src_feat, tgt_feat, src_pe, tgt_pe

        else : # pos. fea. entangeled
            # entangled: fuse position once up front, then run plain attention;
            # positioning layers are no-ops in this mode

            position_layer = 0
            data.update({"position_layers":{}})

            src_feat = VolPE.embed_pos(self.pe_type, src_feat, src_pe)
            tgt_feat = VolPE.embed_pos(self.pe_type, tgt_feat, tgt_pe)

            for layer, name in zip(self.layers, self.layer_types):
                if name == 'self':
                    if self.timers: self.timers.tic('self atten')
                    src_feat = layer(src_feat, src_feat, None, None, src_mask, src_mask, )
                    tgt_feat = layer(tgt_feat, tgt_feat, None, None, tgt_mask, tgt_mask)
                    if self.timers: self.timers.toc('self atten')
                elif name == 'cross':
                    if self.timers: self.timers.tic('cross atten')
                    src_feat = layer(src_feat, tgt_feat, None, None, src_mask, tgt_mask)
                    tgt_feat = layer(tgt_feat, src_feat, None, None, tgt_mask, src_mask)
                    if self.timers: self.timers.toc('cross atten')
                elif name == 'positioning':
                    pass

            return src_feat, tgt_feat, src_pe, tgt_pe
    def rand_rot_pcd (self, pcd, mask):
        '''Rotate each cloud by a random SO(3) rotation about its own centroid.

        @param pcd: B, N, 3 padded point clouds
        @param mask: B, N validity mask of the padding
        @return: B, N, 3 randomly rotated clouds

        NOTE(review): `pcd[~mask]=0.` mutates the caller's tensor in place —
        it zeroes the padded entries of the input cloud. Presumably harmless
        since those entries are padding, but confirm no caller relies on them.
        '''
        pcd[~mask]=0.
        N = mask.shape[1]
        n_points = mask.sum(dim=1, keepdim=True).view(-1,1,1)
        bs = pcd.shape[0]
        # random Euler angles in [0, 2*pi) per batch element
        euler_ab = np.random.rand(bs, 3) * np.pi * 2  # anglez, angley, anglex
        rand_rot = torch.from_numpy( Rotation.from_euler('zyx', euler_ab).as_matrix() ).to(pcd)
        # centroid over VALID points only: the mean over N entries (padding is
        # now zero) is rescaled by N / n_points
        pcd_u = pcd.mean(dim=1, keepdim=True) * N / n_points
        pcd_centered = pcd - pcd_u
        pcd_rand_rot = torch.matmul( rand_rot, pcd_centered.transpose(1,2) ).transpose(1,2) + pcd_u
        return pcd_rand_rot
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p) | 10,666 | 36.559859 | 142 | py |
lepard | lepard-main/models/procrustes.py | import torch
import torch.nn as nn
def topk(data, num_topk):
    """Return the num_topk largest entries of *data* and their indices, in descending order."""
    values, indices = data.sort(descending=True)
    return values[:num_topk], indices[:num_topk]
class SoftProcrustesLayer(nn.Module):
def __init__(self, config):
super(SoftProcrustesLayer, self).__init__()
self.sample_rate = config.sample_rate
self.max_condition_num= config.max_condition_num
@staticmethod
def batch_weighted_procrustes( X, Y, w, eps=0.0001):
'''
@param X: source frame [B, N,3]
@param Y: target frame [B, N,3]
@param w: weights [B, N,1]
@param eps:
@return:
'''
# https://ieeexplore.ieee.org/document/88573
bsize = X.shape[0]
device = X.device
W1 = torch.abs(w).sum(dim=1, keepdim=True)
w_norm = w / (W1 + eps)
mean_X = (w_norm * X).sum(dim=1, keepdim=True)
mean_Y = (w_norm * Y).sum(dim=1, keepdim=True)
Sxy = torch.matmul( (Y - mean_Y).transpose(1,2), w_norm * (X - mean_X) )
Sxy = Sxy.cpu().double()
U, D, V = Sxy.svd() # small SVD runs faster on cpu
condition = D.max(dim=1)[0] / D.min(dim=1)[0]
S = torch.eye(3)[None].repeat(bsize,1,1).double()
UV_det = U.det() * V.det()
S[:, 2:3, 2:3] = UV_det.view(-1, 1,1)
svT = torch.matmul( S, V.transpose(1,2) )
R = torch.matmul( U, svT).float().to(device)
t = mean_Y.transpose(1,2) - torch.matmul( R, mean_X.transpose(1,2) )
return R, t, condition
    def forward(self, conf_matrix, src_pcd, tgt_pcd, src_mask, tgt_mask):
        '''
        Differentiable weighted Procrustes fit driven by a soft correspondence matrix.
        @param conf_matrix: (B, N, M) soft matching confidences between source and target points
        @param src_pcd: (B, N, 3) padded source point clouds
        @param tgt_pcd: (B, M, 3) padded target point clouds
        @param src_mask: (B, N) mask of valid (non-padding) source points
        @param tgt_mask: (B, M) mask of valid target points
        @return: R, t (raw solution), R_forwd, t_forwd (copy with unreliable solutions
                 replaced by identity/zero), condition numbers, solution_mask
        '''
        bsize, N, M = conf_matrix.shape
        # subsample correspondence: keep only the top-confidence entries of the flattened matrix
        src_len = src_mask.sum(dim=1)
        tgt_len = tgt_mask.sum(dim=1)
        entry_max, _ = torch.stack([src_len,tgt_len], dim=0).max(dim=0)
        entry_max = (entry_max * self.sample_rate).int()
        sample_n_points = entry_max.float().mean().int() #entry_max.max()
        conf, idx = conf_matrix.view(bsize, -1).sort(descending=True,dim=1)
        w = conf [:, :sample_n_points]
        idx= idx[:, :sample_n_points]
        # recover 2-D (source, target) indices from the flattened index
        idx_src = idx//M #torch.div(idx, M, rounding_mode='trunc')
        idx_tgt = idx%M
        b_index = torch.arange(bsize).view(-1, 1).repeat((1, sample_n_points)).view(-1)
        src_pcd_sampled = src_pcd[b_index, idx_src.view(-1)].view(bsize, sample_n_points, -1)
        tgt_pcd_sampled = tgt_pcd[b_index, idx_tgt.view(-1)].view(bsize, sample_n_points, -1)
        # zero the weights of entries beyond each batch element's own budget (entry_max)
        w_mask = torch.arange(sample_n_points).view(1,-1).repeat(bsize,1).to(w)
        w_mask = w_mask < entry_max[:,None]
        w[~w_mask] = 0.
        # solve
        try :
            R, t, condition = self.batch_weighted_procrustes(src_pcd_sampled, tgt_pcd_sampled, w[...,None])
        except: # fail to get valid solution, this usually happens at the early stage of training
            R = torch.eye(3)[None].repeat(bsize,1,1).type_as(conf_matrix)
            t = torch.zeros(3, 1)[None].repeat(bsize,1,1).type_as(conf_matrix)
            condition = torch.zeros(bsize).type_as(conf_matrix)
        #filter unreliable solution with condition number
        solution_mask = condition < self.max_condition_num
        R_forwd = R.clone()
        t_forwd = t.clone()
        # replace badly-conditioned solutions with the identity transform before forwarding
        R_forwd[~solution_mask] = torch.eye(3).type_as(R)
        t_forwd[~solution_mask] = torch.zeros(3, 1).type_as(R)
        return R, t, R_forwd, t_forwd, condition, solution_mask | 3,591 | 37.623656 | 107 | py
lepard | lepard-main/models/pipeline.py | from models.blocks import *
from models.backbone import KPFCN
from models.transformer import RepositioningTransformer
from models.matching import Matching
from models.procrustes import SoftProcrustesLayer
class Pipeline(nn.Module):
    """Coarse matching pipeline: KPFCN backbone -> repositioning transformer ->
    feature matching -> soft Procrustes rigid-fit. All intermediate results are
    accumulated into the `data` dict."""
    def __init__(self, config):
        super(Pipeline, self).__init__()
        self.config = config
        self.backbone = KPFCN(config['kpfcn_config'])
        self.pe_type = config['coarse_transformer']['pe_type']
        self.positioning_type = config['coarse_transformer']['positioning_type']
        self.coarse_transformer = RepositioningTransformer(config['coarse_transformer'])
        self.coarse_matching = Matching(config['coarse_matching'])
        self.soft_procrustes = SoftProcrustesLayer(config['coarse_transformer']['procrustes'])
    def forward(self, data, timers=None):
        """Run the full coarse pipeline; returns `data` augmented with
        's_pcd'/'t_pcd', 'conf_matrix_pred', 'coarse_match_pred',
        'R_s2t_pred' and 't_s2t_pred'. `timers` is an optional profiler."""
        self.timers = timers
        if self.timers: self.timers.tic('kpfcn backbone encode')
        coarse_feats = self.backbone(data, phase="coarse")
        if self.timers: self.timers.toc('kpfcn backbone encode')
        if self.timers: self.timers.tic('coarse_preprocess')
        src_feats, tgt_feats, s_pcd, t_pcd, src_mask, tgt_mask = self.split_feats (coarse_feats, data)
        data.update({ 's_pcd': s_pcd, 't_pcd': t_pcd })
        if self.timers: self.timers.toc('coarse_preprocess')
        if self.timers: self.timers.tic('coarse feature transformer')
        src_feats, tgt_feats, src_pe, tgt_pe = self.coarse_transformer(src_feats, tgt_feats, s_pcd, t_pcd, src_mask, tgt_mask, data, timers=timers)
        if self.timers: self.timers.toc('coarse feature transformer')
        if self.timers: self.timers.tic('match feature coarse')
        conf_matrix_pred, coarse_match_pred = self.coarse_matching(src_feats, tgt_feats, src_pe, tgt_pe, src_mask, tgt_mask, data, pe_type = self.pe_type)
        data.update({'conf_matrix_pred': conf_matrix_pred, 'coarse_match_pred': coarse_match_pred })
        if self.timers: self.timers.toc('match feature coarse')
        if self.timers: self.timers.tic('procrustes_layer')
        R, t, _, _, _, _ = self.soft_procrustes(conf_matrix_pred, s_pcd, t_pcd, src_mask, tgt_mask)
        data.update({"R_s2t_pred": R, "t_s2t_pred": t})
        if self.timers: self.timers.toc('procrustes_layer')
        return data
    def split_feats(self, geo_feats, data):
        """Scatter the stacked KPFCN coarse features back into padded per-batch
        (B, P_max, C) source/target tensors, together with padded coordinates."""
        pcd = data['points'][self.config['kpfcn_config']['coarse_level']]
        src_mask = data['src_mask']
        tgt_mask = data['tgt_mask']
        src_ind_coarse_split = data[ 'src_ind_coarse_split']
        tgt_ind_coarse_split = data['tgt_ind_coarse_split']
        src_ind_coarse = data['src_ind_coarse']
        tgt_ind_coarse = data['tgt_ind_coarse']
        b_size, src_pts_max = src_mask.shape
        tgt_pts_max = tgt_mask.shape[1]
        # allocate flat padded buffers, then scatter the stacked features/points into them
        src_feats = torch.zeros([b_size * src_pts_max, geo_feats.shape[-1]]).type_as(geo_feats)
        tgt_feats = torch.zeros([b_size * tgt_pts_max, geo_feats.shape[-1]]).type_as(geo_feats)
        src_pcd = torch.zeros([b_size * src_pts_max, 3]).type_as(pcd)
        tgt_pcd = torch.zeros([b_size * tgt_pts_max, 3]).type_as(pcd)
        src_feats[src_ind_coarse_split] = geo_feats[src_ind_coarse]
        tgt_feats[tgt_ind_coarse_split] = geo_feats[tgt_ind_coarse]
        src_pcd[src_ind_coarse_split] = pcd[src_ind_coarse]
        tgt_pcd[tgt_ind_coarse_split] = pcd[tgt_ind_coarse]
        return src_feats.view( b_size , src_pts_max , -1), \
               tgt_feats.view( b_size , tgt_pts_max , -1), \
               src_pcd.view( b_size , src_pts_max , -1), \
               tgt_pcd.view( b_size , tgt_pts_max , -1), \
               src_mask, \
               tgt_mask | 3,685 | 43.95122 | 154 | py
lepard | lepard-main/models/blocks.py | import time
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn.init import kaiming_uniform_
from kernels.kernel_points import load_kernels
# from lib.ply import write_ply
def gather(x, idx, method=2):
    """
    Custom gather operation (several equivalent implementations, some with a
    faster backward pass than plain advanced indexing).
    :param x: input with shape [N, D_1, ... D_d]
    :param idx: indexing with shape [n_1, ..., n_m]
    :param method: choice of implementation:
                   0 - plain advanced indexing,
                   1 - expand + torch.gather (2-D idx, 2-D x only),
                   2 - generic expand + torch.gather (default)
    :return: x[idx] with shape [n_1, ..., n_m, D_1, ... D_d]
    :raises ValueError: if `method` is not 0, 1 or 2
    """
    if method == 0:
        # Plain advanced indexing.
        return x[idx]
    elif method == 1:
        # Expand x and idx to a common 3-D shape, then use torch.gather on dim 0.
        x = x.unsqueeze(1)
        x = x.expand((-1, idx.shape[-1], -1))
        idx = idx.unsqueeze(2)
        idx = idx.expand((-1, -1, x.shape[-1]))
        return x.gather(0, idx)
    elif method == 2:
        # Generic version: broadcast x over the index dims, and idx over the
        # trailing feature dims, so a single gather on dim 0 reproduces x[idx].
        for i, ni in enumerate(idx.size()[1:]):
            x = x.unsqueeze(i+1)
            new_s = list(x.size())
            new_s[i+1] = ni
            x = x.expand(new_s)
        n = len(idx.size())
        for i, di in enumerate(x.size()[n:]):
            idx = idx.unsqueeze(i+n)
            new_s = list(idx.size())
            new_s[i+n] = di
            idx = idx.expand(new_s)
        return x.gather(0, idx)
    else:
        raise ValueError('Unknown method')  # fixed typo: was 'Unkown method'
def radius_gaussian(sq_r, sig, eps=1e-9):
    """
    Gaussian weight as a function of squared distance.
    :param sq_r: input squared radiuses [dn, ..., d1, d0]
    :param sig: extents of gaussians [d1, d0] or [d0] or float
    :param eps: small constant guarding against division by zero when sig == 0
    :return: gaussian of sq_r [dn, ..., d1, d0]
    """
    variance = 2 * sig ** 2 + eps
    return torch.exp(-sq_r / variance)
def closest_pool(x, inds):
    """
    Pools features from the closest neighbors. WARNING: this function assumes
    the neighbors are ordered, so only the first column of `inds` is used.
    :param x: [n1, d] features matrix
    :param inds: [n2, max_num] neighbor indices (first column = closest)
    :return: [n2, d] pooled features matrix
    """
    # Append a zero "shadow" row so out-of-range (padding) indices pool zeros.
    padded = torch.cat((x, torch.zeros_like(x[:1, :])), 0)
    closest_inds = inds[:, 0]
    return gather(padded, closest_inds)
def max_pool(x, inds):
    """
    Pools features with the maximum values over each neighborhood.
    :param x: [n1, d] features matrix
    :param inds: [n2, max_num] pooling indices (index n1 = zero shadow row)
    :return: [n2, d] pooled features matrix
    """
    # Append a zero "shadow" row so padding indices contribute zeros.
    padded = torch.cat((x, torch.zeros_like(x[:1, :])), 0)
    # [n2, max_num, d] neighborhood features, max-reduced over the neighbor axis.
    neighborhood = gather(padded, inds)
    return torch.max(neighborhood, 1)[0]
def global_average(x, batch_lengths):
    """
    Global average pooling over a stacked batch of point clouds.
    :param x: [N, D] input features (clouds stacked along dim 0)
    :param batch_lengths: [B] list of per-cloud lengths
    :return: [B, D] averaged features, one row per cloud
    """
    means = []
    start = 0
    for length in batch_lengths:
        # Mean over this cloud's slice of the stacked features.
        means.append(torch.mean(x[start:start + length], dim=0))
        start += length
    return torch.stack(means)
# ----------------------------------------------------------------------------------------------------------------------
#
# KPConv class
# \******************/
#
class KPConv(nn.Module):
    """Kernel Point Convolution layer with optional deformable kernel points
    and per-kernel modulations. Each query point's neighborhood is correlated
    against a set of kernel points, each carrying a learnable weight matrix."""
    def __init__(self, kernel_size, p_dim, in_channels, out_channels, KP_extent, radius,
                 fixed_kernel_points='center', KP_influence='linear', aggregation_mode='sum',
                 deformable=False, modulated=False):
        """
        Initialize parameters for KPConvDeformable.
        :param kernel_size: Number of kernel points.
        :param p_dim: dimension of the point space.
        :param in_channels: dimension of input features.
        :param out_channels: dimension of output features.
        :param KP_extent: influence radius of each kernel point.
        :param radius: radius used for kernel point init. Even for deformable, use the config.conv_radius
        :param fixed_kernel_points: fix position of certain kernel points ('none', 'center' or 'verticals').
        :param KP_influence: influence function of the kernel points ('constant', 'linear', 'gaussian').
        :param aggregation_mode: choose to sum influences, or only keep the closest ('closest', 'sum').
        :param deformable: choose deformable or not
        :param modulated: choose if kernel weights are modulated in addition to deformed
        """
        super(KPConv, self).__init__()
        # Save parameters
        self.K = kernel_size
        self.p_dim = p_dim
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.radius = radius
        self.KP_extent = KP_extent
        self.fixed_kernel_points = fixed_kernel_points
        self.KP_influence = KP_influence
        self.aggregation_mode = aggregation_mode
        self.deformable = deformable
        self.modulated = modulated
        # Running variable containing deformed KP distance to input points. (used in regularization loss)
        self.min_d2 = None
        self.deformed_KP = None
        self.offset_features = None
        # Initialize weights: one [in_channels, out_channels] matrix per kernel point
        self.weights = Parameter(torch.zeros((self.K, in_channels, out_channels), dtype=torch.float32),
                                 requires_grad=True)
        # Initiate weights for offsets
        if deformable:
            if modulated:
                # p_dim offset coordinates + 1 modulation scalar per kernel point
                self.offset_dim = (self.p_dim + 1) * self.K
            else:
                self.offset_dim = self.p_dim * self.K
            # a non-deformable KPConv predicts the offsets from the input features
            self.offset_conv = KPConv(self.K,
                                      self.p_dim,
                                      self.in_channels,
                                      self.offset_dim,
                                      KP_extent,
                                      radius,
                                      fixed_kernel_points=fixed_kernel_points,
                                      KP_influence=KP_influence,
                                      aggregation_mode=aggregation_mode)
            self.offset_bias = Parameter(torch.zeros(self.offset_dim, dtype=torch.float32), requires_grad=True)
        else:
            self.offset_dim = None
            self.offset_conv = None
            self.offset_bias = None
        # Reset parameters
        self.reset_parameters()
        # Initialize kernel points
        self.kernel_points = self.init_KP()
        return
    def reset_parameters(self):
        # Kaiming init for the kernel weights; zero init for the offset bias.
        kaiming_uniform_(self.weights, a=math.sqrt(5))
        if self.deformable:
            nn.init.zeros_(self.offset_bias)
        return
    def init_KP(self):
        """
        Initialize the kernel point positions in a sphere
        :return: the tensor of kernel points
        """
        # Create one kernel disposition (as numpy array). Choose the KP distance to center thanks to the KP extent
        K_points_numpy = load_kernels(self.radius,
                                      self.K,
                                      dimension=self.p_dim,
                                      fixed=self.fixed_kernel_points)
        return Parameter(torch.tensor(K_points_numpy, dtype=torch.float32),
                         requires_grad=False)
    def forward(self, q_pts, s_pts, neighb_inds, x):
        """
        :param q_pts: [n_q, p_dim] query point positions
        :param s_pts: [n_s, p_dim] support point positions
        :param neighb_inds: [n_q, max_neighb] neighbor indices into s_pts
                            (index n_s denotes a "shadow" padding neighbor)
        :param x: [n_s, in_channels] support point features
        :return: [n_q, out_channels] convolved features
        """
        ###################
        # Offset generation
        ###################
        if self.deformable:
            # Get offsets with a KPConv that only takes part of the features
            self.offset_features = self.offset_conv(q_pts, s_pts, neighb_inds, x) + self.offset_bias
            if self.modulated:
                # Get offset (in normalized scale) from features
                unscaled_offsets = self.offset_features[:, :self.p_dim * self.K]
                unscaled_offsets = unscaled_offsets.view(-1, self.K, self.p_dim)
                # Get modulations
                modulations = 2 * torch.sigmoid(self.offset_features[:, self.p_dim * self.K:])
            else:
                # Get offset (in normalized scale) from features
                unscaled_offsets = self.offset_features.view(-1, self.K, self.p_dim)
                # No modulations
                modulations = None
            # Rescale offset for this layer
            offsets = unscaled_offsets * self.KP_extent
        else:
            offsets = None
            modulations = None
        ######################
        # Deformed convolution
        ######################
        # Add a fake point in the last row for shadow neighbors
        s_pts = torch.cat((s_pts, torch.zeros_like(s_pts[:1, :]) + 1e6), 0)
        # Get neighbor points [n_points, n_neighbors, dim]
        neighbors = s_pts[neighb_inds, :]
        # Center every neighborhood
        neighbors = neighbors - q_pts.unsqueeze(1)
        # Apply offsets to kernel points [n_points, n_kpoints, dim]
        if self.deformable:
            self.deformed_KP = offsets + self.kernel_points
            deformed_K_points = self.deformed_KP.unsqueeze(1)
        else:
            deformed_K_points = self.kernel_points
        # Get all difference matrices [n_points, n_neighbors, n_kpoints, dim]
        neighbors.unsqueeze_(2)
        differences = neighbors - deformed_K_points
        # Get the square distances [n_points, n_neighbors, n_kpoints]
        sq_distances = torch.sum(differences ** 2, dim=3)
        # Optimization by ignoring points outside a deformed KP range
        if self.deformable:
            # Save distances for loss
            self.min_d2, _ = torch.min(sq_distances, dim=1)
            # Boolean of the neighbors in range of a kernel point [n_points, n_neighbors]
            in_range = torch.any(sq_distances < self.KP_extent ** 2, dim=2).type(torch.int32)
            # New value of max neighbors
            new_max_neighb = torch.max(torch.sum(in_range, dim=1))
            # For each row of neighbors, indices of the ones that are in range [n_points, new_max_neighb]
            neighb_row_bool, neighb_row_inds = torch.topk(in_range, new_max_neighb.item(), dim=1)
            # Gather new neighbor indices [n_points, new_max_neighb]
            new_neighb_inds = neighb_inds.gather(1, neighb_row_inds, sparse_grad=False)
            # Gather new distances to KP [n_points, new_max_neighb, n_kpoints]
            neighb_row_inds.unsqueeze_(2)
            neighb_row_inds = neighb_row_inds.expand(-1, -1, self.K)
            sq_distances = sq_distances.gather(1, neighb_row_inds, sparse_grad=False)
            # New shadow neighbors have to point to the last shadow point
            new_neighb_inds *= neighb_row_bool
            new_neighb_inds -= (neighb_row_bool.type(torch.int64) - 1) * int(s_pts.shape[0] - 1)
        else:
            new_neighb_inds = neighb_inds
        # Get Kernel point influences [n_points, n_kpoints, n_neighbors]
        if self.KP_influence == 'constant':
            # Every point get an influence of 1.
            all_weights = torch.ones_like(sq_distances)
            all_weights = torch.transpose(all_weights, 1, 2)
        elif self.KP_influence == 'linear':
            # Influence decrease linearly with the distance, and get to zero when d = KP_extent.
            all_weights = torch.clamp(1 - torch.sqrt(sq_distances) / self.KP_extent, min=0.0)
            all_weights = torch.transpose(all_weights, 1, 2)
        elif self.KP_influence == 'gaussian':
            # Influence in gaussian of the distance.
            sigma = self.KP_extent * 0.3
            all_weights = radius_gaussian(sq_distances, sigma)
            all_weights = torch.transpose(all_weights, 1, 2)
        else:
            raise ValueError('Unknown influence function type (config.KP_influence)')
        # In case of closest mode, only the closest KP can influence each point
        if self.aggregation_mode == 'closest':
            neighbors_1nn = torch.argmin(sq_distances, dim=2)
            all_weights *= torch.transpose(nn.functional.one_hot(neighbors_1nn, self.K), 1, 2)
        elif self.aggregation_mode != 'sum':
            raise ValueError("Unknown convolution mode. Should be 'closest' or 'sum'")
        # Add a zero feature for shadow neighbors
        x = torch.cat((x, torch.zeros_like(x[:1, :])), 0)
        # Get the features of each neighborhood [n_points, n_neighbors, in_fdim]
        neighb_x = gather(x, new_neighb_inds)
        # Apply distance weights [n_points, n_kpoints, in_fdim]
        weighted_features = torch.matmul(all_weights, neighb_x)
        # Apply modulations
        if self.deformable and self.modulated:
            weighted_features *= modulations.unsqueeze(2)
        # Apply network weights [n_kpoints, n_points, out_fdim]
        weighted_features = weighted_features.permute((1, 0, 2))
        kernel_outputs = torch.matmul(weighted_features, self.weights)
        # Convolution sum [n_points, out_fdim]
        # return torch.sum(kernel_outputs, dim=0)
        output_features = torch.sum(kernel_outputs, dim=0, keepdim=False)
        # normalization term: average by the count of neighbors whose feature
        # rows are non-zero (shadow neighbors contribute all-zero rows)
        neighbor_features_sum = torch.sum(neighb_x, dim=-1)
        neighbor_num = torch.sum(torch.gt(neighbor_features_sum, 0.0), dim=-1)
        neighbor_num = torch.max(neighbor_num, torch.ones_like(neighbor_num))
        output_features = output_features / neighbor_num.unsqueeze(1)
        return output_features
    def __repr__(self):
        return 'KPConv(radius: {:.2f}, extent: {:.2f}, in_feat: {:d}, out_feat: {:d})'.format(self.radius, self.KP_extent,
                                                                                              self.in_channels,
                                                                                              self.out_channels)
# ----------------------------------------------------------------------------------------------------------------------
#
# Complex blocks
# \********************/
#
def block_decider(block_name,
                  radius,
                  in_dim,
                  out_dim,
                  layer_ind,
                  config):
    """
    Factory mapping an architecture block name to its nn.Module instance.
    :param block_name: block identifier from the architecture definition
    :param radius: current convolution radius
    :param in_dim: input feature dimension
    :param out_dim: output feature dimension
    :param layer_ind: index of the layer in the pyramid
    :param config: network configuration object
    :raises ValueError: for an unrecognized block name
    """
    simple_names = {'simple',
                    'simple_deformable',
                    'simple_invariant',
                    'simple_equivariant',
                    'simple_strided',
                    'simple_deformable_strided',
                    'simple_invariant_strided',
                    'simple_equivariant_strided'}
    resnet_names = {'resnetb',
                    'resnetb_invariant',
                    'resnetb_equivariant',
                    'resnetb_deformable',
                    'resnetb_strided',
                    'resnetb_deformable_strided',
                    'resnetb_equivariant_strided',
                    'resnetb_invariant_strided'}
    if block_name == 'unary':
        return UnaryBlock(in_dim, out_dim, config.use_batch_norm, config.batch_norm_momentum)
    if block_name in simple_names:
        return SimpleBlock(block_name, in_dim, out_dim, radius, layer_ind, config)
    if block_name in resnet_names:
        return ResnetBottleneckBlock(block_name, in_dim, out_dim, radius, layer_ind, config)
    if block_name in ('max_pool', 'max_pool_wide'):
        return MaxPoolBlock(layer_ind)
    if block_name == 'global_average':
        return GlobalAverageBlock()
    if block_name == 'nearest_upsample':
        return NearestUpsampleBlock(layer_ind)
    raise ValueError('Unknown block name in the architecture definition : ' + block_name)
class BatchNormBlock(nn.Module):
    """Feature normalization block: InstanceNorm1d when `use_bn` is set,
    otherwise a learnable additive bias (the "only bias" fallback)."""

    def __init__(self, in_dim, use_bn, bn_momentum):
        """
        :param in_dim: dimension of the input features
        :param use_bn: if True normalize, if False just add a learnable bias
        :param bn_momentum: momentum passed to the norm layer
        """
        super(BatchNormBlock, self).__init__()
        self.bn_momentum = bn_momentum
        self.use_bn = use_bn
        self.in_dim = in_dim
        if self.use_bn:
            self.batch_norm = nn.InstanceNorm1d(in_dim, momentum=bn_momentum)
        else:
            self.bias = Parameter(torch.zeros(in_dim, dtype=torch.float32), requires_grad=True)

    def reset_parameters(self):
        # Only meaningful in the bias-only configuration.
        nn.init.zeros_(self.bias)

    def forward(self, x):
        if not self.use_bn:
            return x + self.bias
        # [N, D] -> [1, D, N] so InstanceNorm1d normalizes over the point axis.
        h = x.unsqueeze(2)
        h = h.transpose(0, 2)
        h = self.batch_norm(h)
        h = h.transpose(0, 2)
        return h.squeeze()

    def __repr__(self):
        return 'BatchNormBlock(in_feat: {:d}, momentum: {:.3f}, only_bias: {:s})'.format(
            self.in_dim, self.bn_momentum, str(not self.use_bn))
class UnaryBlock(nn.Module):
    """Pointwise MLP block: linear projection -> normalization -> optional LeakyReLU."""

    def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False):
        """
        :param in_dim: dimension of the input features
        :param out_dim: dimension of the output features
        :param use_bn: whether the norm block actually normalizes (vs bias only)
        :param bn_momentum: norm layer momentum
        :param no_relu: if True, skip the final activation
        """
        super(UnaryBlock, self).__init__()
        self.bn_momentum = bn_momentum
        self.use_bn = use_bn
        self.no_relu = no_relu
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.mlp = nn.Linear(in_dim, out_dim, bias=False)
        self.batch_norm = BatchNormBlock(out_dim, self.use_bn, self.bn_momentum)
        if not no_relu:
            self.leaky_relu = nn.LeakyReLU(0.1)

    def forward(self, x, batch=None):
        # `batch` is unused; kept for a uniform block interface.
        out = self.batch_norm(self.mlp(x))
        return out if self.no_relu else self.leaky_relu(out)

    def __repr__(self):
        return 'UnaryBlock(in_feat: {:d}, out_feat: {:d}, BN: {:s}, ReLU: {:s})'.format(
            self.in_dim, self.out_dim, str(self.use_bn), str(not self.no_relu))
class LastUnaryBlock(nn.Module):
    """Final pointwise linear projection, without normalization or activation."""

    def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False):
        """
        :param in_dim: dimension of the input features
        :param out_dim: dimension of the output features
        :param use_bn: unused, kept for signature compatibility with UnaryBlock
        :param bn_momentum: unused, kept for signature compatibility
        :param no_relu: unused, kept for signature compatibility
        """
        super(LastUnaryBlock, self).__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.mlp = nn.Linear(in_dim, out_dim, bias=False)

    def forward(self, x, batch=None):
        # `batch` is unused; kept for a uniform block interface.
        return self.mlp(x)

    def __repr__(self):
        return 'LastUnaryBlock(in_feat: {:d}, out_feat: {:d})'.format(self.in_dim, self.out_dim)
class SimpleBlock(nn.Module):
    """Simple KPFCN block: KPConv -> normalization -> LeakyReLU.
    'strided' variants query the next (coarser) pyramid level."""

    def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config):
        """
        :param block_name: architecture string; 'deform'/'strided' substrings select variants
        :param in_dim: dimension of the input features
        :param out_dim: dimension of the output features (the conv itself emits out_dim // 2)
        :param radius: current radius of convolution
        :param layer_ind: index of this layer in the multi-scale pyramid
        :param config: network parameters
        """
        super(SimpleBlock, self).__init__()
        # Kernel-point extent scales with the current layer radius.
        current_extent = radius * config.KP_extent / config.conv_radius
        self.bn_momentum = config.batch_norm_momentum
        self.use_bn = config.use_batch_norm
        self.layer_ind = layer_ind
        self.block_name = block_name
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.KPConv = KPConv(config.num_kernel_points,
                             config.in_points_dim,
                             in_dim,
                             out_dim // 2,
                             current_extent,
                             radius,
                             fixed_kernel_points=config.fixed_kernel_points,
                             KP_influence=config.KP_influence,
                             aggregation_mode=config.aggregation_mode,
                             deformable='deform' in block_name,
                             modulated=config.modulated)
        self.batch_norm = BatchNormBlock(out_dim // 2, self.use_bn, self.bn_momentum)
        self.leaky_relu = nn.LeakyReLU(0.1)

    def forward(self, x, batch):
        strided = 'strided' in self.block_name
        # Strided blocks produce features at the next (coarser) level.
        q_pts = batch['points'][self.layer_ind + 1] if strided else batch['points'][self.layer_ind]
        s_pts = batch['points'][self.layer_ind]
        neighb_inds = batch['pools'][self.layer_ind] if strided else batch['neighbors'][self.layer_ind]
        features = self.KPConv(q_pts, s_pts, neighb_inds, x)
        return self.leaky_relu(self.batch_norm(features))
class ResnetBottleneckBlock(nn.Module):
    """Bottleneck residual block: 1x1 down-projection, KPConv at the bottleneck
    width, 1x1 up-projection, plus a (possibly pooled/projected) shortcut."""

    def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config):
        """
        :param block_name: architecture string; 'deform'/'strided' substrings select variants
        :param in_dim: dimension of the input features
        :param out_dim: dimension of the output features (bottleneck width is out_dim // 4)
        :param radius: current radius of convolution
        :param layer_ind: index of this layer in the multi-scale pyramid
        :param config: network parameters
        """
        super(ResnetBottleneckBlock, self).__init__()
        # Kernel-point extent scales with the current layer radius.
        current_extent = radius * config.KP_extent / config.conv_radius
        self.bn_momentum = config.batch_norm_momentum
        self.use_bn = config.use_batch_norm
        self.block_name = block_name
        self.layer_ind = layer_ind
        self.in_dim = in_dim
        self.out_dim = out_dim
        bottleneck_dim = out_dim // 4
        # Down-projection to the bottleneck width (identity if already there).
        if in_dim != bottleneck_dim:
            self.unary1 = UnaryBlock(in_dim, bottleneck_dim, self.use_bn, self.bn_momentum)
        else:
            self.unary1 = nn.Identity()
        # Kernel point convolution at the bottleneck width.
        self.KPConv = KPConv(config.num_kernel_points,
                             config.in_points_dim,
                             bottleneck_dim,
                             bottleneck_dim,
                             current_extent,
                             radius,
                             fixed_kernel_points=config.fixed_kernel_points,
                             KP_influence=config.KP_influence,
                             aggregation_mode=config.aggregation_mode,
                             deformable='deform' in block_name,
                             modulated=config.modulated)
        self.batch_norm_conv = BatchNormBlock(bottleneck_dim, self.use_bn, self.bn_momentum)
        # Up-projection back to out_dim; no ReLU before the residual sum.
        self.unary2 = UnaryBlock(bottleneck_dim, out_dim, self.use_bn, self.bn_momentum, no_relu=True)
        # Shortcut projection when input and output dimensions differ.
        if in_dim != out_dim:
            self.unary_shortcut = UnaryBlock(in_dim, out_dim, self.use_bn, self.bn_momentum, no_relu=True)
        else:
            self.unary_shortcut = nn.Identity()
        self.leaky_relu = nn.LeakyReLU(0.1)

    def forward(self, features, batch):
        strided = 'strided' in self.block_name
        # Strided blocks produce features at the next (coarser) level.
        q_pts = batch['points'][self.layer_ind + 1] if strided else batch['points'][self.layer_ind]
        s_pts = batch['points'][self.layer_ind]
        neighb_inds = batch['pools'][self.layer_ind] if strided else batch['neighbors'][self.layer_ind]
        # Main branch: down-project, convolve, up-project.
        x = self.unary1(features)
        x = self.leaky_relu(self.batch_norm_conv(self.KPConv(q_pts, s_pts, neighb_inds, x)))
        x = self.unary2(x)
        # Shortcut branch: max-pool to the coarser level when strided, then project.
        shortcut = max_pool(features, neighb_inds) if strided else features
        shortcut = self.unary_shortcut(shortcut)
        return self.leaky_relu(x + shortcut)
class GlobalAverageBlock(nn.Module):
    """Reduce a stacked batch of point features to one averaged vector per cloud."""

    def __init__(self):
        super(GlobalAverageBlock, self).__init__()

    def forward(self, x, batch):
        # Average over each cloud using the last level's stack lengths.
        return global_average(x, batch['stack_lengths'][-1])
class NearestUpsampleBlock(nn.Module):
    """Upsample features from layer `layer_ind` to layer `layer_ind - 1`
    by copying each fine point's closest coarse neighbor's features."""

    def __init__(self, layer_ind):
        super(NearestUpsampleBlock, self).__init__()
        self.layer_ind = layer_ind

    def forward(self, x, batch):
        return closest_pool(x, batch['upsamples'][self.layer_ind - 1])

    def __repr__(self):
        return 'NearestUpsampleBlock(layer: {:d} -> {:d})'.format(self.layer_ind, self.layer_ind - 1)
class MaxPoolBlock(nn.Module):
    """Max-pool features from layer `layer_ind` to the next (coarser) layer."""

    def __init__(self, layer_ind):
        super(MaxPoolBlock, self).__init__()
        self.layer_ind = layer_ind

    def forward(self, x, batch):
        return max_pool(x, batch['pools'][self.layer_ind + 1])
| 26,090 | 35.956091 | 122 | py |
lepard | lepard-main/datasets/_4dmatch.py | import os, sys, glob, torch
# sys.path.append("../")
[sys.path.append(i) for i in ['.', '..']]
import numpy as np
import torch
import random
from scipy.spatial.transform import Rotation
from torch.utils.data import Dataset
from lib.benchmark_utils import to_o3d_pcd, to_tsfm, KDTree_corr
from lib.utils import load_obj
HMN_intrin = np.array( [443, 256, 443, 250 ])
cam_intrin = np.array( [443, 256, 443, 250 ])
from lib.benchmark_utils import to_o3d_pcd, to_tsfm, get_correspondences
class _4DMatch(Dataset):
    """4DMatch dataset: pairs of non-rigidly deforming point clouds stored as
    .npz entries, with scene flow, rigid transform, and correspondences.
    Convention (see __getitem__): R * (Ps + flow) + t = Pt."""
    def __init__(self, config, split, data_augmentation=True):
        super(_4DMatch, self).__init__()
        assert split in ['train','val','test']
        # 'overfit' experiments keep only one batch worth of entries
        if 'overfit' in config.exp_dir:
            d_slice = config.batch_size
        else :
            d_slice = None
        self.entries = self.read_entries( config.split[split] , config.data_root, d_slice=d_slice )
        self.base_dir = config.data_root
        self.data_augmentation = data_augmentation
        self.config = config
        self.rot_factor = 1.  # divides the random rotation range (2*pi / rot_factor)
        self.augment_noise = config.augment_noise
        self.max_points = 30000  # clouds above this size are randomly subsampled
        self.overlap_radius = 0.0375
        # simple in-memory cache of loaded npz entries (keyed by index)
        self.cache = {}
        self.cache_size = 30000
    def read_entries (self, split, data_root, d_slice=None, shuffle= False):
        """Collect the .npz file paths of a split; optionally shuffle and truncate to d_slice."""
        entries = glob.glob(os.path.join(data_root, split, "*/*.npz"))
        if shuffle:
            random.shuffle(entries)
        if d_slice:
            return entries[:d_slice]
        return entries
    def __len__(self):
        return len(self.entries )
    def __getitem__(self, index, debug=False):
        """Load (and cache) one pair; optionally augment with a random rotation
        and noise. Returns (src_pcd, tgt_pcd, src_feats, tgt_feats,
        correspondences, rot, trans, s2t_flow, metric_index)."""
        if index in self.cache:
            entry = self.cache[index]
        else :
            entry = np.load(self.entries[index])
            if len(self.cache) < self.cache_size:
                self.cache[index] = entry
        # get transformation
        rot = entry['rot']
        trans = entry['trans']
        s2t_flow = entry['s2t_flow']
        src_pcd = entry['s_pc']
        tgt_pcd = entry['t_pc']
        correspondences = entry['correspondences'] # obtained with search radius 0.015 m
        src_pcd_deformed = src_pcd + s2t_flow
        if "metric_index" in entry:
            metric_index = entry['metric_index'].squeeze()
        else:
            metric_index = None
        # if we get too many points, we do some downsampling
        if (src_pcd.shape[0] > self.max_points):
            idx = np.random.permutation(src_pcd.shape[0])[:self.max_points]
            src_pcd = src_pcd[idx]
        if (tgt_pcd.shape[0] > self.max_points):
            idx = np.random.permutation(tgt_pcd.shape[0])[:self.max_points]
            tgt_pcd = tgt_pcd[idx]
        # optional visualization of the raw (pre-augmentation) pair
        if debug:
            import mayavi.mlab as mlab
            c_red = (224. / 255., 0 / 255., 125 / 255.)
            c_pink = (224. / 255., 75. / 255., 232. / 255.)
            c_blue = (0. / 255., 0. / 255., 255. / 255.)
            scale_factor = 0.013
            src_wrapped = (np.matmul( rot, src_pcd_deformed.T ) + trans ).T
            mlab.points3d(src_wrapped[:, 0], src_wrapped[:, 1], src_wrapped[:, 2], scale_factor=scale_factor, color=c_pink)
            mlab.points3d(src_pcd[ :, 0] , src_pcd[ :, 1], src_pcd[:, 2], scale_factor=scale_factor , color=c_red)
            mlab.points3d(tgt_pcd[ :, 0] , tgt_pcd[ :, 1], tgt_pcd[:, 2], scale_factor=scale_factor , color=c_blue)
            mlab.show()
        # add gaussian noise
        if self.data_augmentation:
            # rotate the point cloud
            euler_ab = np.random.rand(3) * np.pi * 2 / self.rot_factor # anglez, angley, anglex
            rot_ab = Rotation.from_euler('zyx', euler_ab).as_matrix()
            # randomly rotate either the source (and its deformed copy) or the target,
            # updating rot/trans so the ground-truth relation still holds
            if (np.random.rand(1)[0] > 0.5):
                src_pcd = np.matmul(rot_ab, src_pcd.T).T
                src_pcd_deformed = np.matmul(rot_ab, src_pcd_deformed.T).T
                rot = np.matmul(rot, rot_ab.T)
            else:
                tgt_pcd = np.matmul(rot_ab, tgt_pcd.T).T
                rot = np.matmul(rot_ab, rot)
                trans = np.matmul(rot_ab, trans)
            src_pcd += (np.random.rand(src_pcd.shape[0], 3) - 0.5) * self.augment_noise
            tgt_pcd += (np.random.rand(tgt_pcd.shape[0], 3) - 0.5) * self.augment_noise
            s2t_flow = src_pcd_deformed - src_pcd
        if debug:
            # wrapp_src = (np.matmul(rot, src_pcd.T)+ trans).T
            src_wrapped = (np.matmul( rot, src_pcd_deformed.T ) + trans ).T
            mlab.points3d(src_wrapped[:, 0], src_wrapped[:, 1], src_wrapped[:, 2], scale_factor=scale_factor, color=c_red)
            mlab.points3d(tgt_pcd[:, 0], tgt_pcd[:, 1], tgt_pcd[:, 2], scale_factor=scale_factor, color=c_blue)
            mlab.show()
        if (trans.ndim == 1):
            trans = trans[:, None]
        # constant dummy features (KPConv-style "ones" input features)
        src_feats = np.ones_like(src_pcd[:, :1]).astype(np.float32)
        tgt_feats = np.ones_like(tgt_pcd[:, :1]).astype(np.float32)
        rot = rot.astype(np.float32)
        trans = trans.astype(np.float32)
        #R * ( Ps + flow ) + t = Pt
        return src_pcd, tgt_pcd, src_feats, tgt_feats, correspondences, rot, trans, s2t_flow, metric_index
if __name__ == '__main__':
    # Smoke test: load the config, build the test split, and iterate every
    # sample with debug visualization enabled.
    from lib.utils import load_config
    from easydict import EasyDict as edict
    from lib.tictok import Timers
    import yaml
    def join(loader, node):
        # YAML '!join' constructor: concatenate sequence items with '_'.
        seq = loader.construct_sequence(node)
        return '_'.join([str(i) for i in seq])
    yaml.add_constructor('!join', join)
    config = "/home/liyang/workspace/Regformer/configs/train/4dmatch.yaml"
    with open(config,'r') as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config = edict(config)
    config.timers=Timers()
    D = _4DMatch(config, "test")
    for i in range (len(D)):
        try:
            if i%1000 == 0 :
                print (i,"/",len(D))
            D.__getitem__(i, debug=True)
        except:
            # NOTE(review): bare except deliberately skips unreadable samples,
            # but it also swallows KeyboardInterrupt — consider `except Exception`.
            # print(i, "/", len(D))
            pass | 5,939 | 32.75 | 123 | py
lepard | lepard-main/datasets/dataloader.py | import numpy as np
from functools import partial
import torch
import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling
import cpp_wrappers.cpp_neighbors.radius_neighbors as cpp_neighbors
from datasets._3dmatch import _3DMatch
from datasets._4dmatch import _4DMatch
from datasets.utils import blend_scene_flow, multual_nn_correspondence
from lib.visualization import *
from torch.utils.data import DataLoader
def batch_grid_subsampling_kpconv(points, batches_len, features=None, labels=None, sampleDl=0.1, max_p=0, verbose=0, random_grid_orient=True):
    """
    CPP wrapper for a grid subsampling (method = barycenter for points and features)
    :param points: stacked point coordinates of the whole batch
    :param batches_len: per-cloud lengths delimiting the batch inside `points`
    :param features: optional stacked point features, subsampled alongside points
    :param labels: optional stacked point classes, subsampled alongside points
    :param sampleDl: grid cell size
    :param max_p: maximum number of subsampled points (0 = no limit)
    :param verbose: verbosity flag forwarded to the C++ routine
    :param random_grid_orient: accepted for API compatibility; unused here
    :return: torch tensors (s_points, s_len[, s_features][, s_labels]),
             depending on which optional inputs were given
    """
    # Four call variants, one per combination of optional features/labels,
    # because the C++ binding dispatches on which keyword arguments are present.
    if (features is None) and (labels is None):
        s_points, s_len = cpp_subsampling.subsample_batch(points,
                                                          batches_len,
                                                          sampleDl=sampleDl,
                                                          max_p=max_p,
                                                          verbose=verbose)
        return torch.from_numpy(s_points), torch.from_numpy(s_len)
    elif (labels is None):
        s_points, s_len, s_features = cpp_subsampling.subsample_batch(points,
                                                                      batches_len,
                                                                      features=features,
                                                                      sampleDl=sampleDl,
                                                                      max_p=max_p,
                                                                      verbose=verbose)
        return torch.from_numpy(s_points), torch.from_numpy(s_len), torch.from_numpy(s_features)
    elif (features is None):
        s_points, s_len, s_labels = cpp_subsampling.subsample_batch(points,
                                                                    batches_len,
                                                                    classes=labels,
                                                                    sampleDl=sampleDl,
                                                                    max_p=max_p,
                                                                    verbose=verbose)
        return torch.from_numpy(s_points), torch.from_numpy(s_len), torch.from_numpy(s_labels)
    else:
        s_points, s_len, s_features, s_labels = cpp_subsampling.subsample_batch(points,
                                                                                batches_len,
                                                                                features=features,
                                                                                classes=labels,
                                                                                sampleDl=sampleDl,
                                                                                max_p=max_p,
                                                                                verbose=verbose)
        return torch.from_numpy(s_points), torch.from_numpy(s_len), torch.from_numpy(s_features), torch.from_numpy(s_labels)
def batch_neighbors_kpconv(queries, supports, q_batches, s_batches, radius, max_neighbors):
    """
    Batched radius search: for each query point, find supports within `radius`.
    :param queries: (N1, 3) the query points
    :param supports: (N2, 3) the support points
    :param q_batches: (B) the list of lengths of batch elements in queries
    :param s_batches: (B) the list of lengths of batch elements in supports
    :param radius: float32 search radius
    :param max_neighbors: if > 0, keep only the first max_neighbors indices per query
    :return: torch tensor of neighbor indices
    """
    neighb_inds = cpp_neighbors.batch_query(queries, supports, q_batches, s_batches, radius=radius)
    if max_neighbors > 0:
        neighb_inds = neighb_inds[:, :max_neighbors]
    return torch.from_numpy(neighb_inds)
def collate_fn_3dmatch(list_data, config, neighborhood_limits ):
    """Collate a list of 3DMatch pairs into batched KPFCN inputs.

    Stacks source/target clouds of every pair into one interleaved point stack
    ([src0, tgt0, src1, tgt1, ...]), builds the multi-scale point pyramid
    (per-layer neighbor / pooling / upsampling indices) following
    ``config.architecture``, and derives coarse-level ground-truth matches plus
    the masks and flat indices used to split the bottleneck features back into
    per-pair source/target parts.
    """
    batched_points_list = []
    batched_features_list = []
    batched_lengths_list = []
    correspondences_list = []
    src_pcd_list = []
    tgt_pcd_list = []

    batched_rot = []
    batched_trn = []

    gt_cov_list = []

    # Interleave every pair as [src, tgt, src, tgt, ...] in one flat stack.
    for ind, ( src_pcd, tgt_pcd, src_feats, tgt_feats, correspondences, rot, trn, gt_cov) in enumerate(list_data):

        correspondences_list.append(correspondences )
        src_pcd_list.append(torch.from_numpy(src_pcd) )
        tgt_pcd_list.append(torch.from_numpy(tgt_pcd) )

        batched_points_list.append(src_pcd)
        batched_points_list.append(tgt_pcd)
        batched_features_list.append(src_feats)
        batched_features_list.append(tgt_feats)
        batched_lengths_list.append(len(src_pcd))
        batched_lengths_list.append(len(tgt_pcd))

        batched_rot.append( torch.from_numpy(rot).float())
        batched_trn.append( torch.from_numpy(trn).float())

        gt_cov_list.append(gt_cov)

    # gt_cov entries are either all None or all arrays (per-sample ground-truth covariances).
    gt_cov_list = None if gt_cov_list[0] is None \
        else np.stack(gt_cov_list, axis=0)

    # if timers: cnter['collate_load_batch'] = time.time() - st

    batched_features = torch.from_numpy(np.concatenate(batched_features_list, axis=0))
    batched_points = torch.from_numpy(np.concatenate(batched_points_list, axis=0))
    batched_lengths = torch.from_numpy(np.array(batched_lengths_list)).int()

    batched_rot = torch.stack(batched_rot,dim=0)
    batched_trn = torch.stack(batched_trn,dim=0)

    # Starting radius of convolutions
    r_normal = config.first_subsampling_dl * config.conv_radius

    # Starting layer
    layer_blocks = []
    layer = 0

    # Lists of inputs (one entry per pyramid level)
    input_points = []
    input_neighbors = []
    input_pools = []
    input_upsamples = []
    input_batches_len = []

    # construt kpfcn inds
    for block_i, block in enumerate(config.architecture):

        # Stop when meeting a global pooling or upsampling
        if 'global' in block or 'upsample' in block:
            break

        # Get all blocks of the layer
        if not ('pool' in block or 'strided' in block):
            layer_blocks += [block]
            if block_i < len(config.architecture) - 1 and not ('upsample' in config.architecture[block_i + 1]):
                continue

        # Convolution neighbors indices
        # *****************************

        if layer_blocks:
            # Convolutions are done in this layer, compute the neighbors with the good radius
            if np.any(['deformable' in blck for blck in layer_blocks[:-1]]):
                r = r_normal * config.deform_radius / config.conv_radius
            else:
                r = r_normal
            conv_i = batch_neighbors_kpconv(batched_points, batched_points, batched_lengths, batched_lengths, r,
                                            neighborhood_limits[layer])

        else:
            # This layer only perform pooling, no neighbors required
            conv_i = torch.zeros((0, 1), dtype=torch.int64)

        # Pooling neighbors indices
        # *************************

        # If end of layer is a pooling operation
        if 'pool' in block or 'strided' in block:

            # New subsampling length
            dl = 2 * r_normal / config.conv_radius

            # Subsampled points
            pool_p, pool_b = batch_grid_subsampling_kpconv(batched_points, batched_lengths, sampleDl=dl)

            # Radius of pooled neighbors
            if 'deformable' in block:
                r = r_normal * config.deform_radius / config.conv_radius
            else:
                r = r_normal

            # Subsample indices
            pool_i = batch_neighbors_kpconv(pool_p, batched_points, pool_b, batched_lengths, r,
                                            neighborhood_limits[layer])

            # Upsample indices (with the radius of the next layer to keep wanted density)
            up_i = batch_neighbors_kpconv(batched_points, pool_p, batched_lengths, pool_b, 2 * r,
                                          neighborhood_limits[layer])

        else:
            # No pooling in the end of this layer, no pooling indices required
            pool_i = torch.zeros((0, 1), dtype=torch.int64)
            pool_p = torch.zeros((0, 3), dtype=torch.float32)
            pool_b = torch.zeros((0,), dtype=torch.int64)
            up_i = torch.zeros((0, 1), dtype=torch.int64)

        # Updating input lists
        input_points += [batched_points.float()]
        input_neighbors += [conv_i.long()]
        input_pools += [pool_i.long()]
        input_upsamples += [up_i.long()]
        input_batches_len += [batched_lengths]

        # New points for next layer
        batched_points = pool_p
        batched_lengths = pool_b

        # Update radius and reset blocks
        r_normal *= 2
        layer += 1
        layer_blocks = []

    # coarse infomation: at `coarse_level` each pair contributes exactly two
    # lengths (src, tgt), hence the view(-1, 2).
    coarse_level = config.coarse_level
    pts_num_coarse = input_batches_len[coarse_level].view(-1, 2)
    b_size = pts_num_coarse.shape[0]
    src_pts_max, tgt_pts_max = pts_num_coarse.amax(dim=0)
    coarse_pcd = input_points[coarse_level]  # .numpy()
    coarse_matches= []
    src_ind_coarse_split= [] # src_feats shape :[b_size * src_pts_max]
    src_ind_coarse = []
    tgt_ind_coarse_split= []
    tgt_ind_coarse = []
    accumu = 0
    src_mask = torch.zeros([b_size, src_pts_max], dtype=torch.bool)
    tgt_mask = torch.zeros([b_size, tgt_pts_max], dtype=torch.bool)

    #grid subsample fine level points for differentiable matching
    # NOTE: `dl` here is the value left over from the last pooling layer above.
    fine_pts, fine_length = batch_grid_subsampling_kpconv(input_points[0], input_batches_len[0], sampleDl=dl*0.5*0.85)
    fine_ind = batch_neighbors_kpconv(fine_pts, input_points[0], fine_length, input_batches_len[0], dl*0.5*0.85, 1).squeeze().long()

    for entry_id, cnt in enumerate( pts_num_coarse ): #input_batches_len[-1].numpy().reshape(-1,2)) :

        n_s_pts, n_t_pts = cnt

        '''split mask for bottlenect feats'''
        src_mask[entry_id][:n_s_pts] = 1
        tgt_mask[entry_id][:n_t_pts] = 1

        '''split indices of bottleneck feats'''
        src_ind_coarse_split.append( torch.arange( n_s_pts ) + entry_id * src_pts_max )
        tgt_ind_coarse_split.append( torch.arange( n_t_pts ) + entry_id * tgt_pts_max )
        src_ind_coarse.append( torch.arange( n_s_pts ) + accumu )
        tgt_ind_coarse.append( torch.arange( n_t_pts ) + accumu + n_s_pts )

        '''get match at coarse level'''
        # Slice this pair's coarse src/tgt points out of the flat stack,
        # warp src by the ground-truth pose, then mutual-NN match.
        c_src_pcd = coarse_pcd[accumu : accumu + n_s_pts]
        c_tgt_pcd = coarse_pcd[accumu + n_s_pts: accumu + n_s_pts + n_t_pts]
        s_pc_wrapped = (torch.matmul( batched_rot[entry_id], c_src_pcd.T ) + batched_trn [entry_id]).T
        coarse_match_gt = torch.from_numpy( multual_nn_correspondence(s_pc_wrapped.numpy(), c_tgt_pcd.numpy(), search_radius=config['coarse_match_radius']) )# 0.1m scaled
        coarse_matches.append(coarse_match_gt)

        accumu = accumu + n_s_pts + n_t_pts

        vis=False # for debug
        if vis :
            viz_coarse_nn_correspondence_mayavi(c_src_pcd, c_tgt_pcd, coarse_match_gt, scale_factor=0.04)

        vis=False # for debug
        if vis :
            pass
            import mayavi.mlab as mlab

            # src_nei_valid = src_nei_mask[coarse_match_gt[0]].view(-1)
            # tgt_nei_valid = tgt_nei_mask[coarse_match_gt[1]].view(-1)
            #
            # f_src_pcd = src_m_nei_pts.view(-1, 3)[src_nei_valid]
            # f_tgt_pcd = tgt_m_nei_pts.view(-1,3)[tgt_nei_valid]
            #
            # mlab.points3d(f_src_pcd[:, 0], f_src_pcd[:, 1], f_src_pcd[:, 2], scale_factor=0.02,color=c_gray1)
            # mlab.points3d(f_tgt_pcd[:, 0], f_tgt_pcd[:, 1], f_tgt_pcd[:, 2], scale_factor=0.02,color=c_gray2)
            #
            # src_m_nn_pts =src_m_nn_pts.view(-1, 3)
            # src_m_nn_pts_wrapped = src_m_nn_pts_wrapped.view(-1,3)
            # tgt_m_nn_pts = tgt_m_nei_pts [ torch.arange(tgt_m_nei_pts.shape[0]), nni.view(-1), ... ]
            # mlab.points3d(src_m_nn_pts[:, 0], src_m_nn_pts[:, 1], src_m_nn_pts[:, 2], scale_factor=0.04,color=c_red)
            # mlab.points3d(src_m_nn_pts_wrapped[:, 0], src_m_nn_pts_wrapped[:, 1], src_m_nn_pts_wrapped[:, 2], scale_factor=0.04,color=c_red)
            # mlab.points3d(tgt_m_nn_pts[:, 0], tgt_m_nn_pts[:, 1], tgt_m_nn_pts[:, 2], scale_factor=0.04 ,color=c_blue)
            # mlab.show()
            # viz_coarse_nn_correspondence_mayavi(c_src_pcd, c_tgt_pcd, coarse_match_gt,
            #                                     f_src_pcd=src_m_nei_pts.view(-1,3)[src_nei_valid],
            #                                     f_tgt_pcd=tgt_m_nei_pts.view(-1,3)[tgt_nei_valid], scale_factor=0.08)

    src_ind_coarse_split = torch.cat(src_ind_coarse_split)
    tgt_ind_coarse_split = torch.cat(tgt_ind_coarse_split)
    src_ind_coarse = torch.cat(src_ind_coarse)
    tgt_ind_coarse = torch.cat(tgt_ind_coarse)

    dict_inputs = {
        'src_pcd_list': src_pcd_list,
        'tgt_pcd_list': tgt_pcd_list,
        'points': input_points,
        'neighbors': input_neighbors,
        'pools': input_pools,
        'upsamples': input_upsamples,
        'features': batched_features.float(),
        'stack_lengths': input_batches_len,
        'coarse_matches': coarse_matches,
        'src_mask': src_mask,
        'tgt_mask': tgt_mask,
        'src_ind_coarse_split': src_ind_coarse_split,
        'tgt_ind_coarse_split': tgt_ind_coarse_split,
        'src_ind_coarse': src_ind_coarse,
        'tgt_ind_coarse': tgt_ind_coarse,
        'batched_rot': batched_rot,
        'batched_trn': batched_trn,
        'gt_cov': gt_cov_list,
        #for refine
        'correspondences_list': correspondences_list,
        'fine_ind': fine_ind,
        'fine_pts': fine_pts,
        'fine_length': fine_length
    }

    return dict_inputs
def collate_fn_4dmatch(list_data, config, neighborhood_limits ):
    """Collate a list of 4DMatch (non-rigid) pairs into batched KPFCN inputs.

    Same pyramid construction as ``collate_fn_3dmatch``, but samples carry a
    per-point scene flow (``s2t_flow``) and optional metric-point indices; the
    coarse ground-truth matches are computed after deforming the source by the
    interpolated coarse flow and applying the rigid pose.
    """
    batched_points_list = []
    batched_features_list = []
    batched_lengths_list = []
    correspondences_list = []
    src_pcd_list = []
    tgt_pcd_list = []

    batched_rot = []
    batched_trn = []

    sflow_list = []
    metric_index_list = [] #for feature matching recall computation

    # Interleave every pair as [src, tgt, src, tgt, ...] in one flat stack.
    for ind, ( src_pcd, tgt_pcd, src_feats, tgt_feats, correspondences, rot, trn, s2t_flow, metric_index) in enumerate(list_data):

        correspondences_list.append(correspondences )
        src_pcd_list.append(torch.from_numpy(src_pcd) )
        tgt_pcd_list.append(torch.from_numpy(tgt_pcd) )

        batched_points_list.append(src_pcd)
        batched_points_list.append(tgt_pcd)
        batched_features_list.append(src_feats)
        batched_features_list.append(tgt_feats)
        batched_lengths_list.append(len(src_pcd))
        batched_lengths_list.append(len(tgt_pcd))

        batched_rot.append( torch.from_numpy(rot).float())
        batched_trn.append( torch.from_numpy(trn).float())

        # gt_cov_list.append(gt_cov)
        sflow_list.append( torch.from_numpy(s2t_flow).float() )

        # metric indices are either present for every sample or for none.
        if metric_index is None:
            metric_index_list = None
        else :
            metric_index_list.append ( torch.from_numpy(metric_index))

    # if timers: cnter['collate_load_batch'] = time.time() - st

    batched_features = torch.from_numpy(np.concatenate(batched_features_list, axis=0))
    batched_points = torch.from_numpy(np.concatenate(batched_points_list, axis=0))
    batched_lengths = torch.from_numpy(np.array(batched_lengths_list)).int()

    batched_rot = torch.stack(batched_rot,dim=0)
    batched_trn = torch.stack(batched_trn,dim=0)

    # Starting radius of convolutions
    r_normal = config.first_subsampling_dl * config.conv_radius

    # Starting layer
    layer_blocks = []
    layer = 0

    # Lists of inputs (one entry per pyramid level)
    input_points = []
    input_neighbors = []
    input_pools = []
    input_upsamples = []
    input_batches_len = []

    # construt kpfcn inds
    for block_i, block in enumerate(config.architecture):

        # Stop when meeting a global pooling or upsampling
        if 'global' in block or 'upsample' in block:
            break

        # Get all blocks of the layer
        if not ('pool' in block or 'strided' in block):
            layer_blocks += [block]
            if block_i < len(config.architecture) - 1 and not ('upsample' in config.architecture[block_i + 1]):
                continue

        # Convolution neighbors indices
        # *****************************

        if layer_blocks:
            # Convolutions are done in this layer, compute the neighbors with the good radius
            if np.any(['deformable' in blck for blck in layer_blocks[:-1]]):
                r = r_normal * config.deform_radius / config.conv_radius
            else:
                r = r_normal
            conv_i = batch_neighbors_kpconv(batched_points, batched_points, batched_lengths, batched_lengths, r,
                                            neighborhood_limits[layer])

        else:
            # This layer only perform pooling, no neighbors required
            conv_i = torch.zeros((0, 1), dtype=torch.int64)

        # Pooling neighbors indices
        # *************************

        # If end of layer is a pooling operation
        if 'pool' in block or 'strided' in block:

            # New subsampling length
            dl = 2 * r_normal / config.conv_radius

            # Subsampled points
            pool_p, pool_b = batch_grid_subsampling_kpconv(batched_points, batched_lengths, sampleDl=dl)

            # Radius of pooled neighbors
            if 'deformable' in block:
                r = r_normal * config.deform_radius / config.conv_radius
            else:
                r = r_normal

            # Subsample indices
            pool_i = batch_neighbors_kpconv(pool_p, batched_points, pool_b, batched_lengths, r,
                                            neighborhood_limits[layer])

            # Upsample indices (with the radius of the next layer to keep wanted density)
            up_i = batch_neighbors_kpconv(batched_points, pool_p, batched_lengths, pool_b, 2 * r,
                                          neighborhood_limits[layer])

        else:
            # No pooling in the end of this layer, no pooling indices required
            pool_i = torch.zeros((0, 1), dtype=torch.int64)
            pool_p = torch.zeros((0, 3), dtype=torch.float32)
            pool_b = torch.zeros((0,), dtype=torch.int64)
            up_i = torch.zeros((0, 1), dtype=torch.int64)

        # Updating input lists
        input_points += [batched_points.float()]
        input_neighbors += [conv_i.long()]
        input_pools += [pool_i.long()]
        input_upsamples += [up_i.long()]
        input_batches_len += [batched_lengths]

        # New points for next layer
        batched_points = pool_p
        batched_lengths = pool_b

        # Update radius and reset blocks
        r_normal *= 2
        layer += 1
        layer_blocks = []

    # coarse infomation: at `coarse_level` each pair contributes exactly two
    # lengths (src, tgt), hence the view(-1, 2).
    coarse_level = config.coarse_level
    pts_num_coarse = input_batches_len[coarse_level].view(-1, 2)
    b_size = pts_num_coarse.shape[0]
    src_pts_max, tgt_pts_max = pts_num_coarse.amax(dim=0)
    coarse_pcd = input_points[coarse_level]  # .numpy()
    coarse_matches= []
    coarse_flow = []
    src_ind_coarse_split= [] # src_feats shape :[b_size * src_pts_max]
    src_ind_coarse = []
    tgt_ind_coarse_split= []
    tgt_ind_coarse = []
    accumu = 0
    src_mask = torch.zeros([b_size, src_pts_max], dtype=torch.bool)
    tgt_mask = torch.zeros([b_size, tgt_pts_max], dtype=torch.bool)

    for entry_id, cnt in enumerate( pts_num_coarse ): #input_batches_len[-1].numpy().reshape(-1,2)) :

        n_s_pts, n_t_pts = cnt

        '''split mask for bottlenect feats'''
        src_mask[entry_id][:n_s_pts] = 1
        tgt_mask[entry_id][:n_t_pts] = 1

        '''split indices of bottleneck feats'''
        src_ind_coarse_split.append( torch.arange( n_s_pts ) + entry_id * src_pts_max )
        tgt_ind_coarse_split.append( torch.arange( n_t_pts ) + entry_id * tgt_pts_max )
        src_ind_coarse.append( torch.arange( n_s_pts ) + accumu )
        tgt_ind_coarse.append( torch.arange( n_t_pts ) + accumu + n_s_pts )

        '''get match at coarse level'''
        c_src_pcd_np = coarse_pcd[accumu : accumu + n_s_pts].numpy()
        c_tgt_pcd_np = coarse_pcd[accumu + n_s_pts: accumu + n_s_pts + n_t_pts].numpy()
        #interpolate flow from the full-resolution source cloud to the coarse points
        f_src_pcd = batched_points_list[entry_id * 2]
        c_flow = blend_scene_flow( c_src_pcd_np, f_src_pcd, sflow_list[entry_id].numpy(), knn=3)
        # deform by interpolated flow, then apply the rigid pose before matching
        c_src_pcd_deformed = c_src_pcd_np + c_flow
        s_pc_wrapped = (np.matmul( batched_rot[entry_id].numpy(), c_src_pcd_deformed.T ) + batched_trn [entry_id].numpy()).T
        coarse_match_gt = torch.from_numpy( multual_nn_correspondence(s_pc_wrapped , c_tgt_pcd_np , search_radius=config['coarse_match_radius']) )# 0.1m scaled
        coarse_matches.append(coarse_match_gt)
        coarse_flow.append(torch.from_numpy(c_flow) )

        accumu = accumu + n_s_pts + n_t_pts

        vis=False # for debug
        if vis :
            viz_coarse_nn_correspondence_mayavi(c_src_pcd_np, c_tgt_pcd_np, coarse_match_gt, scale_factor=0.02)

    src_ind_coarse_split = torch.cat(src_ind_coarse_split)
    tgt_ind_coarse_split = torch.cat(tgt_ind_coarse_split)
    src_ind_coarse = torch.cat(src_ind_coarse)
    tgt_ind_coarse = torch.cat(tgt_ind_coarse)

    dict_inputs = {
        'src_pcd_list': src_pcd_list,
        'tgt_pcd_list': tgt_pcd_list,
        'points': input_points,
        'neighbors': input_neighbors,
        'pools': input_pools,
        'upsamples': input_upsamples,
        'features': batched_features.float(),
        'stack_lengths': input_batches_len,
        'coarse_matches': coarse_matches,
        'coarse_flow' : coarse_flow,
        'src_mask': src_mask,
        'tgt_mask': tgt_mask,
        'src_ind_coarse_split': src_ind_coarse_split,
        'tgt_ind_coarse_split': tgt_ind_coarse_split,
        'src_ind_coarse': src_ind_coarse,
        'tgt_ind_coarse': tgt_ind_coarse,
        'batched_rot': batched_rot,
        'batched_trn': batched_trn,
        'sflow_list': sflow_list,
        "metric_index_list": metric_index_list
    }

    return dict_inputs
def calibrate_neighbors(dataset, config, collate_fn, keep_ratio=0.8, samples_threshold=2000):
    """Empirically calibrate the per-layer neighbor-count caps for KPConv.

    Runs ``collate_fn`` over samples of ``dataset`` (at most one epoch),
    histograms the neighborhood sizes observed at every pyramid layer, and
    returns for each layer the neighbor count below which ``keep_ratio`` of
    all observed neighborhoods fall.

    Args:
        dataset: indexable dataset of raw samples.
        config: KPFCN config; reads ``deform_radius`` and ``num_layers``.
        collate_fn: batch collate function (e.g. ``collate_fn_3dmatch``).
        keep_ratio: fraction of neighborhoods that must be kept un-truncated.
        samples_threshold: stop once every layer has seen this many samples.

    Returns:
        np.ndarray of shape (num_layers,) with the per-layer neighbor limits.
    """
    # Upper bound on the neighbor count: point count of a ball of radius
    # (deform_radius + 1) at unit grid density.
    hist_n = int(np.ceil(4 / 3 * np.pi * (config.deform_radius + 1) ** 3))
    neighb_hists = np.zeros((config.num_layers, hist_n), dtype=np.int32)

    # Accumulate the histogram of neighborhood sizes over at most one epoch.
    for i in range(len(dataset)):
        # Use the uncapped bound for every layer while calibrating.
        # (Was a hard-coded "[hist_n] * 5", which breaks for configs with
        # more than five layers.)
        batched_input = collate_fn([dataset[i]], config, neighborhood_limits=[hist_n] * config.num_layers)

        # Indices >= the stack size are "shadow" padding neighbors; count only
        # the valid ones per query point, then histogram the counts per layer.
        counts = [torch.sum(neighb_mat < neighb_mat.shape[0], dim=1).numpy() for neighb_mat in batched_input['neighbors']]
        hists = [np.bincount(c, minlength=hist_n)[:hist_n] for c in counts]
        neighb_hists += np.vstack(hists)

        # Stop early once every layer has enough observations.
        if np.min(np.sum(neighb_hists, axis=1)) > samples_threshold:
            break

    # For each layer, the smallest cap that keeps `keep_ratio` of all observed
    # neighborhoods fully intact.
    cumsum = np.cumsum(neighb_hists.T, axis=0)
    percentiles = np.sum(cumsum < (keep_ratio * cumsum[hist_n - 1, :]), axis=0)

    neighborhood_limits = percentiles
    print('\n')

    return neighborhood_limits
def get_datasets(config):
    """Instantiate the train/val/test datasets for the configured benchmark.

    Augmentation is enabled on the training split only.
    """
    if config.dataset == '3dmatch':
        dataset_cls = _3DMatch
    elif config.dataset == '4dmatch':
        dataset_cls = _4DMatch
    else:
        raise NotImplementedError

    train_set = dataset_cls(config, 'train', data_augmentation=True)
    val_set = dataset_cls(config, 'val', data_augmentation=False)
    test_set = dataset_cls(config, 'test', data_augmentation=False)
    return train_set, val_set, test_set
def get_dataloader(dataset, config, shuffle=True, neighborhood_limits=None):
    """Wrap `dataset` in a DataLoader whose collate function builds KPFCN inputs.

    When `neighborhood_limits` is None they are calibrated from the dataset
    first; the limits used are returned alongside the loader so they can be
    reused for the other splits.
    """
    collate_by_benchmark = {
        '4dmatch': collate_fn_4dmatch,
        '3dmatch': collate_fn_3dmatch,
    }
    if config.dataset not in collate_by_benchmark:
        raise NotImplementedError()
    collate_fn = collate_by_benchmark[config.dataset]

    if neighborhood_limits is None:
        neighborhood_limits = calibrate_neighbors(dataset, config['kpfcn_config'], collate_fn=collate_fn)
    print("neighborhood:", neighborhood_limits)

    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=config['batch_size'],
        shuffle=shuffle,
        num_workers=config['num_workers'],
        collate_fn=partial(collate_fn, config=config['kpfcn_config'], neighborhood_limits=neighborhood_limits),
        drop_last=False,
    )

    return dataloader, neighborhood_limits
if __name__ == '__main__':
pass
| 24,996 | 37.875583 | 171 | py |
lepard | lepard-main/datasets/_3dmatch.py | import os, sys, glob, torch
# sys.path.append("../")
[sys.path.append(i) for i in ['.', '..']]
import numpy as np
import torch
import random
from scipy.spatial.transform import Rotation
from torch.utils.data import Dataset
from lib.benchmark_utils import to_o3d_pcd, to_tsfm, KDTree_corr
from lib.utils import load_obj
from lib.benchmark_utils import to_o3d_pcd, to_tsfm, get_correspondences
class _3DMatch(Dataset):
    """3DMatch pairwise rigid-registration dataset, using the splits prepared
    by Predator. Each item yields a (src, tgt) cloud pair with ground-truth
    pose and fine-level correspondences."""

    def __init__(self, config,split, data_augmentation=True):
        super(_3DMatch, self).__init__()

        assert split in ['train','val','test']

        # "overfit" experiments keep only one batch worth of pairs per split.
        if 'overfit' in config.exp_dir:
            d_slice = config.batch_size
        else :
            d_slice = None

        self.infos = self.read_entries( config.split[split] , config.data_root, d_slice=d_slice )

        self.base_dir = config.data_root
        self.data_augmentation = data_augmentation
        self.config = config

        self.rot_factor = 1.            # divisor of the 2*pi random-rotation range
        self.augment_noise = config.augment_noise
        self.max_points = 30000         # per-cloud cap; larger clouds are randomly subsampled
        self.overlap_radius = 0.0375    # radius for ground-truth correspondence search

    def read_entries (self, split, data_root, d_slice=None, shuffle= True):
        # Load the pickled split metadata (dict of parallel lists: 'rot',
        # 'trans', 'src', 'tgt', ...), optionally truncated to d_slice entries.
        # NOTE(review): `data_root` and `shuffle` are accepted but unused here.
        infos = load_obj(split)  # we use the split prepared by Predator
        if d_slice:
            for k, v in infos.items():
                infos[k] = v[:d_slice]
        return infos

    def __len__(self):
        return len(self.infos['rot'])

    def __getitem__(self, item, debug=False):
        """Load one pair; returns (src_pcd, tgt_pcd, src_feats, tgt_feats,
        correspondences, rot, trans, gt_cov)."""

        # get transformation
        rot = self.infos['rot'][item]
        trans = self.infos['trans'][item]
        if 'gt_cov' in self.infos:
            gt_cov = self.infos['gt_cov'][item]
        else :
            gt_cov = None

        # get pointcloud
        src_path = os.path.join(self.base_dir, self.infos['src'][item])
        tgt_path = os.path.join(self.base_dir, self.infos['tgt'][item])
        src_pcd = torch.load(src_path)
        tgt_pcd = torch.load(tgt_path)

        # if we get too many points, we do some downsampling
        if (src_pcd.shape[0] > self.max_points):
            idx = np.random.permutation(src_pcd.shape[0])[:self.max_points]
            src_pcd = src_pcd[idx]
        if (tgt_pcd.shape[0] > self.max_points):
            idx = np.random.permutation(tgt_pcd.shape[0])[:self.max_points]
            tgt_pcd = tgt_pcd[idx]

        if debug:
            import mayavi.mlab as mlab
            c_red = (224. / 255., 0 / 255., 125 / 255.)
            c_pink = (224. / 255., 75. / 255., 232. / 255.)
            c_blue = (0. / 255., 0. / 255., 255. / 255.)
            scale_factor = 0.02
            # mlab.points3d(s_pc[ :, 0] , s_pc[ :, 1], s_pc[:, 2], scale_factor=scale_factor , color=c_blue)
            mlab.points3d(src_pcd[ :, 0] , src_pcd[ :, 1], src_pcd[:, 2], scale_factor=scale_factor , color=c_red)
            mlab.points3d(tgt_pcd[ :, 0] , tgt_pcd[ :, 1], tgt_pcd[:, 2], scale_factor=scale_factor , color=c_blue)
            mlab.show()

        # add gaussian noise
        if self.data_augmentation:
            # rotate the point cloud
            euler_ab = np.random.rand(3) * np.pi * 2 / self.rot_factor  # anglez, angley, anglex
            rot_ab = Rotation.from_euler('zyx', euler_ab).as_matrix()
            # Randomly rotate either side; fold the extra rotation into the
            # ground-truth pose so (rot, trans) stays consistent.
            if (np.random.rand(1)[0] > 0.5):
                src_pcd = np.matmul(rot_ab, src_pcd.T).T
                rot = np.matmul(rot, rot_ab.T)
            else:
                tgt_pcd = np.matmul(rot_ab, tgt_pcd.T).T
                rot = np.matmul(rot_ab, rot)
                trans = np.matmul(rot_ab, trans)

            # uniform jitter in [-0.5, 0.5) * augment_noise per coordinate
            src_pcd += (np.random.rand(src_pcd.shape[0], 3) - 0.5) * self.augment_noise
            tgt_pcd += (np.random.rand(tgt_pcd.shape[0], 3) - 0.5) * self.augment_noise

        # get correspondence at fine level
        tsfm = to_tsfm(rot, trans)
        correspondences = get_correspondences(to_o3d_pcd(src_pcd), to_o3d_pcd(tgt_pcd), tsfm,self.overlap_radius)

        if debug:
            import mayavi.mlab as mlab
            c_red = (224. / 255., 0 / 255., 125 / 255.)
            c_pink = (224. / 255., 75. / 255., 232. / 255.)
            c_blue = (0. / 255., 0. / 255., 255. / 255.)
            scale_factor = 0.02
            # mlab.points3d(s_pc[ :, 0] , s_pc[ :, 1], s_pc[:, 2], scale_factor=scale_factor , color=c_blue)
            mlab.points3d(src_pcd[ :, 0] , src_pcd[ :, 1], src_pcd[:, 2], scale_factor=scale_factor , color=c_red)
            mlab.points3d(tgt_pcd[ :, 0] , tgt_pcd[ :, 1], tgt_pcd[:, 2], scale_factor=scale_factor , color=c_blue)
            mlab.show()

        # ensure translation is a column vector (3, 1)
        if (trans.ndim == 1):
            trans = trans[:, None]

        # features are a constant 1-channel "occupancy" signal
        src_feats = np.ones_like(src_pcd[:, :1]).astype(np.float32)
        tgt_feats = np.ones_like(tgt_pcd[:, :1]).astype(np.float32)
        rot = rot.astype(np.float32)
        trans = trans.astype(np.float32)

        return src_pcd, tgt_pcd, src_feats, tgt_feats, correspondences, rot, trans, gt_cov
if __name__ == '__main__':
    # Manual smoke test: iterate the whole test split and visualize samples.
    from lib.utils import load_config
    from easydict import EasyDict as edict
    from lib.tictok import Timers
    import yaml

    def join(loader, node):
        """YAML constructor for the custom `!join` tag: joins a sequence with '_'."""
        seq = loader.construct_sequence(node)
        return '_'.join([str(i) for i in seq])
    yaml.add_constructor('!join', join)

    config = "/home/liyang/workspace/Regformer/configs/train/3dmatch.yaml"
    with open(config, 'r') as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config = edict(config)
    config.timers = Timers()

    D = _3DMatch(config, "test")
    for i in range(len(D)):
        # A bad entry should not abort the sweep, but Ctrl-C / SystemExit must
        # still propagate (was a bare `except:` that swallowed everything).
        try:
            if i % 1000 == 0:
                print(i, "/", len(D))
            D.__getitem__(i, debug=True)
        except Exception:
            pass
| 5,766 | 33.327381 | 116 | py |
lepard | lepard-main/lib/tester.py | from lib.trainer import Trainer
import torch
from tqdm import tqdm
from models.loss import MatchMotionLoss as MML
import numpy as np
from models.matching import Matching as CM
import math
class _3DMatchTester(Trainer):
    """
    3DMatch tester: evaluates coarse matching + RANSAC registration on the
    3DMatch / 3DLoMatch benchmark.
    """

    def __init__(self, args):
        Trainer.__init__(self, args)

    def test(self):
        """Run the benchmark n times (RANSAC is nondeterministic) and print the
        averaged registration recall, inlier rate and feature match recall."""
        n = 3

        afmr = 0.
        arr = 0
        air = 0

        for i in range(n):  # combat ransac nondeterministic
            thr = 0.05
            rr, ir, fmr = self.test_thr(thr)
            afmr += fmr
            arr += rr
            air += ir
            print("conf_threshold", thr, "registration recall:", rr, " Inlier rate:", ir, "FMR:", fmr)

        print("average registration recall:", arr / n, afmr / n, air / n)

    def test_thr(self, conf_threshold=None):
        """One full pass over the test set at the given match-confidence threshold.

        Returns:
            (registration recall, mean inlier ratio, feature match recall)
        """
        # FIX: was `math.ceil(len(...) // batch_size)` — the floor division runs
        # first, so ceil was a no-op and the last partial batch was dropped.
        num_iter = math.ceil(len(self.loader['test'].dataset) / self.loader['test'].batch_size)
        c_loader_iter = self.loader['test'].__iter__()

        self.model.eval()

        success1 = 0.
        IR = 0.
        FMR = 0.

        with torch.no_grad():
            for idx in tqdm(range(num_iter)):  # loop through this epoch

                ##################################
                if self.timers: self.timers.tic('load batch')
                # FIX: use built-in next(); `iterator.next()` is Python-2 style
                # and was removed from PyTorch's DataLoader iterator.
                inputs = next(c_loader_iter)
                # Move tensors (and lists of tensors) to the device; leave
                # plain-python / numpy entries untouched.
                for k, v in inputs.items():
                    if type(v) == list:
                        inputs[k] = [item.to(self.device) for item in v]
                    elif type(v) in [dict, float, type(None), np.ndarray]:
                        pass
                    else:
                        inputs[k] = v.to(self.device)
                if self.timers: self.timers.toc('load batch')
                ##################################

                if self.timers: self.timers.tic('forward pass')
                data = self.model(inputs, timers=self.timers)  # [N1, C1], [N2, C2]
                if self.timers: self.timers.toc('forward pass')

                match_pred, _, _ = CM.get_match(data['conf_matrix_pred'], thr=conf_threshold, mutual=False)
                rot, trn = MML.ransac_regist_coarse(data['s_pcd'], data['t_pcd'], data['src_mask'], data['tgt_mask'], match_pred)
                ir = MML.compute_inlier_ratio(match_pred, data, inlier_thr=0.1).mean()
                rr1 = MML.compute_registration_recall(rot, trn, data, thr=0.2)  # 0.2m

                vis = False
                if vis:
                    pcd = data['points'][0].cpu().numpy()
                    lenth = data['stack_lengths'][0][0]
                    spcd, tpcd = pcd[:lenth], pcd[lenth:]

                    import mayavi.mlab as mlab
                    c_red = (224. / 255., 0 / 255., 125 / 255.)
                    c_pink = (224. / 255., 75. / 255., 232. / 255.)
                    c_blue = (0. / 255., 0. / 255., 255. / 255.)
                    scale_factor = 0.02
                    # before registration
                    mlab.points3d(spcd[:, 0], spcd[:, 1], spcd[:, 2], scale_factor=scale_factor,
                                  color=c_red)
                    mlab.points3d(tpcd[:, 0], tpcd[:, 1], tpcd[:, 2], scale_factor=scale_factor,
                                  color=c_blue)
                    mlab.show()
                    # after applying the estimated rigid transform
                    spcd = (np.matmul(rot, spcd.T) + trn).T
                    mlab.points3d(spcd[:, 0], spcd[:, 1], spcd[:, 2], scale_factor=scale_factor,
                                  color=c_red)
                    mlab.points3d(tpcd[:, 0], tpcd[:, 1], tpcd[:, 2], scale_factor=scale_factor,
                                  color=c_blue)
                    mlab.show()

                bs = len(rot)
                assert bs == 1
                success1 += bs * rr1
                IR += bs * ir
                FMR += (ir > 0.05).float()

        recall1 = success1 / len(self.loader['test'].dataset)
        IRate = IR / len(self.loader['test'].dataset)
        FMR = FMR / len(self.loader['test'].dataset)
        return recall1, IRate, FMR
def blend_anchor_motion (query_loc, reference_loc, reference_flow , knn=3, search_radius=0.1) :
    '''approximate flow on query points
    this function assume query points are sub- or un-sampled from reference locations
    @param query_loc:[m,3]
    @param reference_loc:[n,3]
    @param reference_flow:[n,3]
    @param knn:
    @return:
    blended_flow:[m,3]
    '''
    from datasets.utils import knn_point_np
    dists, idx = knn_point_np (knn, reference_loc, query_loc)
    # clamp zero distances (query coincides with a reference point)
    dists[dists < 1e-10] = 1e-10
    # neighbors beyond the search radius get a huge distance, i.e. ~zero weight
    mask = dists>search_radius
    dists[mask] = 1e+10
    # inverse-distance weights, normalized over the knn neighbors
    weight = 1.0 / dists
    weight = weight / np.sum(weight, -1, keepdims=True) # [B,N,3]
    blended_flow = np.sum (reference_flow [idx] * weight.reshape ([-1, knn, 1]), axis=1, keepdims=False)

    # Per-query validity: True when fewer than 3 of the knn neighbors are
    # out of radius, i.e. at least one in-radius anchor exists (for knn=3).
    # NOTE(review): the literal 3 assumes knn == 3 — confirm before changing knn.
    mask = mask.sum(axis=1)<3

    return blended_flow, mask
def compute_nrfmr( match_pred, data, recall_thr=0.04):
    """Non-rigid feature matching recall (NFMR) over one batch.

    The predicted matches serve as motion anchors: the motion of the
    benchmark's "metric" points is interpolated from the anchors via
    inverse-distance kNN blending and compared against the ground-truth
    warped positions (scene flow + rigid pose). Returns the mean fraction of
    metric points whose interpolated end position lies within `recall_thr`
    of the ground truth.
    """

    s_pcd, t_pcd = data['s_pcd'], data['t_pcd']

    s_pcd_raw = data ['src_pcd_list']
    sflow_list = data['sflow_list']
    metric_index_list = data['metric_index_list']

    batched_rot = data['batched_rot']  # B,3,3
    batched_trn = data['batched_trn']

    nrfmr = 0.

    for i in range ( len(s_pcd_raw)):

        # get the metric points' transformed position
        metric_index = metric_index_list[i]
        sflow = sflow_list[i]
        s_pcd_raw_i = s_pcd_raw[i]
        metric_pcd = s_pcd_raw_i [ metric_index ]
        metric_sflow = sflow [ metric_index ]
        metric_pcd_deformed = metric_pcd + metric_sflow
        metric_pcd_wrapped_gt = ( torch.matmul( batched_rot[i], metric_pcd_deformed.T) + batched_trn[i] ).T

        # use the match prediction as the motion anchor
        match_pred_i = match_pred[ match_pred[:, 0] == i ]
        s_id , t_id = match_pred_i[:,1], match_pred_i[:,2]
        s_pcd_matched= s_pcd[i][s_id]
        t_pcd_matched= t_pcd[i][t_id]
        motion_pred = t_pcd_matched - s_pcd_matched
        # interpolate the anchors' motion onto the metric points
        # NOTE(review): the returned validity mask is currently unused.
        metric_motion_pred, valid_mask = blend_anchor_motion(
            metric_pcd.cpu().numpy(), s_pcd_matched.cpu().numpy(), motion_pred.cpu().numpy(), knn=3, search_radius=0.1)
        metric_pcd_wrapped_pred = metric_pcd + torch.from_numpy(metric_motion_pred).to(metric_pcd)

        debug = False
        if debug:
            import mayavi.mlab as mlab
            c_red = (224. / 255., 0 / 255., 125 / 255.)
            c_pink = (224. / 255., 75. / 255., 232. / 255.)
            c_blue = (0. / 255., 0. / 255., 255. / 255.)
            scale_factor = 0.013
            metric_pcd_wrapped_gt = metric_pcd_wrapped_gt.cpu()
            metric_pcd_wrapped_pred = metric_pcd_wrapped_pred.cpu()
            err = metric_pcd_wrapped_pred - metric_pcd_wrapped_gt
            mlab.points3d(metric_pcd_wrapped_gt[:, 0], metric_pcd_wrapped_gt[:, 1], metric_pcd_wrapped_gt[:, 2], scale_factor=scale_factor, color=c_pink)
            mlab.points3d(metric_pcd_wrapped_pred[ :, 0] , metric_pcd_wrapped_pred[ :, 1], metric_pcd_wrapped_pred[:, 2], scale_factor=scale_factor , color=c_blue)
            mlab.quiver3d(metric_pcd_wrapped_gt[:, 0], metric_pcd_wrapped_gt[:, 1], metric_pcd_wrapped_gt[:, 2], err[:, 0], err[:, 1], err[:, 2],
                          scale_factor=1, mode='2ddash', line_width=1.)
            mlab.show()

        # fraction of metric points recalled within the threshold
        dist = torch.sqrt( torch.sum( (metric_pcd_wrapped_pred - metric_pcd_wrapped_gt)**2, dim=1 ) )

        r = (dist < recall_thr).float().sum() / len(dist)
        nrfmr = nrfmr + r

    nrfmr = nrfmr /len(s_pcd_raw)

    return nrfmr
class _4DMatchTester(Trainer):
    """
    4DMatch tester: evaluates non-rigid matching (inlier ratio and NFMR)
    over a range of match-confidence thresholds.
    """

    def __init__(self, args):
        Trainer.__init__(self, args)

    def test(self):
        """Sweep several confidence thresholds and report NFMR / inlier rate /
        mean number of matches, with wall-clock timing per sweep."""
        for thr in [0.05, 0.1, 0.2]:
            import time
            start = time.time()
            ir, fmr, nspl = self.test_thr(thr)
            print("conf_threshold", thr, "NFMR:", fmr, " Inlier rate:", ir, "Number sample:", nspl)
            print("time costs:", time.time() - start)

    def test_thr(self, conf_threshold=None):
        """One full pass over the test set at the given confidence threshold.

        Returns:
            (mean inlier ratio, NFMR, mean number of predicted matches)
        """
        # FIX: was `math.ceil(len(...) // batch_size)` — the floor division runs
        # first, so ceil was a no-op and the last partial batch was dropped.
        num_iter = math.ceil(len(self.loader['test'].dataset) / self.loader['test'].batch_size)
        c_loader_iter = self.loader['test'].__iter__()

        self.model.eval()

        assert self.loader['test'].batch_size == 1

        IR = 0.
        NR_FMR = 0.

        inlier_thr = recall_thr = 0.04

        n_sample = 0.

        with torch.no_grad():
            for idx in tqdm(range(num_iter)):  # loop through this epoch

                ##################################
                if self.timers: self.timers.tic('load batch')
                # FIX: use built-in next(); `iterator.next()` is Python-2 style
                # and was removed from PyTorch's DataLoader iterator.
                inputs = next(c_loader_iter)
                # Move tensors (and lists of tensors) to the device; leave
                # plain-python / numpy entries untouched.
                for k, v in inputs.items():
                    if type(v) == list:
                        inputs[k] = [item.to(self.device) for item in v]
                    elif type(v) in [dict, float, type(None), np.ndarray]:
                        pass
                    else:
                        inputs[k] = v.to(self.device)
                if self.timers: self.timers.toc('load batch')
                ##################################

                if self.timers: self.timers.tic('forward pass')
                data = self.model(inputs, timers=self.timers)  # [N1, C1], [N2, C2]
                if self.timers: self.timers.toc('forward pass')

                match_pred, _, _ = CM.get_match(data['conf_matrix_pred'], thr=conf_threshold, mutual=True)
                ir = MML.compute_inlier_ratio(match_pred, data, inlier_thr=inlier_thr, s2t_flow=data['coarse_flow'][0][None])[0]
                nrfmr = compute_nrfmr(match_pred, data, recall_thr=recall_thr)

                IR += ir
                NR_FMR += nrfmr

                n_sample += match_pred.shape[0]

        IRate = IR / len(self.loader['test'].dataset)
        NR_FMR = NR_FMR / len(self.loader['test'].dataset)
        n_sample = n_sample / len(self.loader['test'].dataset)

        if self.timers: self.timers.print()

        return IRate, NR_FMR, n_sample
def get_trainer(config):
    """Return the benchmark-specific tester for ``config.dataset``."""
    testers = {
        '3dmatch': _3DMatchTester,
        '4dmatch': _4DMatchTester,
    }
    if config.dataset not in testers:
        raise NotImplementedError
    return testers[config.dataset](config)
| 10,544 | 34.385906 | 164 | py |
lepard | lepard-main/lib/benchmark_utils.py | import os,re,sys,json,yaml,random, glob, argparse, torch, pickle
from tqdm import tqdm
import numpy as np
from scipy.spatial.transform import Rotation
import open3d as o3d
_EPS = 1e-7 # To prevent division by zero
def viz_coarse_nn_correspondence_mayavi(s_pc, t_pc, good_c, bad_c, f_src_pcd=None, f_tgt_pcd=None, scale_factor=0.02):
    '''
    Render two point clouds with their correspondences in mayavi:
    good correspondences are drawn green, bad ones red.
    @param s_pc:  [S,3]
    @param t_pc:  [T,3]
    @param correspondence: [2,K]
    @param f_src_pcd: [S1,3]
    @param f_tgt_pcd: [T1,3]
    @param scale_factor:
    @return:
    '''
    import mayavi.mlab as mlab
    c_red = (224. / 255., 0 / 255., 0 / 255.)
    c_pink = (224. / 255., 75. / 255., 232. / 255.)
    c_blue = (0. / 255., 0. / 255., 255. / 255.)
    c_green = (0. / 255., 255. / 255., 0. / 255.)
    c_gray1 = (255 / 255., 255 / 255., 125 / 255.)
    c_gray2 = (125. / 255., 125. / 255., 255. / 255.)

    # Background clouds: prefer the fine-resolution clouds (drawn smaller)
    # if provided, otherwise draw the coarse clouds themselves.
    if f_src_pcd is not None:
        mlab.points3d(f_src_pcd[:, 0], f_src_pcd[:, 1], f_src_pcd[:, 2], scale_factor=scale_factor * 0.25,
                      color=c_gray1)
    else:
        mlab.points3d(s_pc[:, 0], s_pc[:, 1], s_pc[:, 2], scale_factor=scale_factor * 0.75, color=c_gray1)

    if f_tgt_pcd is not None:
        mlab.points3d(f_tgt_pcd[:, 0], f_tgt_pcd[:, 1], f_tgt_pcd[:, 2], scale_factor=scale_factor * 0.25,
                      color=c_gray2)
    else:
        mlab.points3d(t_pc[:, 0], t_pc[:, 1], t_pc[:, 2], scale_factor=scale_factor * 0.75, color=c_gray2)

    # End points and displacement vectors of each correspondence set.
    s_cpts_god = s_pc[good_c[0]]
    t_cpts_god = t_pc[good_c[1]]
    flow_good = t_cpts_god - s_cpts_god

    s_cpts_bd = s_pc[bad_c[0]]
    t_cpts_bd = t_pc[bad_c[1]]
    flow_bad = t_cpts_bd - s_cpts_bd

    def match_draw(s_cpts, t_cpts, flow, color):
        # source endpoints blue, target endpoints pink, connecting arrows in `color`
        mlab.points3d(s_cpts[:, 0], s_cpts[:, 1], s_cpts[:, 2], scale_factor=scale_factor * 0.35, color=c_blue)
        mlab.points3d(t_cpts[:, 0], t_cpts[:, 1], t_cpts[:, 2], scale_factor=scale_factor * 0.35, color=c_pink)
        mlab.quiver3d(s_cpts[:, 0], s_cpts[:, 1], s_cpts[:, 2], flow[:, 0], flow[:, 1], flow[:, 2],
                      scale_factor=1, mode='2ddash', line_width=1., color=color)

    match_draw(s_cpts_god, t_cpts_god, flow_good, c_green)
    match_draw(s_cpts_bd, t_cpts_bd, flow_bad, c_red)

    mlab.show()
def correspondence_viz(src_raw, tgt_raw, src_pcd, tgt_pcd, corrs, inlier_mask, max=200):
    """Visualize up to *max* randomly sampled correspondences, split by inlier mask.

    The target clouds are shifted along +x so both clouds are visible side by side.
    """
    keep = np.random.permutation(corrs.shape[1])[:max]
    corrs = corrs[:, keep]
    inlier_mask = inlier_mask[keep]
    good_c = corrs[:, inlier_mask]
    bad_c = corrs[:, ~inlier_mask]
    # offset the target clouds so they do not overlap the source clouds
    offset = np.array([[1.45, 0, 0]])
    tgt_pcd = tgt_pcd + offset
    tgt_raw = tgt_raw + offset
    viz_coarse_nn_correspondence_mayavi(src_pcd, tgt_pcd, good_c, bad_c, src_raw, tgt_raw, scale_factor=0.07)
def fmr_wrt_distance(data, split, inlier_ratio_threshold=0.05):
    """
    Feature-match recall as a function of the inlier distance threshold.

    Args:
        data: [N, M] array of residual distances, one row per evaluated pair.
        split: sequence of (start, end) row ranges, one per scene.
        inlier_ratio_threshold: a pair is recalled when its inlier ratio
            exceeds this value.

    Returns:
        List of 20 FMR percentages for distance thresholds 0.01 ... 0.20.
    """
    fmr_wrt_distance = []
    for distance_threshold in range(1, 21):
        inlier_ratios = []
        distance_threshold /= 100.0
        for idx in range(data.shape[0]):
            inlier_ratio = (data[idx] < distance_threshold).mean()
            inlier_ratios.append(inlier_ratio)
        fmr = 0
        for ele in split:
            fmr += (np.array(inlier_ratios[ele[0]:ele[1]]) > inlier_ratio_threshold).mean()
        # average over the scenes actually present in `split` rather than the
        # hard-coded 8 scenes of the 3DMatch benchmark
        fmr /= len(split)
        fmr_wrt_distance.append(fmr * 100)
    return fmr_wrt_distance
def fmr_wrt_inlier_ratio(data, split, distance_threshold=0.1):
    """
    Feature-match recall as a function of the inlier *ratio* threshold.

    Args:
        data: [N, M] array of residual distances, one row per evaluated pair.
        split: sequence of (start, end) row ranges, one per scene.
        distance_threshold: a residual below this distance counts as an inlier.

    Returns:
        List of 20 FMR percentages for inlier-ratio thresholds 0.01 ... 0.20.
    """
    fmr_wrt_inlier = []
    for inlier_ratio_threshold in range(1, 21):
        inlier_ratios = []
        inlier_ratio_threshold /= 100.0
        for idx in range(data.shape[0]):
            inlier_ratio = (data[idx] < distance_threshold).mean()
            inlier_ratios.append(inlier_ratio)
        fmr = 0
        for ele in split:
            fmr += (np.array(inlier_ratios[ele[0]:ele[1]]) > inlier_ratio_threshold).mean()
        # average over the scenes actually present in `split` rather than the
        # hard-coded 8 scenes of the 3DMatch benchmark
        fmr /= len(split)
        fmr_wrt_inlier.append(fmr * 100)
    return fmr_wrt_inlier
def to_tensor(array):
    """Convert a numpy array to a float32 torch tensor; tensors pass through unchanged."""
    if isinstance(array, torch.Tensor):
        return array
    return torch.from_numpy(array).float()
def to_array(tensor):
    """Convert a torch tensor to a numpy array; numpy arrays pass through unchanged."""
    if isinstance(tensor, np.ndarray):
        return tensor
    if tensor.device == torch.device('cpu'):
        return tensor.numpy()
    # GPU tensors must be moved to host memory first
    return tensor.cpu().numpy()
def to_tsfm(rot, trans):
    """Assemble a 4x4 homogeneous transform from a 3x3 rotation and a translation."""
    transform = np.identity(4)
    transform[0:3, 0:3] = rot
    transform[0:3, 3] = np.ravel(trans)
    return transform
def to_o3d_pcd(xyz):
    """
    Convert tensor/array to open3d PointCloud
    xyz: [N, 3] point coordinates (torch tensor or numpy array)
    Returns a new o3d.geometry.PointCloud; the input is not modified.
    """
    pcd = o3d.geometry.PointCloud()
    # to_array handles both torch tensors (cpu/gpu) and numpy arrays
    pcd.points = o3d.utility.Vector3dVector(to_array(xyz))
    return pcd
def to_o3d_feats(embedding):
    """
    Convert tensor/array to open3d features
    embedding: [N, C] per-point feature descriptors
    Returns an open3d Feature whose data is the [C, N] transpose, as open3d expects.
    NOTE(review): `o3d.registration` is the legacy namespace; open3d >= 0.10 moved it
    to `o3d.pipelines.registration` — confirm the pinned open3d version.
    """
    feats = o3d.registration.Feature()
    feats.data = to_array(embedding).T
    return feats
def get_correspondences(src_pcd, tgt_pcd, trans, search_voxel_size, K=None):
    """
    Find src->tgt correspondences within a search radius after aligning the clouds.

    Note: *src_pcd* is transformed in place by *trans*.

    Args:
        src_pcd: open3d PointCloud (source).
        tgt_pcd: open3d PointCloud (target).
        trans: 4x4 homogeneous transform aligning src to tgt.
        search_voxel_size: radius for the KD-tree neighborhood search.
        K: optional cap on the number of matches kept per source point.

    Returns:
        torch tensor [M, 2] of (src_idx, tgt_idx) index pairs.
    """
    src_pcd.transform(trans)
    # Bug fix: forward the caller's K instead of hard-coding K=None,
    # which silently ignored the argument.
    correspondences = KDTree_corr(src_pcd, tgt_pcd, search_voxel_size, K=K)
    correspondences = torch.from_numpy(correspondences)
    return correspondences
def KDTree_corr ( src_pcd_transformed, tgt_pcd, search_voxel_size, K=None):
    """Collect all (src, tgt) index pairs whose points lie within `search_voxel_size`.

    Args:
        src_pcd_transformed: open3d PointCloud already aligned to the target frame.
        tgt_pcd: open3d PointCloud the KD-tree is built over.
        search_voxel_size: radius for the per-point neighborhood query.
        K: if given, keep at most the K nearest of the returned neighbors per point.

    Returns:
        np.ndarray [M, 2] of (src_idx, tgt_idx) pairs (empty 1-D array when no matches).
    """
    pcd_tree = o3d.geometry.KDTreeFlann(tgt_pcd)
    correspondences = []
    for i, point in enumerate(src_pcd_transformed.points):
        # radius search returns neighbor indices sorted by distance
        [count, idx, _] = pcd_tree.search_radius_vector_3d(point, search_voxel_size)
        if K is not None:
            idx = idx[:K]
        for j in idx:
            correspondences.append([i, j])
    correspondences = np.array(correspondences)
    return correspondences
def get_blue():
    """Return the RGB triple used to render clouds in blue."""
    blue = [0, 0.651, 0.929]
    return blue
def get_yellow():
    """Return the RGB triple used to render clouds in yellow."""
    yellow = [1, 0.706, 0]
    return yellow
def random_sample(pcd, feats, N):
    """
    Randomly sample exactly N points together with their associated features.

    pcd: [n, 3] torch tensor or numpy array
    feats: [n, C]
    Sampling is without replacement when n > N and with replacement when n < N;
    when n == N the inputs are returned untouched.
    """
    if isinstance(pcd, torch.Tensor):
        num_pts = pcd.size(0)
    elif isinstance(pcd, np.ndarray):
        num_pts = pcd.shape[0]
    if num_pts == N:
        return pcd, feats
    if num_pts > N:
        sel = np.random.permutation(num_pts)[:N]
    else:
        sel = np.random.choice(num_pts, N)
    return pcd[sel], feats[sel]
def get_angle_deviation(R_pred, R_gt):
    """
    Angle (in degrees, within [0, 180]) between predicted and ground-truth rotations.

    Input:
        R_pred: [B,3,3]
        R_gt:   [B,3,3]
    Return:
        degs:   [B]
    """
    relative = np.matmul(R_pred, R_gt.transpose(0, 2, 1))
    trace = np.trace(relative, 0, 1, 2)
    # clip guards against values just outside [-1, 1] from floating-point noise
    cos_theta = np.clip((trace - 1) / 2, -1, 1)
    return np.arccos(cos_theta) / np.pi * 180
def ransac_pose_estimation(src_pcd, tgt_pcd, src_feat, tgt_feat, mutual = False, distance_threshold = 0.05, ransac_n = 3):
    """
    RANSAC pose estimation with two checkers
    We follow D3Feat to set ransac_n = 3 for 3DMatch and ransac_n = 4 for KITTI.
    For 3DMatch dataset, we observe significant improvement after changing ransac_n from 4 to 3.

    When `mutual` is True, correspondences are pre-selected by a mutual nearest-neighbor
    check on the feature score matrix; otherwise open3d matches features internally.
    Returns the estimated 4x4 transformation matrix.

    NOTE(review): `o3d.registration` is the legacy open3d namespace (moved to
    `o3d.pipelines.registration` in open3d >= 0.10) — confirm the pinned version.
    """
    if(mutual):
        if(torch.cuda.device_count()>=1):
            device = torch.device('cuda')
        else:
            device = torch.device('cpu')
        src_feat, tgt_feat = to_tensor(src_feat), to_tensor(tgt_feat)
        # full feature similarity matrix, then keep mutually-best pairs only
        scores = torch.matmul(src_feat.to(device), tgt_feat.transpose(0,1).to(device)).cpu()
        selection = mutual_selection(scores[None,:,:])[0]
        row_sel, col_sel = np.where(selection)
        corrs = o3d.utility.Vector2iVector(np.array([row_sel,col_sel]).T)
        src_pcd = to_o3d_pcd(src_pcd)
        tgt_pcd = to_o3d_pcd(tgt_pcd)
        # NOTE(review): this branch hard-codes ransac_n=4 and ignores the `ransac_n`
        # parameter — looks unintended given the docstring; confirm before changing.
        result_ransac = o3d.registration.registration_ransac_based_on_correspondence(
            source=src_pcd, target=tgt_pcd,corres=corrs,
            max_correspondence_distance=distance_threshold,
            estimation_method=o3d.registration.TransformationEstimationPointToPoint(False),
            ransac_n=4,
            criteria=o3d.registration.RANSACConvergenceCriteria(50000, 1000))
    else:
        src_pcd = to_o3d_pcd(src_pcd)
        tgt_pcd = to_o3d_pcd(tgt_pcd)
        src_feats = to_o3d_feats(src_feat)
        tgt_feats = to_o3d_feats(tgt_feat)
        # feature matching + RANSAC with edge-length and distance checkers
        result_ransac = o3d.registration.registration_ransac_based_on_feature_matching(
            src_pcd, tgt_pcd, src_feats, tgt_feats,distance_threshold,
            o3d.registration.TransformationEstimationPointToPoint(False), ransac_n,
            [o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
             o3d.registration.CorrespondenceCheckerBasedOnDistance(distance_threshold)],
            o3d.registration.RANSACConvergenceCriteria(50000, 1000))
    return result_ransac.transformation
def get_inlier_ratio(src_pcd, tgt_pcd, src_feat, tgt_feat, rot, trans, inlier_distance_threshold=0.1):
    """
    Compute inlier ratios with and without a mutual nearest-neighbor check.

    Returns a dict with keys 'w' (with mutual check) and 'wo' (without), each
    holding the per-match 'distance' array and the scalar 'inlier_ratio'.
    """
    src_pcd, tgt_pcd = to_tensor(src_pcd), to_tensor(tgt_pcd)
    src_feat, tgt_feat = to_tensor(src_feat), to_tensor(tgt_feat)
    rot, trans = to_tensor(rot), to_tensor(trans)

    results = {'w': dict(), 'wo': dict()}

    device = torch.device('cuda') if torch.cuda.device_count() >= 1 else torch.device('cpu')

    # bring the source cloud into the target frame
    src_pcd = (torch.matmul(rot, src_pcd.transpose(0, 1)) + trans).transpose(0, 1)
    # feature similarity matrix, evaluated on GPU when available
    scores = torch.matmul(src_feat.to(device), tgt_feat.transpose(0, 1).to(device)).cpu()

    ########################################
    # 1. inlier ratio without mutual check: nearest neighbor in feature space
    nn_idx = scores.max(-1)[1]
    dist = torch.norm(src_pcd - tgt_pcd[nn_idx], dim=1)
    results['wo']['distance'] = dist.numpy()
    results['wo']['inlier_ratio'] = (dist < inlier_distance_threshold).float().mean()

    ########################################
    # 2. inlier ratio with mutual check
    selection = mutual_selection(scores[None, :, :])[0]
    row_sel, col_sel = np.where(selection)
    dist = torch.norm(src_pcd[row_sel] - tgt_pcd[col_sel], dim=1)
    results['w']['distance'] = dist.numpy()
    results['w']['inlier_ratio'] = (dist < inlier_distance_threshold).float().mean()

    return results
def mutual_selection(score_mat):
    """
    Return a boolean mask that is True exactly where an entry is the maximum
    of both its row and its column (mutually-best match).

    Args:
        score_mat: [B,N,N] (or [N,N], which is promoted to a batch of one);
            torch tensor or numpy array.
    Return:
        mutuals: [B,N,N] boolean numpy array
    """
    score_mat = to_array(score_mat)
    if score_mat.ndim == 2:
        score_mat = score_mat[None, :, :]

    mutuals = np.zeros_like(score_mat)
    for i in range(score_mat.shape[0]):  # loop through the batch
        c_mat = score_mat[i]
        flag_row = np.zeros_like(c_mat)
        flag_column = np.zeros_like(c_mat)

        max_along_row = np.argmax(c_mat, 1)[:, None]
        max_along_column = np.argmax(c_mat, 0)[None, :]
        np.put_along_axis(flag_row, max_along_row, 1, 1)
        np.put_along_axis(flag_column, max_along_column, 1, 0)
        # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `bool` is the documented replacement.
        mutuals[i] = (flag_row.astype(bool)) & (flag_column.astype(bool))
    return mutuals.astype(bool)
| 11,442 | 31.882184 | 122 | py |
lepard | lepard-main/lib/utils.py | import os,re,sys,json,yaml,random, argparse, torch, pickle
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from scipy.spatial.transform import Rotation
from sklearn.neighbors import NearestNeighbors
from scipy.spatial.distance import minkowski
_EPS = 1e-7 # To prevent division by zero
class Logger:
    """Append-only text logger writing to ``<path>/log``.

    Any log file left over from a previous run is deleted so each run starts
    with a fresh log.
    """

    def __init__(self, path):
        self.path = path
        target = self.path + '/log'
        if os.path.exists(target):
            os.remove(target)  # start with a clean log for this run
        self.fw = open(target, 'a')

    def write(self, text):
        """Append *text* and flush immediately so logs survive crashes."""
        self.fw.write(text)
        self.fw.flush()

    def close(self):
        """Close the underlying file handle."""
        self.fw.close()
def save_obj(obj, path):
    """Serialize *obj* to *path* as a pickle file."""
    with open(path, 'wb') as fh:
        pickle.dump(obj, fh)
def load_obj(path):
    """Deserialize and return the object pickled at *path*."""
    with open(path, 'rb') as fh:
        return pickle.load(fh)
def load_config(path):
    """
    Load a YAML config file and flatten its top-level sections into one dict.

    Args:
        path (str): path to the config file

    Returns:
        config (dict): the union of all second-level key/value pairs; when the
        same key appears in several sections, the later section wins.
    """
    with open(path, 'r') as fh:
        raw = yaml.safe_load(fh)
    return {key: value
            for section in raw.values()
            for key, value in section.items()}
def setup_seed(seed):
    """Seed python, numpy and torch RNGs and force deterministic cuDNN kernels."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # trade speed for reproducibility
    torch.backends.cudnn.deterministic = True
def square_distance(src, dst, normalised=False):
    """
    Pairwise squared Euclidean distance between two batched point sets.

    Args:
        src: source points, [B, N, C]
        dst: target points, [B, M, C]
        normalised: when True, assume both sets are unit-norm so that
            ||a||^2 + ||b||^2 == 2 and skip computing the norms.
    Returns:
        dist: per-point square distance, [B, N, M], clamped to >= 1e-12
    """
    dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
    if normalised:
        dist = dist + 2
    else:
        dist = dist + torch.sum(src ** 2, dim=-1)[:, :, None]
        dist = dist + torch.sum(dst ** 2, dim=-1)[:, None, :]
    # clamp away tiny negatives caused by floating-point cancellation
    return torch.clamp(dist, min=1e-12)
def validate_gradient(model):
    """
    Return False when any parameter gradient contains NaN or Inf, True otherwise.
    Parameters without a gradient are skipped.
    """
    for _, param in model.named_parameters():
        grad = param.grad
        if grad is None:
            continue
        if torch.isnan(grad).any() or torch.isinf(grad).any():
            return False
    return True
def natural_key(string_):
    """
    Sort key for natural (human) ordering: runs of digits compare numerically,
    e.g. sorted(['f10', 'f2'], key=natural_key) == ['f2', 'f10'].
    """
    tokens = re.split(r'(\d+)', string_)
    return [int(tok) if tok.isdigit() else tok for tok in tokens]
lepard | lepard-main/lib/trainer.py | import gc
import os
import torch
import torch.nn as nn
import numpy as np
from tensorboardX import SummaryWriter
from tqdm import tqdm
from lib.timer import AverageMeter
from lib.utils import Logger, validate_gradient
from lib.tictok import Timers
class Trainer(object):
    """Generic train/val/test driver.

    Pulls the model, optimizer, scheduler, loss and data loaders from a
    pre-populated ``args`` object, logs to TensorBoard plus a plain-text
    Logger, and snapshots the best checkpoint by validation loss.
    """
    def __init__(self, args):
        self.config = args
        # parameters
        self.start_epoch = 1
        self.max_epoch = args.max_epoch
        self.save_dir = args.save_dir
        self.device = args.device
        self.verbose = args.verbose
        self.model = args.model
        self.model = self.model.to(self.device)
        self.optimizer = args.optimizer
        self.scheduler = args.scheduler
        self.scheduler_freq = args.scheduler_freq
        self.snapshot_dir = args.snapshot_dir
        self.iter_size = args.iter_size
        # report roughly every `verbose_freq` samples, expressed in batches
        self.verbose_freq = args.verbose_freq // args.batch_size + 1
        if 'overfit' in self.config.exp_dir:
            self.verbose_freq = 1
        self.loss = args.desc_loss
        # best-so-far trackers used for checkpoint selection
        self.best_loss = 1e5
        self.best_recall = -1e5
        self.summary_writer = SummaryWriter(log_dir=args.tboard_dir)
        self.logger = Logger(args.snapshot_dir)
        self.logger.write(f'#parameters {sum([x.nelement() for x in self.model.parameters()]) / 1000000.} M\n')
        if (args.pretrain != ''):
            self._load_pretrain(args.pretrain)
        self.loader = dict()
        self.loader['train'] = args.train_loader
        self.loader['val'] = args.val_loader
        self.loader['test'] = args.test_loader
        self.timers = args.timers
        # dump the model architecture next to the logs for later inspection
        with open(f'{args.snapshot_dir}/model', 'w') as f:
            f.write(str(self.model))
        # NOTE(review): redundant — the `with` block already closed the file
        f.close()
    def _snapshot(self, epoch, name=None):
        """Save model/optimizer/scheduler state to `model_<epoch|name>.pth`."""
        state = {
            'epoch': epoch,
            'state_dict': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'scheduler': self.scheduler.state_dict(),
            'best_loss': self.best_loss,
            'best_recall': self.best_recall
        }
        if name is None:
            filename = os.path.join(self.save_dir, f'model_{epoch}.pth')
        else:
            filename = os.path.join(self.save_dir, f'model_{name}.pth')
        # NOTE(review): the message never interpolates the path — this was
        # presumably meant to be f"Save model to {filename}\n"
        self.logger.write(f"Save model to (unknown)\n")
        torch.save(state, filename, _use_new_zipfile_serialization=False)
    def _load_pretrain(self, resume):
        """Restore full training state from checkpoint at *resume* (raises if missing)."""
        print ("loading pretrained", resume)
        if os.path.isfile(resume):
            state = torch.load(resume)
            self.model.load_state_dict(state['state_dict'])
            self.start_epoch = state['epoch']
            self.scheduler.load_state_dict(state['scheduler'])
            self.optimizer.load_state_dict(state['optimizer'])
            self.best_loss = state['best_loss']
            self.best_recall = state['best_recall']
            self.logger.write(f'Successfully load pretrained model from {resume}!\n')
            self.logger.write(f'Current best loss {self.best_loss}\n')
            self.logger.write(f'Current best recall {self.best_recall}\n')
        else:
            raise ValueError(f"=> no checkpoint found at '{resume}'")
    def _get_lr(self, group=0):
        """Return the current learning rate of the given optimizer param group."""
        return self.optimizer.param_groups[group]['lr']
    def inference_one_batch(self, inputs, phase):
        """Run one batch; in 'train' phase also backprop (optimizer stepping
        happens in inference_one_epoch to support gradient accumulation)."""
        assert phase in ['train', 'val', 'test']
        inputs ['phase'] = phase
        if (phase == 'train'):
            self.model.train()
            if self.timers: self.timers.tic('forward pass')
            data = self.model(inputs, timers=self.timers)  # [N1, C1], [N2, C2]
            if self.timers: self.timers.toc('forward pass')
            if self.timers: self.timers.tic('compute loss')
            loss_info = self.loss( data)
            if self.timers: self.timers.toc('compute loss')
            if self.timers: self.timers.tic('backprop')
            loss_info['loss'].backward()
            if self.timers: self.timers.toc('backprop')
        else:
            self.model.eval()
            with torch.no_grad():
                data = self.model(inputs, timers=self.timers)  # [N1, C1], [N2, C2]
                loss_info = self.loss(data)
        return loss_info
    def inference_one_epoch(self, epoch, phase):
        """Iterate one full epoch of the given phase and return averaged stats."""
        gc.collect()
        assert phase in ['train', 'val', 'test']
        # init stats meter
        stats_meter = None  # self.stats_meter()
        num_iter = int(len(self.loader[phase].dataset) // self.loader[phase].batch_size)  # drop last incomplete batch
        c_loader_iter = self.loader[phase].__iter__()
        self.optimizer.zero_grad()
        for c_iter in tqdm(range(num_iter)):  # loop through this epoch
            if self.timers: self.timers.tic('one_iteration')
            ##################################
            if self.timers: self.timers.tic('load batch')
            # NOTE(review): Python-2 style `.next()` — works only because the
            # dataloader iterator exposes it; `next(c_loader_iter)` is the
            # portable spelling.
            inputs = c_loader_iter.next()
            # for gpu_div_i, _ in enumerate(inputs):
            # move tensor-valued entries (and lists of tensors) to the device
            for k, v in inputs.items():
                if type(v) == list:
                    inputs [k] = [item.to(self.device) for item in v]
                elif type(v) in [ dict, float, type(None), np.ndarray]:
                    pass
                else:
                    inputs [k] = v.to(self.device)
            if self.timers: self.timers.toc('load batch')
            ##################################
            if self.timers: self.timers.tic('inference_one_batch')
            loss_info = self.inference_one_batch(inputs, phase)
            if self.timers: self.timers.toc('inference_one_batch')
            ###################################################
            # run optimisation
            # if self.timers: self.timers.tic('run optimisation')
            # step only every `iter_size` batches (gradient accumulation), and
            # skip the step entirely when any gradient is NaN/Inf
            if ((c_iter + 1) % self.iter_size == 0 and phase == 'train'):
                gradient_valid = validate_gradient(self.model)
                if (gradient_valid):
                    self.optimizer.step()
                else:
                    self.logger.write('gradient not valid\n')
                self.optimizer.zero_grad()
            # if self.timers: self.timers.toc('run optimisation')
            ################################
            torch.cuda.empty_cache()
            # lazily create one AverageMeter per loss component
            if stats_meter is None:
                stats_meter = dict()
                for key, _ in loss_info.items():
                    stats_meter[key] = AverageMeter()
            for key, value in loss_info.items():
                stats_meter[key].update(value)
            if phase == 'train' :
                if (c_iter + 1) % self.verbose_freq == 0 and self.verbose :
                    curr_iter = num_iter * (epoch - 1) + c_iter
                    for key, value in stats_meter.items():
                        self.summary_writer.add_scalar(f'{phase}/{key}', value.avg, curr_iter)
                    dump_mess=True
                    if dump_mess:
                        message = f'{phase} Epoch: {epoch} [{c_iter + 1:4d}/{num_iter}]'
                        for key, value in stats_meter.items():
                            message += f'{key}: {value.avg:.2f}\t'
                        self.logger.write(message + '\n')
            if self.timers: self.timers.toc('one_iteration')
        # report evaluation score at end of each epoch
        if phase in ['val', 'test']:
            for key, value in stats_meter.items():
                self.summary_writer.add_scalar(f'{phase}/{key}', value.avg, epoch)
            message = f'{phase} Epoch: {epoch}'
            for key, value in stats_meter.items():
                message += f'{key}: {value.avg:.2f}\t'
            self.logger.write(message + '\n')
        return stats_meter
    def train(self):
        """Main loop: train each epoch, optionally validate, snapshot best loss."""
        print('start training...')
        for epoch in range(self.start_epoch, self.max_epoch):
            # anomaly detection helps surface NaN-producing ops at a perf cost
            with torch.autograd.set_detect_anomaly(True):
                if self.timers: self.timers.tic('run one epoch')
                stats_meter = self.inference_one_epoch(epoch, 'train')
                if self.timers: self.timers.toc('run one epoch')
            self.scheduler.step()
            if 'overfit' in self.config.exp_dir :
                # overfit runs: select best checkpoint on the training loss itself
                if stats_meter['loss'].avg < self.best_loss:
                    self.best_loss = stats_meter['loss'].avg
                    self._snapshot(epoch, 'best_loss')
                if self.timers: self.timers.print()
            else : # no validation step for overfitting
                if self.config.do_valid:
                    stats_meter = self.inference_one_epoch(epoch, 'val')
                    if stats_meter['loss'].avg < self.best_loss:
                        self.best_loss = stats_meter['loss'].avg
                        self._snapshot(epoch, 'best_loss')
                if self.timers: self.timers.print()
        # finish all epoch
        print("Training finish!")
sngan.pytorch | sngan.pytorch-master/test.py | # -*- coding: utf-8 -*-
# @Date : 2019-07-25
# @Author : Xinyu Gong ([email protected])
# @Link : None
# @Version : 0.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cfg
import models
from functions import validate
from utils.utils import set_log_dir, create_logger
from utils.inception_score import _init_inception
from utils.fid_score import create_inception_graph, check_or_download_inception
import torch
import os
import numpy as np
from tensorboardX import SummaryWriter
# enable cuDNN and let its autotuner pick the fastest conv algorithms
# (safe here because input sizes are fixed)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def main():
    """Evaluate a trained generator checkpoint: compute IS and FID on CIFAR-10."""
    args = cfg.parse_args()
    torch.cuda.manual_seed(args.random_seed)
    assert args.exp_name
    assert args.load_path.endswith('.pth')
    assert os.path.exists(args.load_path)
    args.path_helper = set_log_dir('logs_eval', args.exp_name)
    logger = create_logger(args.path_helper['log_path'], phase='test')
    # set tf env — the inception network used for IS/FID runs in TensorFlow
    _init_inception()
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)
    # import network — model class is resolved dynamically from args.model
    gen_net = eval('models.'+args.model+'.Generator')(args=args).cuda()
    # fid stat
    if args.dataset.lower() == 'cifar10':
        fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
    else:
        raise NotImplementedError(f'no fid stat for {args.dataset.lower()}')
    assert os.path.exists(fid_stat)
    # initial — fixed latent batch used for the sampled-image grid
    fixed_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (25, args.latent_dim)))
    # set writer
    logger.info(f'=> resuming from {args.load_path}')
    checkpoint_file = args.load_path
    assert os.path.exists(checkpoint_file)
    checkpoint = torch.load(checkpoint_file)
    # full training checkpoints store the EMA generator under 'avg_gen_state_dict';
    # otherwise the file is assumed to be a bare state_dict
    if 'avg_gen_state_dict' in checkpoint:
        gen_net.load_state_dict(checkpoint['avg_gen_state_dict'])
        epoch = checkpoint['epoch']
        logger.info(f'=> loaded checkpoint {checkpoint_file} (epoch {epoch})')
    else:
        gen_net.load_state_dict(checkpoint)
        logger.info(f'=> loaded checkpoint {checkpoint_file}')
    logger.info(args)
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'valid_global_steps': 0,
    }
    inception_score, fid_score = validate(args, fixed_z, fid_stat, gen_net, writer_dict)
    logger.info(f'Inception score: {inception_score}, FID score: {fid_score}.')
if __name__ == '__main__':
main()
| 2,425 | 29.708861 | 88 | py |
sngan.pytorch | sngan.pytorch-master/functions.py | # -*- coding: utf-8 -*-
# @Date : 2019-07-25
# @Author : Xinyu Gong ([email protected])
# @Link : None
# @Version : 0.0
import os
import numpy as np
import torch
import torch.nn as nn
from torchvision.utils import make_grid
from imageio import imsave
from tqdm import tqdm
from copy import deepcopy
import logging
from utils.inception_score import get_inception_score
from utils.fid_score import calculate_fid_given_paths
logger = logging.getLogger(__name__)
def train(args, gen_net: nn.Module, dis_net: nn.Module, gen_optimizer, dis_optimizer, gen_avg_param, train_loader, epoch,
          writer_dict, schedulers=None):
    """Train GAN for one epoch: hinge-loss discriminator every batch, generator
    every `n_critic` discriminator steps, with EMA of the generator weights."""
    writer = writer_dict['writer']
    gen_step = 0
    # train mode
    gen_net = gen_net.train()
    dis_net = dis_net.train()
    for iter_idx, (imgs, _) in enumerate(tqdm(train_loader)):
        global_steps = writer_dict['train_global_steps']
        # Adversarial ground truths
        real_imgs = imgs.type(torch.cuda.FloatTensor)
        # Sample noise as generator input
        z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))
        # ---------------------
        #  Train Discriminator
        # ---------------------
        dis_optimizer.zero_grad()
        real_validity = dis_net(real_imgs)
        fake_imgs = gen_net(z).detach()
        assert fake_imgs.size() == real_imgs.size()
        fake_validity = dis_net(fake_imgs)
        # cal loss — hinge loss for the discriminator
        d_loss = torch.mean(nn.ReLU(inplace=True)(1.0 - real_validity)) + \
                 torch.mean(nn.ReLU(inplace=True)(1 + fake_validity))
        d_loss.backward()
        dis_optimizer.step()
        writer.add_scalar('d_loss', d_loss.item(), global_steps)
        # -----------------
        #  Train Generator
        # -----------------
        if global_steps % args.n_critic == 0:
            gen_optimizer.zero_grad()
            gen_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_batch_size, args.latent_dim)))
            gen_imgs = gen_net(gen_z)
            fake_validity = dis_net(gen_imgs)
            # cal loss — generator maximizes the critic's score on fakes
            g_loss = -torch.mean(fake_validity)
            g_loss.backward()
            gen_optimizer.step()
            # adjust learning rate
            if schedulers:
                gen_scheduler, dis_scheduler = schedulers
                g_lr = gen_scheduler.step(global_steps)
                d_lr = dis_scheduler.step(global_steps)
                writer.add_scalar('LR/g_lr', g_lr, global_steps)
                writer.add_scalar('LR/d_lr', d_lr, global_steps)
            # moving average weight — EMA with decay 0.999
            # NOTE(review): `add_(0.001, p.data)` is the deprecated positional-alpha
            # overload; modern torch spells it `add_(p.data, alpha=0.001)`
            for p, avg_p in zip(gen_net.parameters(), gen_avg_param):
                avg_p.mul_(0.999).add_(0.001, p.data)
            writer.add_scalar('g_loss', g_loss.item(), global_steps)
            gen_step += 1
        # verbose
        if gen_step and iter_idx % args.print_freq == 0:
            tqdm.write(
                "[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" %
                (epoch, args.max_epoch, iter_idx % len(train_loader), len(train_loader), d_loss.item(), g_loss.item()))
        writer_dict['train_global_steps'] = global_steps + 1
def validate(args, fixed_z, fid_stat, gen_net: nn.Module, writer_dict):
    """Sample `num_eval_imgs` images from the generator, compute Inception Score
    and FID (against `fid_stat`), log them to TensorBoard, and return (IS, FID)."""
    writer = writer_dict['writer']
    global_steps = writer_dict['valid_global_steps']
    # eval mode
    gen_net = gen_net.eval()
    # generate images — a 5x5 grid from the fixed latent batch for visual tracking
    sample_imgs = gen_net(fixed_z)
    img_grid = make_grid(sample_imgs, nrow=5, normalize=True, scale_each=True)
    # get fid and inception score
    fid_buffer_dir = os.path.join(args.path_helper['sample_path'], 'fid_buffer')
    os.makedirs(fid_buffer_dir)
    eval_iter = args.num_eval_imgs // args.eval_batch_size
    img_list = list()
    for iter_idx in tqdm(range(eval_iter), desc='sample images'):
        z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.eval_batch_size, args.latent_dim)))
        # Generate a batch of images, mapping [-1, 1] outputs to uint8 [0, 255] HWC
        gen_imgs = gen_net(z).mul_(127.5).add_(127.5).clamp_(0.0, 255.0).permute(0, 2, 3, 1).to('cpu', torch.uint8).numpy()
        for img_idx, img in enumerate(gen_imgs):
            file_name = os.path.join(fid_buffer_dir, f'iter{iter_idx}_b{img_idx}.png')
            imsave(file_name, img)
        img_list.extend(list(gen_imgs))
    # get inception score
    logger.info('=> calculate inception score')
    mean, std = get_inception_score(img_list)
    # get fid score
    logger.info('=> calculate fid score')
    fid_score = calculate_fid_given_paths([fid_buffer_dir, fid_stat], inception_path=None)
    # NOTE(review): shell `rm -r` is not portable; shutil.rmtree would be safer
    os.system('rm -r {}'.format(fid_buffer_dir))
    writer.add_image('sampled_images', img_grid, global_steps)
    writer.add_scalar('Inception_score/mean', mean, global_steps)
    writer.add_scalar('Inception_score/std', std, global_steps)
    writer.add_scalar('FID_score', fid_score, global_steps)
    writer_dict['valid_global_steps'] = global_steps + 1
    return mean, fid_score
class LinearLrDecay(object):
    """Linearly anneal an optimizer's learning rate from ``start_lr`` down to
    ``end_lr`` between ``decay_start_step`` and ``decay_end_step``; constant
    outside that window."""

    def __init__(self, optimizer, start_lr, end_lr, decay_start_step, decay_end_step):
        assert start_lr > end_lr
        self.optimizer = optimizer
        self.start_lr = start_lr
        self.end_lr = end_lr
        self.decay_start_step = decay_start_step
        self.decay_end_step = decay_end_step
        # lr decrease per step inside the decay window
        self.delta = (start_lr - end_lr) / (decay_end_step - decay_start_step)

    def step(self, current_step):
        """Write the lr for *current_step* into every param group and return it."""
        progressed = current_step - self.decay_start_step
        lr = self.start_lr - self.delta * progressed
        # clamp to the plateau values before/after the decay window
        lr = min(max(lr, self.end_lr), self.start_lr)
        for group in self.optimizer.param_groups:
            group['lr'] = lr
        return lr
def load_params(model, new_param):
    """Copy each tensor from *new_param* into the model's parameters, in order."""
    for target, source in zip(model.parameters(), new_param):
        target.data.copy_(source)
def copy_params(model):
    """Return an independent deep copy of every parameter tensor (EMA bookkeeping)."""
    return deepcopy([p.data for p in model.parameters()])
| 6,022 | 32.837079 | 123 | py |
sngan.pytorch | sngan.pytorch-master/datasets.py | import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import Dataset
class ImageDataset(object):
    """Build train/valid/test DataLoaders for CIFAR-10 or STL-10.

    Images are resized to ``args.img_size`` and normalized to [-1, 1]
    (mean/std 0.5 per channel). For STL-10 the training split is
    'train+unlabeled' (GAN training needs no labels); `test` always aliases
    `valid`. Datasets are downloaded to ``args.data_path`` on first use.
    """
    def __init__(self, args):
        if args.dataset.lower() == 'cifar10':
            Dt = datasets.CIFAR10
            transform = transforms.Compose([
                transforms.Resize(args.img_size),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            ])
            args.n_classes = 10
        elif args.dataset.lower() == 'stl10':
            Dt = datasets.STL10
            transform = transforms.Compose([
                transforms.Resize(args.img_size),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            ])
        else:
            raise NotImplementedError('Unknown dataset: {}'.format(args.dataset))
        # STL-10 uses split= keywords, CIFAR-10 uses train=; otherwise the two
        # branches construct identical loaders
        if args.dataset.lower() == 'stl10':
            self.train = torch.utils.data.DataLoader(
                Dt(root=args.data_path, split='train+unlabeled', transform=transform, download=True),
                batch_size=args.dis_batch_size, shuffle=True,
                num_workers=args.num_workers, pin_memory=True)
            self.valid = torch.utils.data.DataLoader(
                Dt(root=args.data_path, split='test', transform=transform),
                batch_size=args.dis_batch_size, shuffle=False,
                num_workers=args.num_workers, pin_memory=True)
            self.test = self.valid
        else:
            self.train = torch.utils.data.DataLoader(
                Dt(root=args.data_path, train=True, transform=transform, download=True),
                batch_size=args.dis_batch_size, shuffle=True,
                num_workers=args.num_workers, pin_memory=True)
            self.valid = torch.utils.data.DataLoader(
                Dt(root=args.data_path, train=False, transform=transform),
                batch_size=args.dis_batch_size, shuffle=False,
                num_workers=args.num_workers, pin_memory=True)
            self.test = self.valid
| 2,115 | 40.490196 | 101 | py |
sngan.pytorch | sngan.pytorch-master/train.py | # -*- coding: utf-8 -*-
# @Date : 2019-07-25
# @Author : Xinyu Gong ([email protected])
# @Link : None
# @Version : 0.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cfg
import models
import datasets
from functions import train, validate, LinearLrDecay, load_params, copy_params
from utils.utils import set_log_dir, save_checkpoint, create_logger
from utils.inception_score import _init_inception
from utils.fid_score import create_inception_graph, check_or_download_inception
import torch
import os
import numpy as np
import torch.nn as nn
from tensorboardX import SummaryWriter
from tqdm import tqdm
from copy import deepcopy
# enable cuDNN and let its autotuner pick the fastest conv algorithms
# (safe here because input sizes are fixed)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def main():
    """Full GAN training entry point: build nets, optimizers, schedulers and
    data, optionally resume from a checkpoint, then run the train/validate loop
    keeping the checkpoint with the best FID."""
    args = cfg.parse_args()
    torch.cuda.manual_seed(args.random_seed)
    # set tf env — the inception network used for IS/FID runs in TensorFlow
    _init_inception()
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)
    # import network — model classes resolved dynamically from args.model
    gen_net = eval('models.'+args.model+'.Generator')(args=args).cuda()
    dis_net = eval('models.'+args.model+'.Discriminator')(args=args).cuda()
    # weight init
    def weights_init(m):
        """Initialize Conv2d per args.init_type and BatchNorm2d as N(1, 0.02)."""
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            if args.init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, 0.02)
            elif args.init_type == 'orth':
                nn.init.orthogonal_(m.weight.data)
            elif args.init_type == 'xavier_uniform':
                # NOTE(review): `nn.init.xavier_uniform` (no trailing underscore)
                # is the deprecated alias of `xavier_uniform_`
                nn.init.xavier_uniform(m.weight.data, 1.)
            else:
                raise NotImplementedError('{} unknown inital type'.format(args.init_type))
        elif classname.find('BatchNorm2d') != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0.0)
    gen_net.apply(weights_init)
    dis_net.apply(weights_init)
    # set optimizer
    gen_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, gen_net.parameters()),
                                     args.g_lr, (args.beta1, args.beta2))
    dis_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, dis_net.parameters()),
                                     args.d_lr, (args.beta1, args.beta2))
    # both lrs decay linearly to 0 over max_iter generator steps
    gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, 0.0, 0, args.max_iter * args.n_critic)
    dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, 0.0, 0, args.max_iter * args.n_critic)
    # set up data_loader
    dataset = datasets.ImageDataset(args)
    train_loader = dataset.train
    # fid stat — precomputed real-data statistics for the chosen dataset
    if args.dataset.lower() == 'cifar10':
        fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
    elif args.dataset.lower() == 'stl10':
        fid_stat = 'fid_stat/stl10_train_unlabeled_fid_stats_48.npz'
    else:
        raise NotImplementedError(f'no fid stat for {args.dataset.lower()}')
    assert os.path.exists(fid_stat)
    # epoch number for dis_net — scaled so the generator sees max_epoch epochs
    args.max_epoch = args.max_epoch * args.n_critic
    if args.max_iter:
        args.max_epoch = np.ceil(args.max_iter * args.n_critic / len(train_loader))
    # initial — fixed latent batch for the sampled-image grid, plus the EMA copy
    fixed_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (25, args.latent_dim)))
    gen_avg_param = copy_params(gen_net)
    start_epoch = 0
    best_fid = 1e4
    # set writer — resume full training state when a checkpoint dir is given
    if args.load_path:
        print(f'=> resuming from {args.load_path}')
        assert os.path.exists(args.load_path)
        checkpoint_file = os.path.join(args.load_path, 'Model', 'checkpoint.pth')
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)
        start_epoch = checkpoint['epoch']
        best_fid = checkpoint['best_fid']
        gen_net.load_state_dict(checkpoint['gen_state_dict'])
        dis_net.load_state_dict(checkpoint['dis_state_dict'])
        gen_optimizer.load_state_dict(checkpoint['gen_optimizer'])
        dis_optimizer.load_state_dict(checkpoint['dis_optimizer'])
        # restore the EMA generator weights via a throwaway network copy
        avg_gen_net = deepcopy(gen_net)
        avg_gen_net.load_state_dict(checkpoint['avg_gen_state_dict'])
        gen_avg_param = copy_params(avg_gen_net)
        del avg_gen_net
        args.path_helper = checkpoint['path_helper']
        logger = create_logger(args.path_helper['log_path'])
        logger.info(f'=> loaded checkpoint {checkpoint_file} (epoch {start_epoch})')
    else:
        # create new log dir
        assert args.exp_name
        args.path_helper = set_log_dir('logs', args.exp_name)
        logger = create_logger(args.path_helper['log_path'])
    logger.info(args)
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': start_epoch * len(train_loader),
        'valid_global_steps': start_epoch // args.val_freq,
    }
    # train loop
    lr_schedulers = (gen_scheduler, dis_scheduler) if args.lr_decay else None
    for epoch in tqdm(range(int(start_epoch), int(args.max_epoch)), desc='total progress'):
        train(args, gen_net, dis_net, gen_optimizer, dis_optimizer, gen_avg_param, train_loader, epoch, writer_dict,
              lr_schedulers)
        if epoch and epoch % args.val_freq == 0 or epoch == int(args.max_epoch)-1:
            # validate with the EMA weights, then restore the raw weights
            backup_param = copy_params(gen_net)
            load_params(gen_net, gen_avg_param)
            inception_score, fid_score = validate(args, fixed_z, fid_stat, gen_net, writer_dict)
            logger.info(f'Inception score: {inception_score}, FID score: {fid_score} || @ epoch {epoch}.')
            load_params(gen_net, backup_param)
            if fid_score < best_fid:
                best_fid = fid_score
                is_best = True
            else:
                is_best = False
        else:
            is_best = False
        # checkpoint stores both the raw and the EMA generator state
        avg_gen_net = deepcopy(gen_net)
        load_params(avg_gen_net, gen_avg_param)
        save_checkpoint({
            'epoch': epoch + 1,
            'model': args.model,
            'gen_state_dict': gen_net.state_dict(),
            'dis_state_dict': dis_net.state_dict(),
            'avg_gen_state_dict': avg_gen_net.state_dict(),
            'gen_optimizer': gen_optimizer.state_dict(),
            'dis_optimizer': dis_optimizer.state_dict(),
            'best_fid': best_fid,
            'path_helper': args.path_helper
        }, is_best, args.path_helper['ckpt_path'])
        del avg_gen_net
if __name__ == '__main__':
main()
| 6,393 | 37.287425 | 116 | py |
sngan.pytorch | sngan.pytorch-master/models/sngan_64.py | import torch.nn as nn
class GenBlock(nn.Module):
    """Pre-activation residual generator block: BN -> act -> conv, twice,
    with optional nearest-neighbour 2x upsampling on both branches.

    A learnable 1x1 shortcut convolution is added whenever the channel count
    changes or upsampling is enabled; otherwise the shortcut is the identity.
    """
    # NOTE(review): the shared nn.ReLU() default instance is harmless here
    # because ReLU holds no parameters or state.
    def __init__(self, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
                 activation=nn.ReLU(), upsample=False, n_classes=0):
        super(GenBlock, self).__init__()
        self.activation = activation
        self.upsample = upsample
        # Shortcut must be learnable if shape (channels or resolution) changes.
        self.learnable_sc = in_channels != out_channels or upsample
        hidden_channels = out_channels if hidden_channels is None else hidden_channels
        self.n_classes = n_classes
        self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
        self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
        self.b1 = nn.BatchNorm2d(in_channels)
        self.b2 = nn.BatchNorm2d(hidden_channels)
        if self.learnable_sc:
            self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
    def upsample_conv(self, x, conv):
        # Nearest-neighbour 2x upsample, then apply the given convolution.
        return conv(nn.UpsamplingNearest2d(scale_factor=2)(x))
    def residual(self, x):
        # Pre-activation residual branch; upsampling (if any) happens at c1.
        h = x
        h = self.b1(h)
        h = self.activation(h)
        h = self.upsample_conv(h, self.c1) if self.upsample else self.c1(h)
        h = self.b2(h)
        h = self.activation(h)
        h = self.c2(h)
        return h
    def shortcut(self, x):
        if self.learnable_sc:
            x = self.upsample_conv(x, self.c_sc) if self.upsample else self.c_sc(x)
            return x
        else:
            return x
    def forward(self, x):
        # Standard residual sum of the two branches.
        return self.residual(x) + self.shortcut(x)
class Generator(nn.Module):
    """SNGAN generator for 64x64 output: a linear projection of the latent
    vector into a (ch, bottom_width, bottom_width) feature map, followed by
    three 2x-upsampling GenBlocks (so output side = bottom_width * 8),
    BN + activation, and a 3x3 conv to RGB squashed by tanh to [-1, 1].

    Expects ``args`` to provide ``bottom_width``, ``gf_dim`` and ``latent_dim``.
    """
    def __init__(self, args, activation=nn.ReLU(), n_classes=0):
        super(Generator, self).__init__()
        self.bottom_width = args.bottom_width
        self.activation = activation
        self.n_classes = n_classes
        self.ch = args.gf_dim
        self.l1 = nn.Linear(args.latent_dim, (self.bottom_width ** 2) * self.ch)
        self.block2 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
        self.block3 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
        self.block4 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
        self.b5 = nn.BatchNorm2d(self.ch)
        self.c5 = nn.Conv2d(self.ch, 3, kernel_size=3, stride=1, padding=1)
    def forward(self, z):
        # z: (batch, latent_dim) -> image (batch, 3, bottom_width*8, bottom_width*8)
        h = z
        h = self.l1(h).view(-1, self.ch, self.bottom_width, self.bottom_width)
        h = self.block2(h)
        h = self.block3(h)
        h = self.block4(h)
        h = self.b5(h)
        h = self.activation(h)
        h = nn.Tanh()(self.c5(h))
        return h
"""Discriminator"""
def _downsample(x):
# Downsample (Mean Avg Pooling with 2x2 kernel)
return nn.AvgPool2d(kernel_size=2)(x)
class OptimizedDisBlock(nn.Module):
    """First discriminator block ("optimized" input block): conv -> act ->
    conv -> downsample on the residual branch, and downsample -> 1x1 conv on
    the shortcut. Unlike DisBlock there is no pre-activation on the input.
    Spectral normalization is applied to all convolutions when
    ``args.d_spectral_norm`` is set.
    """
    def __init__(self, args, in_channels, out_channels, ksize=3, pad=1, activation=nn.ReLU()):
        super(OptimizedDisBlock, self).__init__()
        self.activation = activation
        self.c1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize, padding=pad)
        self.c2 = nn.Conv2d(out_channels, out_channels, kernel_size=ksize, padding=pad)
        self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
        if args.d_spectral_norm:
            self.c1 = nn.utils.spectral_norm(self.c1)
            self.c2 = nn.utils.spectral_norm(self.c2)
            self.c_sc = nn.utils.spectral_norm(self.c_sc)
    def residual(self, x):
        h = x
        h = self.c1(h)
        h = self.activation(h)
        h = self.c2(h)
        h = _downsample(h)
        return h
    def shortcut(self, x):
        # Downsample first, then project channels with a 1x1 conv.
        return self.c_sc(_downsample(x))
    def forward(self, x):
        return self.residual(x) + self.shortcut(x)
class DisBlock(nn.Module):
    """Pre-activation residual discriminator block (act -> conv -> act ->
    conv) with optional 2x2 mean-pool downsampling. A learnable 1x1 shortcut
    is used when channels change or downsampling is enabled. Spectral
    normalization is applied when ``args.d_spectral_norm`` is set.
    """
    def __init__(self, args, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
                 activation=nn.ReLU(), downsample=False):
        super(DisBlock, self).__init__()
        self.activation = activation
        self.downsample = downsample
        self.learnable_sc = (in_channels != out_channels) or downsample
        hidden_channels = in_channels if hidden_channels is None else hidden_channels
        self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
        self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
        if args.d_spectral_norm:
            self.c1 = nn.utils.spectral_norm(self.c1)
            self.c2 = nn.utils.spectral_norm(self.c2)
        if self.learnable_sc:
            self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
            if args.d_spectral_norm:
                self.c_sc = nn.utils.spectral_norm(self.c_sc)
    def residual(self, x):
        h = x
        h = self.activation(h)
        h = self.c1(h)
        h = self.activation(h)
        h = self.c2(h)
        if self.downsample:
            h = _downsample(h)
        return h
    def shortcut(self, x):
        if self.learnable_sc:
            x = self.c_sc(x)
            if self.downsample:
                return _downsample(x)
            else:
                return x
        else:
            return x
    def forward(self, x):
        return self.residual(x) + self.shortcut(x)
class Discriminator(nn.Module):
    """SNGAN discriminator: an optimized input block, three constant-width
    DisBlocks (only the first downsamples), then activation, global sum
    pooling over the spatial dims, and a single spectral-normalized linear
    unit producing one unbounded realness score per image.
    """
    def __init__(self, args, activation=nn.ReLU()):
        super(Discriminator, self).__init__()
        self.ch = args.df_dim
        self.activation = activation
        self.block1 = OptimizedDisBlock(args, 3, self.ch)
        self.block2 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=True)
        self.block3 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
        self.block4 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
        self.l5 = nn.Linear(self.ch, 1, bias=False)
        if args.d_spectral_norm:
            self.l5 = nn.utils.spectral_norm(self.l5)
    def forward(self, x):
        h = x
        h = self.block1(h)
        h = self.block2(h)
        h = self.block3(h)
        h = self.block4(h)
        h = self.activation(h)
        # Global sum pooling over H and W (original comment said "average",
        # but the code sums; the difference is a constant scale).
        h = h.sum(2).sum(2)
        output = self.l5(h)
        return output
| 6,296 | 34.778409 | 107 | py |
sngan.pytorch | sngan.pytorch-master/models/sngan_stl10.py | import torch.nn as nn
class GenBlock(nn.Module):
    """Pre-activation residual generator block (STL-10 variant, identical in
    structure to the 64x64 model's block): BN -> act -> conv twice, with
    optional nearest-neighbour 2x upsampling; learnable 1x1 shortcut when
    channels or resolution change.
    """
    def __init__(self, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
                 activation=nn.ReLU(), upsample=False, n_classes=0):
        super(GenBlock, self).__init__()
        self.activation = activation
        self.upsample = upsample
        self.learnable_sc = in_channels != out_channels or upsample
        hidden_channels = out_channels if hidden_channels is None else hidden_channels
        self.n_classes = n_classes
        self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
        self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
        self.b1 = nn.BatchNorm2d(in_channels)
        self.b2 = nn.BatchNorm2d(hidden_channels)
        if self.learnable_sc:
            self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
    def upsample_conv(self, x, conv):
        # Nearest-neighbour 2x upsample, then convolution.
        return conv(nn.UpsamplingNearest2d(scale_factor=2)(x))
    def residual(self, x):
        h = x
        h = self.b1(h)
        h = self.activation(h)
        h = self.upsample_conv(h, self.c1) if self.upsample else self.c1(h)
        h = self.b2(h)
        h = self.activation(h)
        h = self.c2(h)
        return h
    def shortcut(self, x):
        if self.learnable_sc:
            x = self.upsample_conv(x, self.c_sc) if self.upsample else self.c_sc(x)
            return x
        else:
            return x
    def forward(self, x):
        return self.residual(x) + self.shortcut(x)
class Generator(nn.Module):
    """SNGAN generator for STL-10: latent -> linear -> (512, bw, bw) map,
    then three upsampling GenBlocks narrowing 512 -> 256 -> 128 -> 64
    (output side = bottom_width * 8), BN + activation, 3x3 conv to RGB, tanh.
    Channel widths are hard-coded (unlike the gf_dim-parameterized variants).
    """
    def __init__(self, args, activation=nn.ReLU(), n_classes=0):
        super(Generator, self).__init__()
        self.bottom_width = args.bottom_width
        self.activation = activation
        self.n_classes = n_classes
        self.ch = 512
        self.l1 = nn.Linear(args.latent_dim, (self.bottom_width ** 2) * self.ch)
        self.block2 = GenBlock(512, 256, activation=activation, upsample=True, n_classes=n_classes)
        self.block3 = GenBlock(256, 128, activation=activation, upsample=True, n_classes=n_classes)
        self.block4 = GenBlock(128, 64, activation=activation, upsample=True, n_classes=n_classes)
        self.b5 = nn.BatchNorm2d(64)
        self.c5 = nn.Conv2d(64, 3, kernel_size=3, stride=1, padding=1)
    def forward(self, z):
        h = z
        h = self.l1(h).view(-1, self.ch, self.bottom_width, self.bottom_width)
        h = self.block2(h)
        h = self.block3(h)
        h = self.block4(h)
        h = self.b5(h)
        h = self.activation(h)
        h = nn.Tanh()(self.c5(h))
        return h
"""Discriminator"""
def _downsample(x):
# Downsample (Mean Avg Pooling with 2x2 kernel)
return nn.AvgPool2d(kernel_size=2)(x)
class OptimizedDisBlock(nn.Module):
    """First discriminator block (no pre-activation on the raw image):
    conv -> act -> conv -> downsample residual branch; downsample -> 1x1 conv
    shortcut. Spectral norm on all convs when ``args.d_spectral_norm``.
    """
    def __init__(self, args, in_channels, out_channels, ksize=3, pad=1, activation=nn.ReLU()):
        super(OptimizedDisBlock, self).__init__()
        self.activation = activation
        self.c1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize, padding=pad)
        self.c2 = nn.Conv2d(out_channels, out_channels, kernel_size=ksize, padding=pad)
        self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
        if args.d_spectral_norm:
            self.c1 = nn.utils.spectral_norm(self.c1)
            self.c2 = nn.utils.spectral_norm(self.c2)
            self.c_sc = nn.utils.spectral_norm(self.c_sc)
    def residual(self, x):
        h = x
        h = self.c1(h)
        h = self.activation(h)
        h = self.c2(h)
        h = _downsample(h)
        return h
    def shortcut(self, x):
        return self.c_sc(_downsample(x))
    def forward(self, x):
        return self.residual(x) + self.shortcut(x)
class DisBlock(nn.Module):
    """Pre-activation residual discriminator block with optional 2x2
    mean-pool downsampling; learnable 1x1 shortcut when channels change or
    downsampling is enabled. Spectral norm when ``args.d_spectral_norm``.
    """
    def __init__(self, args, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
                 activation=nn.ReLU(), downsample=False):
        super(DisBlock, self).__init__()
        self.activation = activation
        self.downsample = downsample
        self.learnable_sc = (in_channels != out_channels) or downsample
        hidden_channels = in_channels if hidden_channels is None else hidden_channels
        self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
        self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
        if args.d_spectral_norm:
            self.c1 = nn.utils.spectral_norm(self.c1)
            self.c2 = nn.utils.spectral_norm(self.c2)
        if self.learnable_sc:
            self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
            if args.d_spectral_norm:
                self.c_sc = nn.utils.spectral_norm(self.c_sc)
    def residual(self, x):
        h = x
        h = self.activation(h)
        h = self.c1(h)
        h = self.activation(h)
        h = self.c2(h)
        if self.downsample:
            h = _downsample(h)
        return h
    def shortcut(self, x):
        if self.learnable_sc:
            x = self.c_sc(x)
            if self.downsample:
                return _downsample(x)
            else:
                return x
        else:
            return x
    def forward(self, x):
        return self.residual(x) + self.shortcut(x)
class Discriminator(nn.Module):
    """SNGAN discriminator for STL-10: widening pyramid 3 -> 64 -> 128 ->
    256 -> 512 -> 1024 with downsampling in the first four blocks, then
    activation, global sum pooling, and a single spectral-normalized linear
    unit giving one realness score per image.
    """
    def __init__(self, args, activation=nn.ReLU()):
        super(Discriminator, self).__init__()
        self.activation = activation
        self.block1 = OptimizedDisBlock(args, 3, 64)
        self.block2 = DisBlock(args, 64, 128, activation=activation, downsample=True)
        self.block3 = DisBlock(args, 128, 256, activation=activation, downsample=True)
        self.block4 = DisBlock(args, 256, 512, activation=activation, downsample=True)
        self.block5 = DisBlock(args, 512, 1024, activation=activation, downsample=False)
        self.l6 = nn.Linear(1024, 1, bias=False)
        if args.d_spectral_norm:
            self.l6 = nn.utils.spectral_norm(self.l6)
    def forward(self, x):
        h = x
        h = self.block1(h)
        h = self.block2(h)
        h = self.block3(h)
        h = self.block4(h)
        h = self.block5(h)
        h = self.activation(h)
        # Global sum pooling over H and W (original comment said "average",
        # but the code sums).
        h = h.sum(2).sum(2)
        output = self.l6(h)
        return output
| 6,305 | 34.426966 | 99 | py |
sngan.pytorch | sngan.pytorch-master/models/sngan_cifar10.py | import torch.nn as nn
from .gen_resblock import GenBlock
class Generator(nn.Module):
    """SNGAN generator for CIFAR-10 (uses the shared GenBlock from
    gen_resblock): latent -> linear -> (gf_dim, bw, bw), three upsampling
    blocks of constant width (output side = bottom_width * 8), BN +
    activation, 3x3 conv to RGB, tanh to [-1, 1].
    """
    def __init__(self, args, activation=nn.ReLU(), n_classes=0):
        super(Generator, self).__init__()
        self.bottom_width = args.bottom_width
        self.activation = activation
        self.n_classes = n_classes
        self.ch = args.gf_dim
        self.l1 = nn.Linear(args.latent_dim, (self.bottom_width ** 2) * self.ch)
        self.block2 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
        self.block3 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
        self.block4 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
        self.b5 = nn.BatchNorm2d(self.ch)
        self.c5 = nn.Conv2d(self.ch, 3, kernel_size=3, stride=1, padding=1)
    def forward(self, z):
        h = z
        h = self.l1(h).view(-1, self.ch, self.bottom_width, self.bottom_width)
        h = self.block2(h)
        h = self.block3(h)
        h = self.block4(h)
        h = self.b5(h)
        h = self.activation(h)
        h = nn.Tanh()(self.c5(h))
        return h
"""Discriminator"""
def _downsample(x):
# Downsample (Mean Avg Pooling with 2x2 kernel)
return nn.AvgPool2d(kernel_size=2)(x)
class OptimizedDisBlock(nn.Module):
    """First discriminator block (CIFAR-10 variant; identical to the other
    models'): conv -> act -> conv -> downsample residual branch and
    downsample -> 1x1 conv shortcut, no pre-activation on the input image.
    Spectral norm on all convs when ``args.d_spectral_norm``.
    """
    def __init__(self, args, in_channels, out_channels, ksize=3, pad=1, activation=nn.ReLU()):
        super(OptimizedDisBlock, self).__init__()
        self.activation = activation
        self.c1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize, padding=pad)
        self.c2 = nn.Conv2d(out_channels, out_channels, kernel_size=ksize, padding=pad)
        self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
        if args.d_spectral_norm:
            self.c1 = nn.utils.spectral_norm(self.c1)
            self.c2 = nn.utils.spectral_norm(self.c2)
            self.c_sc = nn.utils.spectral_norm(self.c_sc)
    def residual(self, x):
        h = x
        h = self.c1(h)
        h = self.activation(h)
        h = self.c2(h)
        h = _downsample(h)
        return h
    def shortcut(self, x):
        return self.c_sc(_downsample(x))
    def forward(self, x):
        return self.residual(x) + self.shortcut(x)
class DisBlock(nn.Module):
    """Pre-activation residual discriminator block (CIFAR-10 variant) with
    optional 2x2 mean-pool downsampling; learnable 1x1 shortcut when
    channels change or downsampling is enabled. Spectral norm when
    ``args.d_spectral_norm``.
    """
    def __init__(self, args, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
                 activation=nn.ReLU(), downsample=False):
        super(DisBlock, self).__init__()
        self.activation = activation
        self.downsample = downsample
        self.learnable_sc = (in_channels != out_channels) or downsample
        hidden_channels = in_channels if hidden_channels is None else hidden_channels
        self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
        self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
        if args.d_spectral_norm:
            self.c1 = nn.utils.spectral_norm(self.c1)
            self.c2 = nn.utils.spectral_norm(self.c2)
        if self.learnable_sc:
            self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
            if args.d_spectral_norm:
                self.c_sc = nn.utils.spectral_norm(self.c_sc)
    def residual(self, x):
        h = x
        h = self.activation(h)
        h = self.c1(h)
        h = self.activation(h)
        h = self.c2(h)
        if self.downsample:
            h = _downsample(h)
        return h
    def shortcut(self, x):
        if self.learnable_sc:
            x = self.c_sc(x)
            if self.downsample:
                return _downsample(x)
            else:
                return x
        else:
            return x
    def forward(self, x):
        return self.residual(x) + self.shortcut(x)
class Discriminator(nn.Module):
    """SNGAN discriminator for CIFAR-10: optimized input block, three
    constant-width DisBlocks (only the first downsamples), activation,
    global sum pooling, and a single spectral-normalized linear unit.
    """
    def __init__(self, args, activation=nn.ReLU()):
        super(Discriminator, self).__init__()
        self.ch = args.df_dim
        self.activation = activation
        self.block1 = OptimizedDisBlock(args, 3, self.ch)
        self.block2 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=True)
        self.block3 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
        self.block4 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
        self.l5 = nn.Linear(self.ch, 1, bias=False)
        if args.d_spectral_norm:
            self.l5 = nn.utils.spectral_norm(self.l5)
    def forward(self, x):
        h = x
        h = self.block1(h)
        h = self.block2(h)
        h = self.block3(h)
        h = self.block4(h)
        h = self.activation(h)
        # Global sum pooling over H and W (original comment said "average",
        # but the code sums).
        h = h.sum(2).sum(2)
        output = self.l5(h)
        return output
| 4,805 | 34.338235 | 107 | py |
sngan.pytorch | sngan.pytorch-master/models/gen_resblock.py | # -*- coding: utf-8 -*-
# @Date : 3/26/20
# @Author : Xinyu Gong ([email protected])
# @Link : None
# @Version : 0.0
import torch.nn as nn
class GenBlock(nn.Module):
    """Shared pre-activation residual generator block (imported by the
    CIFAR-10 generator): BN -> act -> conv twice, optional nearest-neighbour
    2x upsampling, learnable 1x1 shortcut when channels/resolution change.
    """
    def __init__(self, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
                 activation=nn.ReLU(), upsample=False, n_classes=0):
        super(GenBlock, self).__init__()
        self.activation = activation
        self.upsample = upsample
        self.learnable_sc = in_channels != out_channels or upsample
        hidden_channels = out_channels if hidden_channels is None else hidden_channels
        self.n_classes = n_classes
        self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
        self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
        self.b1 = nn.BatchNorm2d(in_channels)
        self.b2 = nn.BatchNorm2d(hidden_channels)
        if self.learnable_sc:
            self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
    def upsample_conv(self, x, conv):
        # Nearest-neighbour 2x upsample, then convolution.
        return conv(nn.UpsamplingNearest2d(scale_factor=2)(x))
    def residual(self, x):
        h = x
        h = self.b1(h)
        h = self.activation(h)
        h = self.upsample_conv(h, self.c1) if self.upsample else self.c1(h)
        h = self.b2(h)
        h = self.activation(h)
        h = self.c2(h)
        return h
    def shortcut(self, x):
        if self.learnable_sc:
            x = self.upsample_conv(x, self.c_sc) if self.upsample else self.c_sc(x)
            return x
        else:
            return x
def forward(self, x):
return self.residual(x) + self.shortcut(x) | 1,671 | 33.833333 | 90 | py |
sngan.pytorch | sngan.pytorch-master/utils/utils.py | # -*- coding: utf-8 -*-
# @Date : 2019-07-25
# @Author : Xinyu Gong ([email protected])
# @Link : None
# @Version : 0.0
import os
import torch
import dateutil.tz
from datetime import datetime
import time
import logging
def create_logger(log_dir, phase='train'):
    """Configure and return the root logger, writing INFO messages both to a
    time-stamped file inside ``log_dir`` and to the console.
    """
    stamp = time.strftime('%Y-%m-%d-%H-%M')
    final_log_file = os.path.join(log_dir, '{}_{}.log'.format(stamp, phase))
    logging.basicConfig(
        filename=str(final_log_file),
        format='%(asctime)-15s %(message)s',
    )
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # Mirror file output to stderr as well.
    logging.getLogger('').addHandler(logging.StreamHandler())
    return logger
def set_log_dir(root_dir, exp_name):
    """Create a timestamped experiment directory tree under ``root_dir`` and
    return a dict of its paths: ``prefix`` (experiment root), ``ckpt_path``
    (Model/), ``log_path`` (Log/) and ``sample_path`` (Samples/).

    Raises if the timestamped directory already exists (os.makedirs without
    exist_ok), which makes each call produce a fresh experiment folder.
    """
    path_dict = {}
    os.makedirs(root_dir, exist_ok=True)
    # set log path
    exp_path = os.path.join(root_dir, exp_name)
    now = datetime.now(dateutil.tz.tzlocal())
    timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
    prefix = exp_path + '_' + timestamp
    os.makedirs(prefix)
    path_dict['prefix'] = prefix
    # set checkpoint path
    ckpt_path = os.path.join(prefix, 'Model')
    os.makedirs(ckpt_path)
    path_dict['ckpt_path'] = ckpt_path
    log_path = os.path.join(prefix, 'Log')
    os.makedirs(log_path)
    path_dict['log_path'] = log_path
    # set sample image path for fid calculation
    sample_path = os.path.join(prefix, 'Samples')
    os.makedirs(sample_path)
    path_dict['sample_path'] = sample_path
    return path_dict
def save_checkpoint(states, is_best, output_dir,
                    filename='checkpoint.pth'):
    """Serialize ``states`` to ``output_dir/filename``; additionally write a
    ``checkpoint_best.pth`` copy when ``is_best`` is true."""
    target = os.path.join(output_dir, filename)
    torch.save(states, target)
    if is_best:
        best_target = os.path.join(output_dir, 'checkpoint_best.pth')
        torch.save(states, best_target)
| 1,772 | 26.703125 | 75 | py |
neu-nbv | neu-nbv-main/scripts/planning/dtu_experiment.py | import sys
import os
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, root_dir)
from neural_rendering.evaluation.pretrained_model import PretrainedModel
from neural_rendering.data import get_data
from neural_rendering.utils import parser, util
import yaml
from dotmap import DotMap
import torch
import warnings
import numpy as np
import pandas
import seaborn as sb
import copy
from scipy.spatial import distance
from datetime import datetime
import random
import pickle
from dotmap import DotMap
warnings.filterwarnings("ignore")
# follow pixelnerf setup
# Fixed indices of the DTU camera poses that the planners may select from
# (the candidate pool used by both baselines and our uncertainty planner).
candidate_index_list = [
    6,
    7,
    8,
    9,
    10,
    13,
    14,
    15,
    16,
    17,
    21,
    22,
    23,
    24,
    25,
    31,
    32,
    33,
    34,
    35,
    41,
    42,
    43,
    44,
    45,
]
def setup_random_seed(seed):
    """Seed Python, NumPy and torch RNGs (CPU and all CUDA devices) and pin
    cuDNN to deterministic kernels for reproducible experiments."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
def get_nbv_ref_index(
    model, images, poses, focal, c, z_near, z_far, candidate_list, budget, ref_index
):
    """Greedy uncertainty-guided next-best-view selection (our planner).

    For ``budget`` rounds: encode the currently selected reference views,
    render every remaining candidate pose, score it by the sum of squared
    per-ray uncertainty, and add the highest-scoring candidate to
    ``ref_index``. ``ref_index`` is mutated in place and also returned.
    """
    _, _, H, W = images.shape
    for i in range(budget):
        remain_candidate_list = list(set(candidate_list) - set(ref_index))
        reward_list = []
        # Re-encode with the views chosen so far before scoring candidates.
        model.network.encode(
            images[ref_index].unsqueeze(0),
            poses[ref_index].unsqueeze(0),
            focal.unsqueeze(0),
            c.unsqueeze(0),
        )
        for target_view in remain_candidate_list:
            novel_pose = poses[target_view]
            target_rays = util.gen_rays(
                novel_pose.unsqueeze(0), W, H, focal, z_near, z_far, c
            )
            target_rays = target_rays.reshape(1, H * W, -1)
            predict = DotMap(model.renderer_par(target_rays))
            uncertainty = predict["uncertainty"][0]
            # Reward = total squared rendering uncertainty of the view.
            reward = torch.sum(uncertainty**2).cpu().numpy()
            reward_list.append(reward)
        nbv_index = np.argmax(reward_list)
        new_ref_index = remain_candidate_list[nbv_index]
        ref_index.append(new_ref_index)
    return ref_index
def get_camera_view_direction(poses):
    """Return the unit viewing direction of each camera.

    ``poses`` is an (N, 4, 4) batch of camera-to-world matrices (torch
    tensor); the camera looks along the negative z-axis of its rotation
    block, so the direction is ``-R[:, 2]``.

    Fix: normalize each row individually. The previous code divided by the
    norm of the *entire* (N, 3) array, so the returned rows were not unit
    vectors (this did not change the ranking downstream, since cosine
    distance is scale-invariant, but violated the function's contract).
    """
    poses = poses.cpu().numpy()
    view_direction = -poses[..., :3, 2]
    norms = np.linalg.norm(view_direction, axis=-1, keepdims=True)
    view_direction = view_direction / norms
    return view_direction
def get_max_dist_ref_index(poses, ref_index, candidate_list, budget):
    """Greedy max-view-distance baseline: for ``budget`` rounds, add the
    candidate whose viewing direction maximizes the summed cosine distance
    to all already-selected views. Mutates and returns ``ref_index``."""
    directions = get_camera_view_direction(poses)
    for _ in range(budget):
        candidates = list(set(candidate_list) - set(ref_index))
        scores = []
        for cand in candidates:
            # Total cosine dissimilarity to every selected view so far.
            score = sum(
                distance.cosine(directions[cand], directions[chosen])
                for chosen in ref_index
            )
            scores.append(score)
        best = candidates[int(np.argmax(scores))]
        ref_index.append(best)
    return ref_index
def main():
    # planning experiment on DTU using baseline planners and our planner
    # With --evaluation_only, reuse the saved view-selection record from
    # --experiment_path; otherwise run planning first and save the record.
    setup_random_seed(10)
    args = parser.parse_args(planning_args)
    dtu_nbv_planner = DTUNBVPlanning(args)
    experiment_path = args.experiment_path
    if args.evaluation_only:
        with open(f"{experiment_path}/saved_index_dict.pkl", "rb") as f:
            index_record = pickle.load(f)
    else:
        # Fresh experiment folder named by the current date/time.
        experiment_path = os.path.join(
            root_dir,
            "experiments",
            "dtu",
            datetime.now().strftime("%d-%m-%Y-%H-%M"),
        )
        os.makedirs(experiment_path)
        index_record = dtu_nbv_planner.planning()
        with open(f"{experiment_path}/saved_index_dict.pkl", "wb") as f:
            pickle.dump(index_record, f)
    total_df = dtu_nbv_planner.evaluation(index_record)
    total_df.to_csv(f"{experiment_path}/dataframe.csv")
class DTUNBVPlanning:
    """
    planning on DTU using different view selection methods: max_view_distance, random, and our uncertainty guided
    """

    def __init__(self, args):
        """Load the pretrained model named by ``args.model_name`` (config,
        best checkpoint) and the DTU validation split used for planning."""
        log_path = os.path.join(root_dir, "neural_rendering", "logs", args.model_name)
        assert os.path.exists(log_path), "experiment does not exist"
        with open(f"{log_path}/training_setup.yaml", "r") as config_file:
            cfg = yaml.safe_load(config_file)
        checkpoint_path = os.path.join(log_path, "checkpoints", "best.ckpt")
        assert os.path.exists(checkpoint_path), "checkpoint does not exist"
        ckpt_file = torch.load(checkpoint_path)
        gpu_id = list(map(int, args.gpu_id.split()))
        self.device = util.get_cuda(gpu_id[0])
        self.repeat = args.repeat
        self.model = PretrainedModel(cfg["model"], ckpt_file, self.device, gpu_id)
        cfg["data"]["dataset"]["data_rootdir"] = os.path.join(
            root_dir, "neural_rendering/data/dataset/dtu_dataset/rs_dtu_4/DTU"
        )
        datamodule = get_data(cfg["data"])
        self.dataset = datamodule.load_dataset("val")
        self.z_near = self.dataset.z_near
        self.z_far = self.dataset.z_far

    def planning(self):
        """Run view selection for every scene, budget and repetition.

        Returns a nested dict ``{nviews: {scene: {repeat: {method: indices}}}}``
        where each method starts from the same 2 random initial views.
        """
        print(f"---------- planning ---------- \n")
        ON = len(self.dataset)
        selection_type = ["Max. View Distance", "Random", "Ours"]
        nview_list = [2, 3, 4, 5, 6, 7, 8, 9]  # maximal budget = 9
        scene_index = range(ON)
        ref_index_record = {}
        with torch.no_grad():
            for nviews in nview_list:
                ref_index_record[nviews] = {}
                print(f"---------- {nviews} views experiment---------- \n")
                for i in scene_index:
                    data_instance = self.dataset.__getitem__(i)
                    scene_title = data_instance["scan_name"]
                    ref_index_record[nviews][i] = {}
                    print(f"test on {scene_title}")
                    images = data_instance["images"].to(self.device)
                    focal = data_instance["focal"].to(self.device)
                    c = data_instance["c"].to(self.device)
                    poses = data_instance["poses"].to(self.device)
                    # random initialize first 2 ref images for all methods
                    for r in range(self.repeat):
                        ref_index_record[nviews][i][r] = {}
                        initial_ref_index = list(
                            np.random.choice(candidate_index_list, 2, replace=False)
                        )
                        candidate_list = list(
                            set(candidate_index_list) - set(initial_ref_index)
                        )
                        budget = nviews - 2
                        for stype in selection_type:
                            print(f"---------- repeat: {r}, {stype} ---------- \n")
                            if stype == "Max. View Distance":
                                ref_index = get_max_dist_ref_index(
                                    poses,
                                    copy.deepcopy(initial_ref_index),
                                    candidate_list,
                                    budget,
                                )
                                print(ref_index)
                            elif stype == "Random":
                                random_ref_index = list(
                                    np.random.choice(
                                        candidate_index_list, budget, replace=True
                                    )
                                )
                                ref_index = initial_ref_index + random_ref_index
                                print(ref_index)
                                # Sampling with replacement may duplicate views.
                                ref_index = np.unique(ref_index)
                            elif stype == "Ours":
                                ref_index = get_nbv_ref_index(
                                    self.model,
                                    images,
                                    poses,
                                    focal,
                                    c,
                                    self.z_near,
                                    self.z_far,
                                    candidate_list,
                                    budget,
                                    copy.deepcopy(initial_ref_index),
                                )
                                print(ref_index)
                            ref_index_record[nviews][i][r][stype] = ref_index
        return ref_index_record

    def evaluation(self, index_record):
        """Render the held-out candidate views from each recorded selection
        and return a DataFrame of mean PSNR/SSIM per method/budget/scene."""
        print(f"---------- evaluation ---------- \n")
        total_df = pandas.DataFrame(
            {
                "Planning Type": [],
                "Reference Image Number": [],
                "PSNR": [],
                "SSIM": [],
                "Scene": [],
            }
        )
        with torch.no_grad():
            for nviews, nviews_dict in index_record.items():
                print(f"---------- {nviews} views experiment---------- \n")
                for scene_id, scene_dict in nviews_dict.items():
                    data_instance = self.dataset.__getitem__(scene_id)
                    scene_title = data_instance["scan_name"]
                    print(f"test on {scene_title}")
                    images = data_instance["images"].to(self.device)
                    images_0to1 = images * 0.5 + 0.5  # undo [-1, 1] normalization
                    _, _, H, W = images.shape
                    focal = data_instance["focal"].to(self.device)
                    c = data_instance["c"].to(self.device)
                    poses = data_instance["poses"].to(self.device)
                    for repeat, repeat_dict in scene_dict.items():
                        for stype, ref_index in repeat_dict.items():
                            print(f"---------- repeat: {repeat}, {stype} ---------- \n")
                            print(ref_index)
                            self.model.network.encode(
                                images[ref_index].unsqueeze(0),
                                poses[ref_index].unsqueeze(0),
                                focal.unsqueeze(0),
                                c.unsqueeze(0),
                            )
                            # Evaluate only on candidates NOT used as references.
                            test_index = list(
                                set(candidate_index_list) - set(ref_index)
                            )
                            psnr_per_test = []
                            ssim_per_test = []
                            for target_view in test_index:
                                gt = (
                                    images_0to1[target_view]
                                    .permute(1, 2, 0)
                                    .cpu()
                                    .numpy()
                                )
                                novel_pose = poses[target_view]
                                target_rays = util.gen_rays(
                                    novel_pose.unsqueeze(0),
                                    W,
                                    H,
                                    focal,
                                    self.z_near,
                                    self.z_far,
                                    c,
                                )
                                target_rays = target_rays.reshape(1, H * W, -1)
                                predict = DotMap(self.model.renderer_par(target_rays))
                                metrics_dict = util.calc_metrics(
                                    predict, torch.tensor(gt)
                                )
                                psnr_per_test.append(metrics_dict["psnr"])
                                ssim_per_test.append(metrics_dict["ssim"])
                            psnr_per_scene = np.mean(psnr_per_test)
                            ssim_per_scene = np.mean(ssim_per_test)
                            print(psnr_per_scene, ssim_per_scene)
                            dataframe = pandas.DataFrame(
                                {
                                    "Planning Type": stype,
                                    "Reference Image Number": nviews,
                                    "PSNR": psnr_per_scene,
                                    "SSIM": ssim_per_scene,
                                    "Scene": scene_id,
                                },
                                index=[repeat],
                            )
                            # DataFrame.append was removed in pandas 2.0;
                            # concat is the equivalent, version-safe call.
                            total_df = pandas.concat([total_df, dataframe])
        return total_df
def planning_args(parser):
    """Register the DTU planning-experiment command line options on
    ``parser`` and return it."""
    # required
    parser.add_argument("--model_name", "-M", type=str, required=True,
                        help="model name of pretrained model")
    parser.add_argument("--repeat", "-R", type=int, default=5,
                        help="repeat times for planning experiment")
    # optional, with defaults
    parser.add_argument("--evaluation_only", action="store_true",
                        help="evaluation mode")
    parser.add_argument("--experiment_path", type=str, default="not defined",
                        help="must be defined in evaluation mode")
    parser.add_argument("--gpu_id", type=str, default="0",
                        help="GPU(s) to use, space delimited")
    return parser
if __name__ == "__main__":
main()
| 13,632 | 34.046272 | 113 | py |
neu-nbv | neu-nbv-main/scripts/planning/simulator_experiment.py | import rospy
import os
import sys
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, root_dir)
import yaml
import argparse
from planner import get_planner
from planner.utils import uniform_sampling
import numpy as np
import scipy.spatial as spatial
from datetime import datetime
import imageio
import glob
from dotmap import DotMap
import torch
from neural_rendering.utils import util
from neural_rendering.evaluation.pretrained_model import PretrainedModel
import pandas
import torch.nn.functional as F
# Maps internal planner-type keys to the display names used in result tables.
planner_title = {
    "max_distance": "Max. View Distance",
    "random": "Random",
    "neural_nbv": "Ours",
}
def setup_random_seed(seed):
    """Seed NumPy's global RNG (the only randomness source seeded here)."""
    np.random.seed(seed)
def main():
    """Simulator planning experiment: run each planner ``repeat`` times
    (unless --evaluation_only), then evaluate novel-view rendering quality
    (PSNR/SSIM) at growing reference-image budgets and save a CSV."""
    # planning experiment in simulator using baseline planners and our planner
    setup_random_seed(10)
    rospy.init_node("simulator_experiment")
    args = parse_args()
    planner_type_list = ["max_distance", "random", "neural_nbv"]
    repeat = args.repeat
    experiment_path = args.experiment_path
    if not args.evaluation_only:
        experiment_path = os.path.join(
            root_dir,
            "experiments",
            "simulator",
            datetime.now().strftime("%d-%m-%Y-%H-%M"),
        )
        os.makedirs(experiment_path, exist_ok=True)
        print("---------- planning ----------")
        for i in range(repeat):
            # initialize planning with 2 same views
            random_initial_view = []
            for _ in range(2):
                random_initial_view.append(
                    uniform_sampling(radius=2, phi_min=0.15)
                )  # hard-coded, should be the same for config file
            for planner_type in planner_type_list:
                # find planner configuration file
                print(
                    f"---------- {planner_type} planner, experiment ID {i} ----------\n"
                )
                planner_cfg_path = os.path.join(
                    "planning/config", f"{planner_type}_planner.yaml"
                )
                assert os.path.exists(planner_cfg_path)
                with open(planner_cfg_path, "r") as config_file:
                    planner_cfg = yaml.safe_load(config_file)
                planner_cfg.update(args.__dict__)
                planner_cfg["planner_type"] = planner_type
                planner_cfg["experiment_path"] = experiment_path
                planner_cfg["experiment_id"] = i
                nbv_planner = get_planner(planner_cfg)
                nbv_planner.start(initial_view=random_initial_view)
    print("---------- evaluation ----------")
    gpu_id = list(map(int, args.gpu_id.split()))
    device = util.get_cuda(gpu_id[0])
    log_path = os.path.join(root_dir, "neural_rendering", "logs", args.model_name)
    assert os.path.exists(log_path), "experiment does not exist"
    with open(f"{log_path}/training_setup.yaml", "r") as config_file:
        cfg = yaml.safe_load(config_file)
    checkpoint_path = os.path.join(log_path, "checkpoints", "best.ckpt")
    assert os.path.exists(checkpoint_path), "checkpoint does not exist"
    ckpt_file = torch.load(checkpoint_path)
    model = PretrainedModel(cfg["model"], ckpt_file, device, gpu_id)
    # load test view data as ground truth
    test_rgbs, test_poses, focal, c = get_image_data(
        args.test_data_path, "normal", device
    )
    # configure rendering information
    nview = int(args.nviews)
    _, _, H, W = test_rgbs.shape
    z_near = cfg["data"]["dataset"]["z_near"]
    z_far = cfg["data"]["dataset"]["z_far"]
    step_list = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
    total_df = pandas.DataFrame(
        {
            "Planning Type": [],
            "Reference Image Num.": [],
            "PSNR": [],
            "SSIM": [],
        }
    )
    for r in range(repeat):
        for planner_type in planner_type_list:
            ref_data_path = os.path.join(experiment_path, planner_type, str(r))
            ref_rgbs, ref_poses, _, _ = get_image_data(ref_data_path, "normal", device)
            for step in step_list:
                print(
                    f"---------- planner:{planner_type}, repeat {r}, step {step} ----------\n"
                )
                # KD-tree over the first `step` reference camera positions;
                # each test view uses its nearest references for encoding.
                ref_kd_tree = spatial.KDTree(ref_poses[:step, :3, 3].cpu().numpy())
                psnr_list = []
                ssim_list = []
                with torch.no_grad():
                    for i, rgb in enumerate(test_rgbs):
                        pose = test_poses[i]
                        gt = rgb * 0.5 + 0.5
                        gt = gt.permute(1, 2, 0).cpu().numpy()
                        _, ref_index = ref_kd_tree.query(
                            pose[:3, 3].cpu().numpy(), np.minimum(nview, step)
                        )
                        model.network.encode(
                            ref_rgbs[ref_index].unsqueeze(0),
                            ref_poses[ref_index].unsqueeze(0),
                            focal.unsqueeze(0),
                            c.unsqueeze(0),
                        )
                        target_rays = util.gen_rays(
                            pose.unsqueeze(0), W, H, focal, z_near, z_far, c
                        )
                        target_rays = target_rays.reshape(1, H * W, -1)
                        predict = DotMap(model.renderer_par(target_rays))
                        metrics_dict = util.calc_metrics(predict, torch.tensor(gt))
                        psnr_list.append(metrics_dict["psnr"])
                        ssim_list.append(metrics_dict["ssim"])
                psnr_mean = np.mean(psnr_list)
                ssim_mean = np.mean(ssim_list)
                print("psnr:", psnr_mean, "ssim:", ssim_mean)
                dataframe = pandas.DataFrame(
                    {
                        "Planning Type": planner_title[planner_type],
                        "Reference Image Num.": step,
                        "PSNR": psnr_mean,
                        "SSIM": ssim_mean,
                    },
                    index=[r],
                )
                # DataFrame.append was removed in pandas 2.0; use concat.
                total_df = pandas.concat([total_df, dataframe])
    total_df.to_csv(f"{experiment_path}/dataframe.csv")
image_to_tensor = util.get_image_to_tensor_balanced()
def get_image_data(data_path, coordinate_format, device, rescale=0.5):
    """Load images, camera poses and intrinsics from a capture directory.

    Expects ``data_path`` to contain ``images/*.jpg|png`` (sorted by name),
    ``trajectory.npy`` (one pose per image, converted via
    util.coordinate_transformation with ``coordinate_format``) and
    ``camera_info.yaml`` with ``focal`` and ``c`` entries. With ``rescale``
    != 1, images AND intrinsics are scaled accordingly.
    Returns (images, poses, focal, c) as tensors on ``device``.
    """
    assert os.path.exists(data_path)
    rgb_paths = [
        x
        for x in glob.glob(f"{data_path}/images/*")
        if (x.endswith(".jpg") or x.endswith(".png"))
    ]
    rgb_paths = sorted(rgb_paths)
    images = []
    poses = []
    for image_path in rgb_paths:
        image = imageio.imread(image_path)[..., :3]  # drop alpha channel
        image = image_to_tensor(image)
        images.append(image)
    pose_list = np.load(f"{data_path}/trajectory.npy")
    for pose in pose_list:
        pose = util.coordinate_transformation(pose, format=coordinate_format)
        poses.append(pose)
    with open(f"{data_path}/camera_info.yaml") as file:
        intrinsic = yaml.safe_load(file)
    images = torch.stack(images).to(device)
    poses = torch.stack(poses).to(device)
    if rescale != 1:
        _, _, H, W = images.shape
        H = int(rescale * H)
        W = int(rescale * W)
        images = F.interpolate(images, size=[W, H], mode="area")
    # Intrinsics must be rescaled together with the image resolution.
    focal = rescale * torch.tensor(intrinsic["focal"], dtype=torch.float32).to(device)
    c = rescale * torch.tensor(intrinsic["c"], dtype=torch.float32).to(device)
    assert len(images) == len(poses)
    return images, poses, focal, c
def test_visualize(results_dict):
    """Shows the first rendered rgb / uncertainty / depth maps side by side as a visual sanity check."""
    import matplotlib.pyplot as plt

    H, W = 400, 400
    rgb_map = results_dict.rgb[0].cpu().numpy().reshape(H, W, 3)
    depth_map = results_dict.depth[0].cpu().numpy().reshape(H, W)
    uncertainty_map = results_dict.uncertainty[0].cpu().numpy().reshape(H, W)
    _, axes = plt.subplots(1, 3)
    # Keep the original panel order: rgb, uncertainty, depth.
    for axis, image in zip(axes, (rgb_map, uncertainty_map, depth_map)):
        axis.imshow(image)
    plt.show()
def parse_args() -> argparse.Namespace:
    """Parses the command-line arguments for the evaluation script."""
    parser = argparse.ArgumentParser()
    # mandatory arguments
    parser.add_argument(
        "--model_name",
        "-M",
        type=str,
        required=True,
        help="model name of pretrained model",
    )
    parser.add_argument(
        "--test_data_path",
        "-TD",
        type=str,
        required=True,
        help="data path",
    )
    # arguments with default values
    parser.add_argument(
        "--repeat",
        "-rp",
        type=int,
        default=10,
        help="repeat experiment",
    )
    parser.add_argument(
        "--nviews", "-nv", type=int, default=5, help="number of reference views"
    )
    parser.add_argument(
        "--planning_budget",
        "-BG",
        type=int,
        default=20,
        help="maximal measurments for the mission",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cuda",
        # Bug fix: help text previously said "config file path" (copy-paste error).
        help="device to run the model on",
    )
    parser.add_argument(
        "--gpu_id",
        type=str,
        default="0",
        help="gpu to use, space delimited",
    )
    parser.add_argument(
        "--evaluation_only", action="store_true", help="evaluation mode"
    )
    parser.add_argument(
        "--experiment_path",
        type=str,
        default="not defined",
        help="must be defined in evaluation mode",
    )
    args = parser.parse_args()
    return args
# Standard entry-point guard so importing this module has no side effects.
if __name__ == "__main__":
    main()
| 9,450 | 29.685065 | 94 | py |
neu-nbv | neu-nbv-main/scripts/planning/planner/neural_nbv/neural_nbv_planner.py | import numpy as np
from scipy.spatial.transform import Rotation as R
from planner.planner import Planner
from planner.utils import view_to_pose_batch, random_view, uniform_sampling
from neural_rendering.evaluation.pretrained_model import PretrainedModel
import torch
from dotmap import DotMap
from neural_rendering.utils import util
import torch.nn.functional as F
import scipy.spatial as spatial
import matplotlib.pyplot as plt
import time
import yaml
import os
class NeuralNBVPlanner(Planner):
    """Next-best-view planner that scores candidate camera views by the rendering uncertainty of a
    pretrained neural-rendering model and greedily selects the highest-reward view.
    """

    def __init__(self, cfg):
        """Reads planning/rendering hyper-parameters from `cfg` and initializes the sensor model."""
        super().__init__(cfg)
        self.device = cfg["device"]
        self.gpu_id = list(map(int, cfg["gpu_id"].split()))
        self.init_sensor_model(cfg)
        self.image_to_tensor = util.get_image_to_tensor_balanced()
        self.num_candidates = cfg["num_candidates"]
        self.sample_type = cfg["sample_type"]
        self.view_change = cfg["view_change"]
        self.local_view_change = cfg["local_view_change"]
        self.selection_range = cfg["selection_range"]
        self.hierachical_sampling = cfg["use_hierachical_sampling"]
        self.sample_ratio = cfg["sample_ratio"]
        self.K = cfg["top_k"]
        self.max_ref_num = cfg["maximal_ref"]
        self.reward_type = cfg["reward_type"]
        self.render_batch_size = cfg["render_batch_size"]
        self.uncertainty_th = cfg["uncertainty_threshold"]
        self.candidate_views = None
        self.candidate_poses = None
        self.render_pairs = None
        self.trajectory_kdtree = None
        # self.depth_for_renderer = torch.empty(
        #     (self.planning_budget, self.H, self.W)
        # ).to(self.device)

    def init_sensor_model(self, cfg):
        """Loads the pretrained rendering model and precomputes the reference/render image formats."""
        assert os.path.exists(cfg["config_path"])
        assert os.path.exists(cfg["checkpoint_path"])
        with open(cfg["config_path"], "r") as config_file:
            model_cfg = yaml.safe_load(config_file)["model"]
        ckpt_file = torch.load(cfg["checkpoint_path"])
        self.model = PretrainedModel(model_cfg, ckpt_file, self.device, self.gpu_id)
        # original image format
        H, W = self.camera_info["image_resolution"]  # (H, W)
        focal = self.camera_info["focal"]  # (f_x, f_y)
        c = self.camera_info["c"]  # (c_x, c_y)
        # desired image format for rendering input
        render_info = cfg["render_info"]
        H_ref, W_ref = render_info["ref_image_resolution"]
        # Bug fix: scale the intrinsics unconditionally. The previous code only computed them inside
        # an `if` resolutions-differ branch, leaving `ref_focal`/`ref_c` at zero when the raw and
        # reference resolutions were equal. When they are equal the scales are 1, so this is a strict
        # generalization.
        scale_h = H_ref / H
        scale_w = W_ref / W
        ref_focal = [scale_w * focal[0], scale_h * focal[1]]
        ref_c = [scale_w * c[0], scale_h * c[1]]
        self.ref_focal = torch.tensor(ref_focal, dtype=torch.float32).to(self.device)
        self.ref_c = torch.tensor(ref_c, dtype=torch.float32).to(self.device)
        self.ref_image_resolution = (H_ref, W_ref)
        self.trajectory_for_renderer = torch.empty((self.planning_budget, 4, 4)).to(
            self.device
        )
        self.rgb_for_renderer = torch.empty((self.planning_budget, 3, H_ref, W_ref)).to(
            self.device
        )
        # desired image format for rendering output
        render_scale = render_info["render_scale"]
        self.H_render = int(render_scale * H_ref)
        self.W_render = int(render_scale * W_ref)
        render_scale = torch.tensor(
            [
                self.W_render / W_ref,
                self.H_render / H_ref,
            ]
        ).to(self.device)
        self.render_focal = render_scale * self.ref_focal
        self.render_c = render_scale * self.ref_c
        self.z_near, self.z_far = render_info["scene_range"]

    def render_novel_views(self, candidate_poses):
        """Renders every candidate pose (in batches) from its nearest recorded reference views and
        returns a per-candidate reward array."""
        candidate_num = len(candidate_poses)
        reward_list = np.zeros(candidate_num)
        # Nearest recorded measurement positions serve as reference views for each candidate.
        distance_all, ref_index_all = self.trajectory_kdtree.query(
            candidate_poses[:, :3, 3], np.minimum(self.max_ref_num, self.step)
        )
        # distance_all = torch.tensor(distance_all)
        # ref_index_all = torch.tensor(ref_index_all)
        # `KDTree.query` pads missing neighbors with inf distances; mask them out.
        bool_mask = ~np.isinf(distance_all)
        novel_poses = util.coordinate_transformation(
            candidate_poses, format="normal"
        ).to(self.device)
        # render novel view in batch
        split_novel_view = torch.split(
            torch.arange(candidate_num), self.render_batch_size, dim=0
        )
        for i in split_novel_view:
            ref_index = torch.tensor(ref_index_all[i] * bool_mask[i])
            ref_images = self.rgb_for_renderer[ref_index]
            ref_poses = self.trajectory_for_renderer[ref_index]
            render_results = self.rendering(ref_images, ref_poses, novel_poses[i])
            reward_list[i] = self.cal_reward(render_results)
        return reward_list

    def rendering(self, ref_images, ref_poses, novel_poses):
        """Encodes the reference views and renders the novel poses; returns the prediction `DotMap`."""
        NP = len(novel_poses)
        with torch.no_grad():
            self.model.network.encode(
                ref_images,
                ref_poses,
                self.ref_focal.unsqueeze(0),
                self.ref_c.unsqueeze(0),
            )
            target_rays = util.gen_rays(
                novel_poses,
                self.W_render,
                self.H_render,
                self.render_focal,
                self.z_near,
                self.z_far,
                self.render_c,
            )  # (IN, H, W, 8)
            target_rays = target_rays.reshape(NP, self.H_render * self.W_render, -1)
            predict = DotMap(self.model.renderer_par(target_rays))
        return predict

    def cal_reward(self, render_results):
        """Reward of a rendered view: log10 of the mean squared per-ray uncertainty."""
        uncertainty = render_results["uncertainty"]
        reward = torch.mean(uncertainty**2, dim=-1).cpu().numpy()
        reward = np.log10(reward)
        return reward

    # one stage planning
    def start_planning(self):
        """Samples candidate views around the current pose and returns the highest-reward one."""
        candidate_views, candidate_poses = self.local_sampling(
            self.num_candidates, self.current_pose[:3, 3], view_change=self.view_change
        )
        reward_list = self.render_novel_views(candidate_poses)
        nbv_index = np.argmax(reward_list)
        return candidate_views[nbv_index]

    def global_sampling(self, num):
        """Uniformly samples `num` views on the sphere; returns (views, poses)."""
        view_list = np.empty((num, 2))
        for i in range(num):
            view_list[i] = uniform_sampling(self.radius, self.phi_min)
        pose_list = view_to_pose_batch(view_list, self.radius)
        return view_list, pose_list

    def local_sampling(self, num, xyz, view_change, min_view_change=0.2):
        """Samples `num` views within [`min_view_change`, `view_change`] of `xyz`; returns (views, poses)."""
        view_list = np.empty((num, 2))
        for i in range(num):
            view_list[i] = random_view(
                xyz, self.radius, self.phi_min, min_view_change, view_change
            )
        pose_list = view_to_pose_batch(view_list, self.radius)
        return view_list, pose_list

    def plan_next_view(self):
        """Returns the next view to measure: uncertainty-driven after two measurements, random before."""
        import time

        if self.step > 1:
            t1 = time.time()
            nbv = self.start_planning()
            t2 = time.time()
            print((t2 - t1))
            return nbv
        # need at least two views to start the planning
        else:
            random_next_view = random_view(
                self.current_pose[:3, 3],
                self.radius,
                self.phi_min,
                self.view_change - 0.1,
                self.view_change,
            )
            return random_next_view

    def record_trajectory(self, view, pose):
        """Stores the new measurement pose and refreshes the KD-tree / renderer-format pose cache."""
        self.view_trajectory[self.step] = view
        self.trajectory[self.step] = pose
        # maintain current measurment positions in kd tree
        self.trajectory_kdtree = spatial.KDTree(self.trajectory[: self.step + 1, :3, 3])
        self.trajectory_for_renderer[self.step] = util.coordinate_transformation(
            pose, format="normal"
        ).to(self.device)

    def record_rgb_measurement(self, rgb):
        """Stores an RGB measurement and its resized tensor version for the renderer."""
        rgb = np.clip(rgb, a_min=0, a_max=255)
        rgb = rgb / 255
        self.rgb_measurements[self.step] = rgb
        ref_image = self.image_to_tensor(rgb).to(self.device)
        ref_image = F.interpolate(
            ref_image.unsqueeze(0), size=self.ref_image_resolution, mode="area"
        ).squeeze(0)
        self.rgb_for_renderer[self.step] = ref_image

    def test_visualize(self, ref_images, results_dict):
        """Debug helper: shows rgb / uncertainty / depth for each rendered candidate in the batch."""
        import matplotlib.pyplot as plt

        H = 60
        W = 60
        for i in range(self.render_batch_size):
            rgb = results_dict.rgb[i].cpu().numpy().reshape(H, W, 3)
            depth = results_dict.depth[i].cpu().numpy().reshape(H, W)
            uncertainty = results_dict.uncertainty[i].cpu().numpy().reshape(H, W)
            fig, axs = plt.subplots(1, 3)
            axs[0].imshow(rgb)
            axs[1].imshow(uncertainty)
            axs[2].imshow(depth)
            plt.show()
| 8,864 | 33.901575 | 88 | py |
fitclip | fitclip-main/util/structured_group_utils.py | """Useful utils when using `DataModuleStructuredGroup`."""
from typing import Any, Mapping, Sequence, Tuple
import torch
from aligner.video_text_module import TYPE_INPUT
from util.tensor_utils import pad
TYPE_MULTI_INPUT = Mapping[str, TYPE_INPUT]
# It's like `default_collate` but instead of a sequence we have a mapping, and we do `cat` instead of `stack`.
# It makes sense to be similar because we're merging multiple batches together.
# Note: collating here (instead of on the dataloader side) is simpler and more GPU-memory efficient.
def _cat_collate(batch: Sequence[Any]) -> Any:
    """Collates already-collated batches into one: tensors are concatenated (not stacked), mappings and
    namedtuples are collated field-wise, plain sequences are flattened, and scalars/strings are kept as-is.

    :raises TypeError: when the element type is not supported.
    """
    elem = batch[0]
    elem_type = type(elem)  # Bug fix: this was `type(batch)`, breaking the namedtuple branch below.
    if isinstance(elem, torch.Tensor):
        return torch.cat(batch)  # noqa
    elif isinstance(elem, Mapping):
        return {k: _cat_collate([d[k] for d in batch]) for k in elem}
    elif isinstance(elem, (float, int, bytes, str)):
        return batch
    elif isinstance(elem, tuple) and hasattr(elem, '_fields'):  # namedtuple
        return elem_type(*(_cat_collate(samples) for samples in zip(*batch)))  # noqa
    elif isinstance(elem, Sequence):
        return [x for d in batch for x in d]
    else:
        raise TypeError(f"Not sure how to collate type {elem_type}")
def _merge_datasets_batch(batches_by_dataset: TYPE_MULTI_INPUT) -> Tuple[TYPE_INPUT, Sequence[int]]:
    """Merges one batch per dataset into a single batch, also returning each dataset's batch size."""
    batches = batches_by_dataset.values()
    lengths = [len(batch["video"]) for batch in batches]
    # Pad every tokenized text field to the longest sequence so the batches can be concatenated.
    max_text_len = max(batch["text"]["input_ids"].shape[-1] for batch in batches)
    for batch in batches:
        batch["text"] = {key: pad(value, min_size=max_text_len, dim=-1) for key, value in batch["text"].items()}
    return _cat_collate(list(batches)), lengths
| 1,737 | 40.380952 | 110 | py |
fitclip | fitclip-main/util/viz_utils.py | import numpy as np
import torch
import torchvision
from matplotlib import pyplot as plt
from matplotlib.pyplot import subplots_adjust
from torchvision.transforms.functional import to_pil_image
from aligner.encoder.video_text_encoder import VideoTextEncoder
def visualize_images_tensor(images: torch.Tensor) -> plt.Axes:
    """Draws `images` (shape (N, C, H, W)) as a single grid and returns the axes."""
    image_grid = torchvision.utils.make_grid(images)
    fig, ax = plt.subplots()
    fig.tight_layout()
    subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
    ax.autoscale_view("tight")
    ax.imshow(np.asarray(to_pil_image(image_grid)))
    # Hide the axes' ticks and labels — only the image matters here.
    ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
    return ax
def debug_batch(video: torch.Tensor, text: torch.Tensor, encoder: VideoTextEncoder) -> None:
    """Shows the denormalized frames of a batch and prints its decoded texts, for debugging."""
    video = video.detach().cpu()
    text = text.detach().cpu()
    video = encoder.to_bchw(video)
    frames = encoder.denormalize_video_tensor(video).reshape(-1, *video.shape[2:])
    visualize_images_tensor(frames)
    plt.show()
    for decoded_text in encoder.decode_text(text):
        print(decoded_text)
| 1,139 | 29 | 95 | py |
fitclip | fitclip-main/util/tensor_utils.py | from typing import Any, Mapping, Optional, Sequence, TypeVar, Union
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from pytorch_lightning.utilities.apply_func import apply_to_collection
T = TypeVar("T")
def pad(t: torch.Tensor, min_size: int, dim: int = 1, value: Any = 0) -> torch.Tensor:
    """Right-pads dimension `dim` of `t` with `value` so its size is at least `min_size`.

    Returns `t` unchanged when it's already large enough.
    """
    ndim = len(t.shape)
    if dim < 0:
        dim += ndim
    current_size = t.shape[dim]
    if current_size >= min_size:
        return t
    # `F.pad` takes (left, right) pairs starting from the LAST dimension, so place the padding amount
    # at the pair corresponding to `dim` and leave all later dimensions untouched.
    pad_spec = (0, 0) * (ndim - 1 - dim) + (0, min_size - current_size)
    return F.pad(t, pad=pad_spec, value=value)
def split_in_collection(data: T, split_size_or_sections: Union[int, Sequence[int]]) -> Sequence[T]:
    """Applies `split` to the tensors inside `data` and regroups the pieces, yielding one collection
    (of the same type as `data`) per split chunk."""
    collection_type = type(data)
    if isinstance(data, torch.Tensor):
        return data.split(split_size_or_sections)
    if isinstance(data, Mapping):
        split_values = (split_in_collection(value, split_size_or_sections) for value in data.values())
        return [collection_type(zip(data.keys(), chunk)) for chunk in zip(*split_values)]
    if isinstance(data, Sequence):
        split_elements = (split_in_collection(element, split_size_or_sections) for element in data)
        return [collection_type(chunk) for chunk in zip(*split_elements)]
    raise ValueError(f"Unsupported type for split: {collection_type}")
def _first_tensor_in_collection(data: Any) -> torch.Tensor:
    """Returns the first tensor found by a depth-first walk of the (possibly nested) collection."""
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, Mapping):
        return _first_tensor_in_collection(data.values())
    return _first_tensor_in_collection(next(iter(data)))
def all_gather(lightning_module: pl.LightningModule, data: Any, group: Optional[Any] = None,
               sync_grads: bool = False, return_world_size_dim: bool = False) -> Any:
    """Gathers a tensor, or multiple tensors inside a collection, so that the output number of dimensions is the same
    regardless of the accelerator.

    Note this is different from `pl.LightningModule.all_gather`, that for a single GPU it doesn't return a new
    dimension but for the parallel settings it does.

    :param lightning_module: module whose `all_gather` is used (determines the accelerator behavior).
    :param data: a tensor or a nested collection (mappings/sequences) of tensors.
    :param group: process group forwarded to `all_gather`.
    :param sync_grads: whether gradients flow through the gather.
    :param return_world_size_dim: when True, keep (or add) a leading world-size dimension; when False,
        fold it into the batch dimension.
    :raises ValueError: when the gathered shape is neither the old one nor the old one plus a dimension.
    """
    first_tensor_old_shape = _first_tensor_in_collection(data).shape
    output = lightning_module.all_gather(data, group=group, sync_grads=sync_grads)
    # PL's `all_gather` may or may not have added a leading world-size dimension, depending on the
    # accelerator; compare shapes to find out which case we're in and normalize accordingly.
    if len(first_tensor_new_shape := _first_tensor_in_collection(output).shape) == len(first_tensor_old_shape) + 1:
        return output if return_world_size_dim else apply_to_collection(output, torch.Tensor,
                                                                        lambda t: t.view(-1, *t.shape[2:]))
    elif len(first_tensor_new_shape) == len(first_tensor_old_shape):
        return apply_to_collection(output, torch.Tensor, torch.Tensor.unsqueeze, 0) if return_world_size_dim else output
    else:
        raise ValueError(f"Unexpected new shape for the first tensor in the collection: {first_tensor_new_shape} (old "
                         f"was {first_tensor_old_shape}). "
                         f"The new shape was expected to have the same number of dimensions or one more.")
| 3,355 | 49.089552 | 120 | py |
fitclip | fitclip-main/util/checkpoint_utils.py | from typing import MutableMapping
import torch
from cached_path import cached_path
from util.typing_utils import TYPE_PATH
def state_dict_from_checkpoint_path(checkpoint_path: TYPE_PATH, prefix: str = "") -> MutableMapping[str, torch.Tensor]:
    """Loads the checkpoint at `checkpoint_path` (local path or URL, resolved with `cached_path`) and
    returns its state dict, keeping only the keys under `prefix` — with the prefix stripped.

    An empty `prefix` keeps every key unchanged.
    """
    prefix += ("" if prefix.endswith(".") or not prefix else ".")
    # `map_location="cpu"` so checkpoints saved on GPU load on CPU-only machines too; callers move the
    # tensors to the right device when loading them into a model.
    checkpoint = torch.load(cached_path(checkpoint_path), map_location="cpu")
    return {k[len(prefix):]: v for k, v in checkpoint["state_dict"].items() if k.startswith(prefix)}
| 472 | 35.384615 | 119 | py |
fitclip | fitclip-main/util/video_utils.py | import os
from typing import Any, Callable, Iterable, Iterator, Optional, Sequence
from torchvision.datasets.video_utils import VideoClips
from util.typing_utils import TYPE_PATH
# From https://en.wikipedia.org/wiki/Video_file_format
# All lowercase: `get_videos_in_folder` lowercases file names before comparing against these.
VIDEO_FILE_EXTENSIONS = (".3g2", ".3gp", ".amv", ".asf", ".avi", ".drc", ".f4a", ".f4b", ".f4p", ".f4v", ".flv",
                         ".gif", ".gifv", ".m2ts", ".m2v", ".m4p", ".m4v", ".mkv", ".mng", ".mov", ".mp2", ".mp4",
                         ".mpe", ".mpeg", ".mpg", ".mpv", ".mts", ".mxf", ".nsv", ".ogg", ".ogv", ".qt", ".rm",
                         ".rmvb", ".roq", ".svi", ".ts", ".viv", ".vob", ".webm", ".wmv", ".yuv")
def get_videos_in_folder(path: TYPE_PATH,
                         extensions: Optional[Iterable[str]] = VIDEO_FILE_EXTENSIONS) -> Iterator[str]:
    """Recursively yields the paths of the video files under `path` (following symlinks).

    A regular file qualifies when `extensions` is falsy or its name (case-insensitively) ends with one
    of them. Note `os.walk` gives no ordering guarantee; see `get_sorted_videos_in_folder`.
    """
    extension_tuple = None if extensions is None else tuple(extensions)
    for dir_path, _, file_names in os.walk(path, followlinks=True):
        for file_name in file_names:
            candidate = os.path.join(dir_path, file_name)
            if not os.path.isfile(candidate):
                continue
            if not extension_tuple or file_name.lower().endswith(extension_tuple):
                yield candidate
def get_sorted_videos_in_folder(path: TYPE_PATH,
                                extensions: Optional[Iterable[str]] = VIDEO_FILE_EXTENSIONS,
                                key: Optional[Callable[[str], Any]] = None, reverse: bool = False) -> Iterator[str]:
    """Deterministically-ordered version of `get_videos_in_folder`.

    `get_videos_in_folder` gives no ordering guarantee, but the files backing a PyTorch `Dataset` must
    come in a deterministic order — e.g. so a `DistributedSampler` assigns each sample to exactly one
    process. Sorting here makes that explicit at the call site.
    """
    found_videos = get_videos_in_folder(path, extensions)
    return sorted(found_videos, key=key, reverse=reverse)
def resample(num_frames: int, original_fps: float, new_fps: float) -> Sequence[int]:
    """Returns essentially the same as `VideoClips._resample_video_idx`. Unlike it, it always checks for the max frames
    (the mentioned function doesn't do it when it returns a `slice`)."""
    # NOTE(review): relies on a private torchvision API; revisit if torchvision changes or removes it.
    indices = VideoClips._resample_video_idx(num_frames, original_fps, new_fps)
    if isinstance(indices, slice) and indices.stop is None:
        # An open-ended slice would let callers index past the available frames; materialize it as a
        # bounded `range` whose stop is derived from `num_frames` and the slice's start/step.
        indices = range(*indices.indices((indices.start or 0) + num_frames * indices.step))
    return indices
| 2,659 | 53.285714 | 119 | py |
fitclip | fitclip-main/scripts/apply_wise_ft.py | #!/usr/bin/env python
import argparse
import torch
from aligner.encoder.clip_video_text_encoder import load_clip_model
from aligner.wise import wise_state_dict
from util.argparse_with_defaults import ArgumentParserWithDefaults
def parse_args() -> argparse.Namespace:
    """Parses the two input checkpoints, the output path and the interpolation weight."""
    parser = ArgumentParserWithDefaults("Applies weight-space ensembles for fine-tuning (WiSE-FT) on 2 CLIP "
                                        "checkpoints.",
                                        description="See https://arxiv.org/abs/2109.01903 for more info.")
    for i in (1, 2):
        parser.add_argument(f"input_path_or_name{i}", metavar=f"INPUT_FILE_OR_NAME_{i}")
    parser.add_argument("output_path", metavar="OUTPUT_FILE")
    parser.add_argument("--weight-for-2", type=float, default=0.5)
    return parser.parse_args()
def main() -> None:
    """Interpolates the weights of two CLIP checkpoints (WiSE-FT) and saves the resulting state dict."""
    args = parse_args()
    model1 = load_clip_model(args.input_path_or_name1)
    model2 = load_clip_model(args.input_path_or_name2)
    # Training deletes CLIP's own logit scale (we use ours instead), so re-create the attribute on any
    # model that lacks it — otherwise using these checkpoints would fail.
    for model in (model1, model2):
        model.logit_scale = getattr(model, "logit_scale", torch.tensor(float("nan")))
    state_dict = wise_state_dict(model1, model2, weight_for_2=args.weight_for_2)
    torch.save(state_dict, args.output_path)
# Standard entry-point guard so importing this module has no side effects.
if __name__ == "__main__":
    main()
| 1,526 | 37.175 | 116 | py |
fitclip | fitclip-main/scripts/subcorr.py | #!/usr/bin/env python
import argparse
import sys
from typing import Any, Callable, Iterable, MutableMapping, Optional, Sequence, Union
import PIL.Image
import clip
import decord
import numpy as np
import seaborn as sns
import torch
from clip.model import CLIP
from matplotlib import pyplot as plt
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
from spacy.tokens import Doc, Span
def get_video_info(path: str) -> MutableMapping[str, Any]:
    """Reads the video at `path` and returns every 10th frame plus key-frame thumbnails, together with
    the corresponding timestamps (mean of each frame's start/end time)."""
    video_reader = decord.VideoReader(path)
    sampled_indices = list(range(0, len(video_reader), 10))
    sampled_frames = [PIL.Image.fromarray(a) for a in video_reader.get_batch(sampled_indices).asnumpy()]
    key_frame_indices = video_reader.get_key_indices()
    key_frames = [PIL.Image.fromarray(a) for a in video_reader.get_batch(key_frame_indices).asnumpy()]
    thumbnails = []
    for key_frame in key_frames:
        thumbnail = key_frame.copy()  # `Image.thumbnail` resizes in place, so work on a copy.
        thumbnail.thumbnail((64, 64))
        thumbnails.append(thumbnail)
    return {
        "frames": sampled_frames,
        "frame_times": video_reader.get_frame_timestamp(sampled_indices).mean(axis=-1),  # noqa
        "thumbnails": thumbnails,
        "thumbnail_times": video_reader.get_frame_timestamp(key_frame_indices).mean(axis=-1),  # noqa
    }
def encode_visual(images: Iterable[PIL.Image.Image], clip_model: CLIP,
                  image_preprocessor: Callable[[PIL.Image.Image], torch.Tensor],
                  device: Optional[Any] = None) -> torch.Tensor:
    """Preprocesses and encodes `images` with CLIP, returning L2-normalized embeddings."""
    image_batch = torch.stack([image_preprocessor(image) for image in images])
    if device is not None:
        image_batch = image_batch.to(device)
    with torch.inference_mode():
        encoded = clip_model.encode_image(image_batch)
    return encoded / encoded.norm(dim=-1, keepdim=True)
def encode_text(text: str, clip_model: CLIP, device: Optional[Any] = None) -> torch.Tensor:
    """Tokenizes and encodes `text` with CLIP, returning an L2-normalized embedding."""
    token_batch = clip.tokenize([text])
    if device is not None:
        token_batch = token_batch.to(device)
    with torch.inference_mode():
        encoded = clip_model.encode_text(token_batch)
    return encoded / encoded.norm(dim=-1, keepdim=True)
def text_probs(encoded_images: torch.Tensor, encoded_texts: torch.Tensor) -> np.ndarray:
    """Returns a probability per image (frame) of matching the text, softmax-normalized over images."""
    with torch.inference_mode():
        # 100 is CLIP's logit scale (clip_model.logit_scale.exp() == 100).
        similarities = 100 * encoded_images @ encoded_texts.T
        return similarities.softmax(dim=0).squeeze(-1).cpu().numpy()  # noqa
def create_figure(times: Sequence[float], probs: Sequence[float], thumbnail_times: Sequence[float],
                  thumbnails: Iterable[PIL.Image.Image], title: Union[Doc, Span, str]) -> plt.Axes:
    """Plots `probs` over `times` as a filled line chart, drawing `thumbnails` along the x-axis.

    When `title` is a spaCy `Doc`/`Span`, its associated time span is additionally highlighted in red.
    """
    # noinspection SpellCheckingInspection
    sns.set(rc={"figure.figsize": (1.0 * len(thumbnail_times), 1.5)})
    ax = sns.lineplot(x=times, y=probs)
    plt.xticks(thumbnail_times)
    ax.set_title(title.text if isinstance(title, (Doc, Span)) else title, fontsize=35, y=0.6)
    ax.set(xlabel="time", ylabel="probability")
    plt.fill_between(times, probs)
    if isinstance(title, (Doc, Span)):
        # NOTE(review): `_.start_time`/`_.end_time` are custom spaCy token extensions — presumably set
        # by the caller to token-aligned video times; confirm they are registered before calling this.
        start_time = title[0]._.start_time
        end_time = title[-1]._.end_time
        plt.axvspan(start_time, end_time, alpha=0.5, color="red")
    # Place each thumbnail just below the x-axis at its timestamp.
    for i, (time, thumbnail) in enumerate(zip(thumbnail_times, thumbnails)):
        im = OffsetImage(thumbnail, axes=ax)
        ab = AnnotationBbox(im, (time, 0), xybox=(0, -60), frameon=False, boxcoords="offset points", pad=0)
        ax.add_artist(ab)
    plt.margins(x=0, tight=True)
    plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
    return ax
def create_figure_for_text(encoded_frames: torch.Tensor, text: Union[Doc, Span, str], clip_model: CLIP,
                           times: Sequence[float], thumbnail_times: Sequence[float],
                           thumbnails: Iterable[PIL.Image.Image]) -> plt.Axes:
    """Encodes `text`, computes per-frame match probabilities and plots them via `create_figure`."""
    raw_text = text.text if isinstance(text, (Doc, Span)) else text
    encoded_texts = encode_text(raw_text, clip_model, device=encoded_frames.device)
    probs = text_probs(encoded_frames, encoded_texts)
    return create_figure(times, probs, thumbnail_times, thumbnails, text)
def parse_args() -> argparse.Namespace:
    """Parses the command line: a single positional video path."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("path", metavar="PATH")
    return arg_parser.parse_args()
def main() -> None:
    """For each non-blank stdin line, plots its per-frame CLIP match probabilities over the video."""
    sns.set_theme()
    args = parse_args()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    clip_model, image_preprocessor = clip.load("ViT-B/16", device=device)
    # noinspection SpellCheckingInspection
    video_info = get_video_info(args.path)
    encoded_frames = encode_visual(video_info["frames"], clip_model, image_preprocessor, device=device)
    for line in sys.stdin:
        if stripped_line := line.strip():
            create_figure_for_text(encoded_frames, stripped_line, clip_model, video_info["frame_times"],
                                   video_info["thumbnail_times"], video_info["thumbnails"])
            plt.show()
# Standard entry-point guard so importing this module has no side effects.
if __name__ == "__main__":
    main()
| 4,981 | 35.101449 | 109 | py |
fitclip | fitclip-main/scripts/prepare_trained_clip_checkpoint_for_evaluation.py | #!/usr/bin/env python
import argparse
import torch
from util.checkpoint_utils import state_dict_from_checkpoint_path
def parse_args() -> argparse.Namespace:
    """Parses the input/output checkpoint paths and the state-dict key prefix to strip."""
    arg_parser = argparse.ArgumentParser()
    for name, metavar in (("input_path", "INPUT_FILE"), ("output_path", "OUTPUT_FILE")):
        arg_parser.add_argument(name, metavar=metavar)
    arg_parser.add_argument("--prefix", default="encoder.model.")
    return arg_parser.parse_args()
def main() -> None:
    """Extracts the CLIP weights from a trained checkpoint into a standalone state dict file."""
    args = parse_args()
    state_dict = state_dict_from_checkpoint_path(args.input_path, prefix=args.prefix)
    # Training replaces CLIP's logit scale with our own (deleting CLIP's), so re-create the variable
    # here; otherwise loading this `state_dict` into a CLIP model would fail.
    state_dict["logit_scale"] = torch.tensor(float("nan"))
    torch.save(state_dict, args.output_path)
# Standard entry-point guard so importing this module has no side effects.
if __name__ == "__main__":
    main()
| 868 | 27.032258 | 116 | py |
fitclip | fitclip-main/scripts/checkpoint_to_state_dict.py | #!/usr/bin/env python
import argparse
import sys
import torch
from util.checkpoint_utils import state_dict_from_checkpoint_path
def parse_args() -> argparse.Namespace:
    """Parses the input checkpoint path and the state-dict key prefix to strip."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("input_path", metavar="INPUT_FILE")
    arg_parser.add_argument("--prefix", default="encoder.model.")
    return arg_parser.parse_args()
def main() -> None:
    """Extracts the prefixed state dict from the checkpoint and streams it to standard output."""
    args = parse_args()
    # Write to the binary stdout buffer so the output can be piped or redirected to a file.
    torch.save(state_dict_from_checkpoint_path(args.input_path, prefix=args.prefix), sys.stdout.buffer)
# Standard entry-point guard so importing this module has no side effects.
if __name__ == "__main__":
    main()
| 582 | 22.32 | 85 | py |
fitclip | fitclip-main/scripts/prepare_trained_checkpoint_for_evaluation.py | #!/usr/bin/env python
import argparse
import torch
from cached_path import cached_path
def parse_args() -> argparse.Namespace:
    """Parses the input (resolved with `cached_path`, so it may be a URL) and output checkpoint paths,
    plus the state-dict key prefix to strip."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("input_path", metavar="INPUT_FILE", type=cached_path)
    arg_parser.add_argument("output_path", metavar="OUTPUT_FILE")
    arg_parser.add_argument("--prefix", default="encoder.model.")
    return arg_parser.parse_args()
def main() -> None:
    """Rewrites a checkpoint so its state dict keeps only the keys under `--prefix`, stripped of it."""
    args = parse_args()
    checkpoint = torch.load(args.input_path)
    # Normalize the prefix to end with "." but keep an empty prefix empty. Bug fix: previously an
    # empty prefix became "." and silently dropped every key; this now matches the normalization in
    # `util.checkpoint_utils.state_dict_from_checkpoint_path`.
    prefix = args.prefix + ("" if args.prefix.endswith(".") or not args.prefix else ".")
    checkpoint["state_dict"] = {k[len(prefix):]: v for k, v in checkpoint["state_dict"].items() if k.startswith(prefix)}
    torch.save(checkpoint, args.output_path)
# Standard entry-point guard so importing this module has no side effects.
if __name__ == "__main__":
    main()
| 771 | 26.571429 | 120 | py |
fitclip | fitclip-main/scripts/open_clip_checkpoint_to_model.py | #!/usr/bin/env python
import argparse
import torch
from cached_path import cached_path
def parse_args() -> argparse.Namespace:
    """Parses the input (resolved with `cached_path`, so it may be a URL) and output paths."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("input_path", metavar="INPUT_FILE", type=cached_path)
    arg_parser.add_argument("output_path", metavar="OUTPUT_FILE")
    return arg_parser.parse_args()
def main() -> None:
    """Converts an OpenCLIP training checkpoint into a plain model state dict file, removing the
    wrapper prefix ("model." or "module.") from every key."""
    args = parse_args()
    checkpoint = torch.load(args.input_path)
    state_dict = checkpoint["state_dict"]
    first_key = next(iter(state_dict))
    prefix = next((prefix for prefix in ["model", "module"] if first_key.startswith(prefix + ".")), None)
    # Bug fix: an unrecognized prefix previously raised a bare `StopIteration`; fail with a clear error.
    if prefix is None:
        raise ValueError(f"Unrecognized state dict key prefix in first key: {first_key}")
    torch.save({k[len(prefix + "."):]: v for k, v in state_dict.items()}, args.output_path)
# Standard entry-point guard so importing this module has no side effects.
if __name__ == "__main__":
    main()
| 744 | 25.607143 | 97 | py |
fitclip | fitclip-main/aligner/video_text_module.py | from typing import Any, Literal, Mapping, MutableMapping, Optional, Sequence, Tuple, Union
import math
import pytorch_lightning as pl
import torch.distributed.nn
from overrides import overrides
from torch import nn
from torch.nn.modules.loss import _Loss
from aligner.encoder.video_text_encoder import TYPE_OUTPUT, VideoTextEncoder
from aligner.loss import NCELoss
from util.tensor_utils import all_gather
TYPE_INPUT = MutableMapping[str, Any]
TYPE_SPLIT = Literal["train", "val"]
def log_lr(pl_module: pl.LightningModule, **kwargs) -> None:
    """Logs the learning rate of every param group of every optimizer attached to the trainer."""
    for optimizer_index, optimizer in enumerate(pl_module.trainer.optimizers):
        for group_index, param_group in enumerate(optimizer.param_groups):
            lr = param_group.get("lr")
            if lr is not None:
                pl_module.log(f"lr_{optimizer_index}_group_{group_index}", lr, **kwargs)
class VideoTextLightningModule(pl.LightningModule):  # noqa
    """Contrastively trains a video-text encoder with a CLIP-style learned temperature.

    The similarity scores between the gathered video and text embeddings are scaled by the exponential
    of `logit_scale` and fed to `loss` (`NCELoss` by default).
    """
    def __init__(self, encoder: VideoTextEncoder, init_temperature: float = 0.05, min_temperature: float = 0.001,
                 fit_temperature: bool = True, loss: Optional[_Loss] = None) -> None:
        super().__init__()
        self.encoder = encoder
        # Use the temperature as in CLIP: save it in log-space and fit it along with the model.
        self.logit_scale = nn.Parameter(torch.tensor([- math.log(init_temperature)]), requires_grad=fit_temperature)
        # The following constant is set also as a parameter, so it's moved to the correct device automatically.
        self.max_logit_scale = nn.Parameter(torch.tensor([- math.log(min_temperature)]), requires_grad=False)
        self.loss = loss or NCELoss()
    @overrides(check_signature=False)
    def forward(self, batch: TYPE_INPUT,
                _batch_idx: int = 0) -> Union[TYPE_OUTPUT, Tuple[torch.Tensor, torch.Tensor, Sequence[str]]]:
        """Encodes the batch, dropping any `video_id` entry (the encoder doesn't take it)."""
        batch.pop("video_id", None)
        return self.encoder(**batch)
    def _step(self, batch: TYPE_INPUT, batch_idx: int = 0) -> TYPE_OUTPUT:
        """Shared step used by training and prediction: just forwards the batch."""
        return self(batch, batch_idx)
    @overrides(check_signature=False)
    def training_step(self, batch: TYPE_INPUT, _batch_idx: int = 0) -> TYPE_OUTPUT:
        output = self._step(batch, _batch_idx)
        # Need to log the step because PL doesn't log it in Neptune.
        # See https://github.com/PyTorchLightning/pytorch-lightning/pull/5510
        first_video_value = next(v for k, v in batch.items() if k.startswith("video"))
        self.log("step", float(self.global_step), batch_size=len(first_video_value))
        return output
    def _step_end(self, output: TYPE_OUTPUT, split: TYPE_SPLIT,
                  log_kwargs: Optional[Mapping[str, Any]] = None) -> Union[torch.Tensor, TYPE_OUTPUT]:
        """Gathers the encoded video/text across devices, computes the loss (over all pairs, so every
        cross-device sample acts as a negative) and logs metrics. Returns the loss when training, the
        gathered embeddings otherwise."""
        log_kwargs = log_kwargs or {}
        encoded_video, encoded_text = all_gather(self, output, sync_grads=split == "train")
        batch_size = len(encoded_video)
        logit_scale = self.logit_scale.exp()
        scores = logit_scale * encoded_video @ encoded_text.T
        loss = self.loss(scores)
        # Note train loss it's already shown in the progress bar by PL by default.
        #
        # Note that we need to pass the batch size in the first step log
        # as it can't be easily inferred by PL in our case.
        self.log(f"loss/{split}", loss, prog_bar=split != "train", batch_size=batch_size, **log_kwargs)
        if split == "train":
            self.log("batch_size", float(batch_size), batch_size=batch_size)
            self.log("temperature", 1 / logit_scale, batch_size=batch_size)
        return loss if split == "train" else (encoded_video, encoded_text)
    @overrides(check_signature=False)
    def training_step_end(self, output: TYPE_OUTPUT) -> torch.Tensor:
        loss = self._step_end(output, split="train")
        log_lr(self)
        return loss
    @overrides(check_signature=False)
    def predict_step(self, batch: TYPE_INPUT, batch_idx: int = 0) -> Mapping[str, torch.Tensor]:
        """Returns the encoded videos/texts along with their video IDs."""
        encoded_video, encoded_text = self._step(batch, batch_idx)
        return {
            "encoded_videos": encoded_video,
            "encoded_texts": encoded_text,
            "video_ids": batch["video_id"]
        }
    @overrides(check_signature=False)
    def optimizer_step(self, *args, **kwargs) -> None:
        """Runs the regular optimizer step, then clamps `logit_scale` so the temperature never drops
        below `min_temperature`."""
        super().optimizer_step(*args, **kwargs)
        if self.logit_scale >= self.max_logit_scale:
            self.logit_scale.copy_(self.max_logit_scale)
| 4,376 | 43.663265 | 116 | py |
fitclip | fitclip-main/aligner/__main__.py | #!/usr/bin/env python
import logging
import os
from time import strftime
from typing import Mapping, Optional
import hydra
import torch
from omegaconf import DictConfig
from pytorch_lightning.loggers import NeptuneLogger, TensorBoardLogger
from aligner.cli import create_model_data_module_trainer_and_ckpt_path, init_cli
from aligner.logger_utils import get_logger_by_type
# Note it's better to have this as a module, so it's importable and DDP works fine in debug mode.
# Maybe this issue is caused by Hydra moving the CWD to somewhere else.
# Module-level logger used by `main` below.
LOGGER = logging.getLogger(__name__)
# Set an env var, if empty, to the desired working directory in sweep mode. Then we read it from the config.
# This way we make sure all processes use the same folder.
# See https://github.com/PyTorchLightning/pytorch-lightning/issues/2727
os.environ.setdefault("SWEEP_DIR", f"multirun/{strftime('%Y-%m-%d')}/{strftime('%H-%M-%S')}")
@hydra.main(config_path="../config", config_name="trainer")
def main(cfg: DictConfig) -> Optional[float]:
    """Entry point: build the model, data module and trainer from the Hydra config and run `cfg.command`.

    Supported commands: "train", "tune", "evaluate"/"validate", "test" and "predict".
    Returns the value of `cfg.optimized_metric_name` after training (used for hyper-parameter search),
    otherwise `None`.
    """
    init_cli(cfg)

    if cfg.get("trainer", {}).get("strategy") == "dp":
        LOGGER.warning("DP strategy not supported by the current metric logging scheme."
                       " See https://torchmetrics.readthedocs.io/en/stable/pages/lightning.html#logging-torchmetrics")

    model, data_module, trainer, ckpt_path = create_model_data_module_trainer_and_ckpt_path(cfg)

    output = None

    if cfg.command == "train":
        if cfg.get("validate_before_training"):
            LOGGER.info("Validation before training started.")
            with torch.inference_mode():
                metrics_list = trainer.validate(model, datamodule=data_module, ckpt_path=ckpt_path)
            LOGGER.info("Validation before training finished.")
            # Log the pre-training metrics as hyper-parameter metrics in TensorBoard, unless it already
            # logs a default hp metric on its own.
            if (tb_logger := get_logger_by_type(trainer, TensorBoardLogger)) and not tb_logger._default_hp_metric:
                tb_logger.log_hyperparams(model.hparams_initial, metrics={k: v for metrics in metrics_list
                                                                          for k, v in metrics.items()})

        LOGGER.info("Training started.")
        trainer.fit(model, datamodule=data_module, ckpt_path=ckpt_path)

        if optimized_metric_name := cfg.get("optimized_metric_name"):
            output = trainer.callback_metrics.get(optimized_metric_name)
    elif cfg.command == "tune":
        assert ckpt_path is None, "Checkpoint path not supported when tuning."

        if trainer._accelerator_connector.is_distributed:
            LOGGER.warning("Tuning with the PL Trainer is known to have some issues in distributed settings."
                           " See e.g. https://github.com/PyTorchLightning/pytorch-lightning/issues/4280")

        LOGGER.info("Tuning started.")
        trainer.tune(model, datamodule=data_module)
    elif cfg.command in {"evaluate", "validate"}:
        with torch.inference_mode():
            trainer.validate(model, datamodule=data_module, ckpt_path=ckpt_path)
    elif cfg.command == "test":
        with torch.inference_mode():
            trainer.test(model, datamodule=data_module, ckpt_path=ckpt_path)
    elif cfg.command == "predict":
        if trainer._accelerator_connector.is_distributed:
            LOGGER.warning("Predicting with the PL Trainer is known to have some issues in distributed settings."
                           " See e.g. https://github.com/PyTorchLightning/pytorch-lightning/issues/10618")

        output_path = cfg.get("output_path", "predictions.pt")

        with torch.inference_mode():
            predictions = trainer.predict(model, datamodule=data_module, ckpt_path=ckpt_path)
        assert predictions

        first_prediction = predictions[0]
        assert isinstance(first_prediction, Mapping)
        # Iterating a mapping yields its keys.
        keys = first_prediction

        # Concatenate tensor outputs across batches; flatten any other (list-like) outputs.
        predictions_map = {k: torch.cat([prediction[k] for prediction in predictions])
                           if isinstance(first_prediction[k], torch.Tensor)
                           else [p for prediction in predictions for p in prediction[k]]
                           for k in keys}

        torch.save(predictions_map, output_path)
    else:
        raise ValueError(f"Unrecognized command: {cfg.command}")

    if (neptune_logger := get_logger_by_type(trainer, NeptuneLogger)) and trainer.is_global_zero:
        # In a Hydra multirun (sweep) scenario, Neptune experiments from finished runs are marked as still running
        # unless we stop them manually. See https://github.com/PyTorchLightning/pytorch-lightning/issues/11368
        neptune_logger.run.stop()

    # Return the optimized metric value for hparam search.
    return output


if __name__ == "__main__":
    main()
| 4,715 | 43.490566 | 118 | py |
fitclip | fitclip-main/aligner/logger_utils.py | from typing import Optional, Type, TypeVar
import pytorch_lightning as pl
from pytorch_lightning.loggers import LightningLoggerBase, LoggerCollection
T = TypeVar("T", bound=LightningLoggerBase)
def get_logger_by_type(trainer: pl.Trainer, logger_class: Type[T]) -> Optional[T]:
    """Return the logger of type `logger_class` attached to `trainer`, or `None` if there is none.

    If the trainer holds a `LoggerCollection`, the collection is searched and the first match is returned.
    """
    attached_logger = trainer.logger
    # Check for a collection first: a `LoggerCollection` is itself a logger subclass, so the order matters.
    if isinstance(attached_logger, LoggerCollection):
        matches = (member for member in attached_logger._logger_iterable if isinstance(member, logger_class))
        return next(matches, None)
    return attached_logger if isinstance(attached_logger, logger_class) else None
| 564 | 32.235294 | 117 | py |
fitclip | fitclip-main/aligner/teacher_student.py | import itertools
from typing import Iterable, Mapping, MutableMapping, Optional, Tuple, Union
import torch.distributed.nn
from overrides import overrides
from torch import nn
from aligner.encoder import video_text_encoder
from aligner.encoder.video_text_encoder import TYPE_TOKENIZER, VideoTextEncoder
from aligner.loss import TeacherStudentNCELoss
from aligner.text_video_retrieval import TextVideoRetrievalLightningModule
from aligner.video_text_module import TYPE_INPUT, TYPE_SPLIT, log_lr
from util.tensor_utils import all_gather, pad, split_in_collection
TYPE_OUTPUT = Tuple[video_text_encoder.TYPE_OUTPUT, video_text_encoder.TYPE_OUTPUT]
TYPE_MULTI_OUTPUT = Mapping[str, TYPE_OUTPUT]
def _replace_in_tokenized_text(tokenized_text: MutableMapping[str, torch.Tensor],
new_tokenized_text: Mapping[str, torch.Tensor], start_idx: int, end_idx: int,
tokenizer: TYPE_TOKENIZER) -> None:
"""Replaces the content in the tensor `tokenized_text` from the index `start_idx` to `end_idx` (exclusive) for
`new_tokenized_text`.
When it needs to know details about the tokenization, it uses `tokenizer`.
"""
for k in tokenized_text:
padding_value = 0 if "mask" in k else getattr(tokenizer, "pad_token_id", 0)
# We suppose right padding.
if tokenized_text[k].shape[1] > new_tokenized_text[k].shape[1]:
padded = pad(new_tokenized_text[k], min_size=tokenized_text[k].shape[1], value=padding_value)
tokenized_text[k] = torch.cat((tokenized_text[k][:start_idx], padded, tokenized_text[k][end_idx:]))
elif tokenized_text[k].shape[1] < new_tokenized_text[k].shape[1]:
padded = pad(tokenized_text[k], min_size=new_tokenized_text[k].shape[1], value=padding_value)
tokenized_text[k] = torch.cat((padded[:start_idx], new_tokenized_text[k], padded[end_idx:]))
else:
tokenized_text[k] = torch.cat((tokenized_text[k][:start_idx], new_tokenized_text[k],
tokenized_text[k][end_idx:]))
class TeacherStudentLightningModule(TextVideoRetrievalLightningModule):  # noqa
    """
    Distillation training module.

    The student (`encoder`) is trained with the regular contrastive loss on the labeled dataset and with a
    teacher-student (KL) loss on the unlabeled dataset, matching the frozen teacher's score distribution.

    If specified, `prompts` is used with the unlabeled dataset videos instead of the labels it provides (if any).
    """

    def __init__(self, encoder: VideoTextEncoder, teacher: VideoTextEncoder, labeled_dataset_name: str = "labeled",
                 labeled_dataset_loss_share: Optional[float] = None,
                 dataset_names: Iterable[str] = ("labeled", "unlabeled"), prompts: Optional[Iterable[str]] = None,
                 **kwargs) -> None:
        super().__init__(encoder=encoder, dataset_names=dataset_names, **kwargs)
        self.teacher = teacher

        assert self.dataset_names, "This module uses dataset names."
        assert len(self.dataset_names) == 2, "The current implementation needs exactly 2 datasets."
        # FIXME: it doesn't work with different datasets for training and evaluation, because it needs certain names
        #  for training; and this logic assumes the same dataset names for both.

        # Weight each dataset's loss: equal shares by default, otherwise the given share for the labeled dataset
        # and the remainder split evenly among the rest.
        if labeled_dataset_loss_share is None:
            self.dataset_loss_share = {name: 1 / len(self.dataset_names) for name in self.dataset_names}
        else:
            self.dataset_loss_share = {labeled_dataset_name: labeled_dataset_loss_share}
            self.dataset_loss_share.update((name, (1 - labeled_dataset_loss_share) / (len(self.dataset_names) - 1))
                                           for name in self.dataset_names
                                           if name != labeled_dataset_name)

        # A separate learnable temperature for the distillation loss, initialized from the contrastive one.
        self.teacher_student_logit_scale = nn.Parameter(self.logit_scale.clone(),
                                                        requires_grad=self.logit_scale.requires_grad)
        # noinspection SpellCheckingInspection
        self.teacher_student_loss = TeacherStudentNCELoss(reduction="batchmean")

        # The teacher is frozen; only the student is optimized.
        for p in self.teacher.parameters():
            p.requires_grad = False

        self.labeled_dataset_name = labeled_dataset_name
        self.unlabeled_dataset_name = next(k for k in self.dataset_names if k != labeled_dataset_name)

        if prompts is None:
            self.tokenized_prompts = None
            self.teacher_tokenized_prompts = None
        else:
            prompts = list(prompts)

            # We use parameters so the device and dtype are moved correctly along with this module.
            self.tokenized_prompts = nn.ParameterDict((k, nn.Parameter(v, requires_grad=False))  # noqa
                                                      for k, v in encoder.get_tokenizer()(prompts).items())
            self.teacher_tokenized_prompts = nn.ParameterDict((k, nn.Parameter(v, requires_grad=False))  # noqa
                                                              for k, v in teacher.get_tokenizer()(prompts).items())

    @overrides(check_signature=False)
    def _step(self, batch: TYPE_INPUT, _batch_idx: int = 0) -> TYPE_OUTPUT:
        """Run both the student and the teacher on their respective views of the batch."""
        # Note we pass the labeled dataset portion to the teacher, but then we don't use it.
        return self({"video": batch["video_student"], "text": batch["text_student"]}), \
            self.teacher(video=batch["video_teacher"], text=batch["text_teacher"])

    @overrides(check_signature=False)
    def training_step(self, batch: TYPE_INPUT, _batch_idx: int = 0) -> TYPE_MULTI_OUTPUT:
        """Split the merged batch by source dataset, optionally substituting the prompts for the unlabeled
        dataset's texts, run both encoders, and return the outputs keyed by dataset name."""
        # `batch["dataset"]` lists the source dataset of each instance, grouped contiguously.
        keys, lengths = zip(*((key, sum(1 for _ in group))
                              for key, group in itertools.groupby(dataset for dataset in batch.pop("dataset"))))
        assert len(keys) == len(self.dataset_names), "All datasets should be present in each batch."

        if self.tokenized_prompts is None:
            unlabeled_dataset_idx = None
        else:
            # Replace the unlabeled dataset's text portion with the fixed prompts, for both encoders.
            unlabeled_dataset_idx = keys.index(self.unlabeled_dataset_name)
            start_idx_in_batch = sum(lengths[i] for i in range(unlabeled_dataset_idx))
            end_idx_in_batch = start_idx_in_batch + lengths[unlabeled_dataset_idx]
            _replace_in_tokenized_text(tokenized_text=batch["text_student"],
                                       new_tokenized_text=self.tokenized_prompts, start_idx=start_idx_in_batch,
                                       end_idx=end_idx_in_batch, tokenizer=self.encoder.get_tokenizer())
            _replace_in_tokenized_text(tokenized_text=batch["text_teacher"],
                                       new_tokenized_text=self.teacher_tokenized_prompts,
                                       start_idx=start_idx_in_batch, end_idx=end_idx_in_batch,
                                       tokenizer=self.teacher.get_tokenizer())

        output = self._step(batch, _batch_idx)

        # Need to log the step because PL doesn't log it in Neptune.
        # See https://github.com/PyTorchLightning/pytorch-lightning/pull/5510
        first_video_value = next(v for k, v in batch.items() if k.startswith("video"))
        self.log("step", self.global_step, batch_size=len(first_video_value))

        if self.tokenized_prompts is None:
            split_output = split_in_collection(output, lengths)
        else:
            # With prompts, the text portion of the unlabeled dataset has the prompts' length, not the videos'.
            text_split_sections = list(lengths)
            text_split_sections[unlabeled_dataset_idx] = len(next(iter(self.tokenized_prompts.values())))

            student_video_sections = split_in_collection(output[0][0], lengths)
            student_text_sections = split_in_collection(output[0][1], text_split_sections)
            teacher_video_sections = split_in_collection(output[1][0], lengths)
            teacher_text_sections = split_in_collection(output[1][1], text_split_sections)

            split_output = (((student_video_sections[i], student_text_sections[i]),
                             (teacher_video_sections[i], teacher_text_sections[i]))
                            for i in range(len(student_video_sections)))

        return dict(zip(keys, split_output))

    def _dataset_step_end(self, output: TYPE_OUTPUT, split: TYPE_SPLIT,
                          dataset_name: Optional[str] = None) -> Union[torch.Tensor, video_text_encoder.TYPE_OUTPUT]:
        """Gather the student and teacher outputs from all processes and compute one dataset's loss.

        The labeled dataset uses the regular contrastive loss; any other dataset uses the distillation loss.
        Returns the loss during training and the student's (video, text) embeddings otherwise.
        """
        gathered_output = all_gather(self, output, sync_grads=split == "train")
        (encoded_video, encoded_text), (teacher_encoded_video, teacher_encoded_text) = gathered_output

        batch_size = len(encoded_video)

        logit_scale = self.logit_scale.exp()
        # FIX: compute this unconditionally. It was previously only assigned inside the distillation (`else`)
        # branch, so logging `temperature/unlabeled` below raised `UnboundLocalError` when training on the
        # labeled dataset.
        teacher_student_logit_scale = self.teacher_student_logit_scale.exp()

        scores = logit_scale * encoded_video @ encoded_text.T

        if dataset_name == self.labeled_dataset_name:
            loss = self.loss(scores)
        else:
            teacher_scores = teacher_student_logit_scale * teacher_encoded_video @ teacher_encoded_text.T
            loss = self.teacher_student_loss(scores, teacher_scores) * teacher_student_logit_scale ** 2

        if split == "train":
            # Note that we need to pass the batch size in the first step log
            # as it can't be easily inferred by PL in our case.
            self.log("batch_size", float(batch_size), batch_size=batch_size)
            self.log("temperature/labeled", 1 / logit_scale)
            self.log("temperature/unlabeled", 1 / teacher_student_logit_scale)

        prefix = f"loss/{split}_{dataset_name}" if dataset_name else f"loss/{split}"
        # Note that we need to pass the batch size in the first step log
        # as it can't be easily inferred by PL in our case.
        self.log(prefix, loss, prog_bar=split != "train", batch_size=batch_size, add_dataloader_idx=False)

        return loss if split == "train" else (encoded_video, encoded_text)

    @overrides(check_signature=False)
    def training_step_end(self, output: TYPE_MULTI_OUTPUT) -> torch.Tensor:
        """Combine the per-dataset losses into the weighted total training loss."""
        loss = sum(self._dataset_step_end(batch, split="train", dataset_name=name) * self.dataset_loss_share[name]
                   for name, batch in output.items())

        self.log("loss/train", loss)  # Note train loss it's already shown in the progress bar by PL by default.

        log_lr(self)

        return loss

    @overrides(check_signature=False)
    def _validation_dataset_step_end(self, output: TYPE_OUTPUT,
                                     dataset_name: Optional[str] = None) -> video_text_encoder.TYPE_OUTPUT:
        return self._dataset_step_end(output, split="val", dataset_name=dataset_name)

    @overrides(check_signature=False)
    def optimizer_step(self, *args, **kwargs) -> None:
        """Run the regular optimizer step, then clamp the distillation temperature like the contrastive one."""
        super().optimizer_step(*args, **kwargs)
        if self.teacher_student_logit_scale >= self.max_logit_scale:
            self.teacher_student_logit_scale.copy_(self.max_logit_scale)
| 10,735 | 54.05641 | 117 | py |
fitclip | fitclip-main/aligner/loss.py | from typing import Literal
import torch
from overrides import overrides
from torch.nn import functional as F
from torch.nn.modules.loss import _Loss
TYPE_REDUCTION = Literal["none", "mean", "sum"]
# noinspection SpellCheckingInspection
TYPE_REDUCTION_KL_DIV = Literal["none", "batchmean", "mean", "sum"]
def _rows_to_columns_nce_loss(scores: torch.Tensor, reduction: TYPE_REDUCTION = "mean") -> torch.Tensor:
loss = - F.log_softmax(scores, dim=-1).diag()
if reduction == "mean":
return loss.mean()
elif reduction == "sum":
return loss.sum()
else:
return loss
def nce_loss(scores: torch.Tensor, reduction: "TYPE_REDUCTION" = "mean") -> torch.Tensor:
    """Symmetric InfoNCE loss: the rows-to-columns loss plus the columns-to-rows loss."""
    rows_loss = _rows_to_columns_nce_loss(scores, reduction=reduction)
    columns_loss = _rows_to_columns_nce_loss(scores.T, reduction=reduction)
    return rows_loss + columns_loss
def _rows_to_columns_teacher_student_nce_loss(scores: torch.Tensor, teacher_scores: torch.Tensor,
reduction: TYPE_REDUCTION_KL_DIV = "mean") -> torch.Tensor:
logits = F.log_softmax(scores, dim=-1)
teacher_probs = F.softmax(teacher_scores, dim=-1)
return F.kl_div(logits, teacher_probs, reduction=reduction)
def teacher_student_nce_loss(scores: torch.Tensor, teacher_scores: torch.Tensor,
                             reduction: "TYPE_REDUCTION_KL_DIV" = "mean") -> torch.Tensor:
    """Symmetric teacher-student loss: the row-wise plus the column-wise KL divergence."""
    rows_loss = _rows_to_columns_teacher_student_nce_loss(scores, teacher_scores, reduction=reduction)
    columns_loss = _rows_to_columns_teacher_student_nce_loss(scores.T, teacher_scores.T, reduction=reduction)
    return rows_loss + columns_loss
class NCELoss(_Loss):
    """Symmetric InfoNCE loss module: positives are the diagonal entries of the `scores` matrix."""

    @overrides(check_signature=False)
    def forward(self, scores: torch.Tensor) -> torch.Tensor:
        return nce_loss(scores, reduction=self.reduction)  # noqa
class TeacherStudentNCELoss(_Loss):
    """Symmetric distillation loss: KL divergence of the student's score distributions from the teacher's."""

    @overrides(check_signature=False)
    def forward(self, scores: torch.Tensor, teacher_scores: torch.Tensor) -> torch.Tensor:
        return teacher_student_nce_loss(scores, teacher_scores, reduction=self.reduction)  # noqa
class SimilarityLoss(_Loss):
    """Binary similarity loss over the positive (diagonal) pairs of the `scores` matrix."""

    @overrides(check_signature=False)
    def forward(self, scores: torch.Tensor) -> torch.Tensor:
        # Note we actually don't need all the scores.
        # `logsigmoid` is the numerically stable form of `log(sigmoid(x))`: the previous
        # `torch.log(torch.sigmoid(x))` underflows to `-inf` for large negative scores.
        loss = -F.logsigmoid(scores.diag())
        if self.reduction == "mean":
            return loss.mean()
        elif self.reduction == "sum":
            return loss.sum()
        else:
            return loss
| 2,447 | 36.090909 | 105 | py |
fitclip | fitclip-main/aligner/text_video_retrieval.py | from collections import OrderedDict
from typing import Iterable, Mapping, Optional, Sequence, Tuple, Union
import torch
import torch.distributed.nn
from overrides import overrides
from torch import nn
from torchmetrics import Metric, Recall
from aligner.encoder.video_text_encoder import TYPE_OUTPUT
from aligner.metrics import MedianRank, Rank
from aligner.video_text_module import TYPE_INPUT, VideoTextLightningModule
from util.tensor_utils import all_gather
class TextVideoRetrievalLightningModule(VideoTextLightningModule):  # noqa
    """Video-text module that also computes text-to-video retrieval metrics (R@1, R@5, R@10, median rank).

    Supports validating on multiple datasets at once, in which case every metric is cloned per dataset and
    logged under a ``{metric}_{dataset}`` name.
    """

    def __init__(self, *args, dataset_names: Optional[Iterable[str]] = None, compute_rank: bool = False,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)

        metrics_dict = {"r1": Recall(), "r5": Recall(top_k=5), "r10": Recall(top_k=10), "mr": MedianRank()}
        if compute_rank:
            metrics_dict["rank"] = Rank()

        self.dataset_names = list(dataset_names) if dataset_names else None

        self.multiple_datasets = self.dataset_names is not None and len(self.dataset_names) > 1

        if self.multiple_datasets:
            assert all("_" not in name for name in self.dataset_names), \
                "Underscores in dataset names are problematic because of how we get their corresponding metrics."
            # One clone of every metric per dataset, keyed as "{metric}_{dataset}".
            self.metrics: Mapping[str, Metric] = nn.ModuleDict((f"{name}_{dataset_name}", metric.clone())  # noqa
                                                               for dataset_name in self.dataset_names
                                                               for name, metric in metrics_dict.items())
        else:
            self.metrics: Mapping[str, Metric] = nn.ModuleDict(metrics_dict)

    @overrides(check_signature=False)
    def validation_step(self, batch: TYPE_INPUT, batch_idx: int = 0,
                        dataloader_idx: Optional[int] = None) -> Tuple[TYPE_OUTPUT, Optional[int]]:
        # The data loader index is forwarded so `validation_step_end` can tell which dataset the output is from.
        return self._step(batch, batch_idx), dataloader_idx

    def _validation_dataset_step_end(self, output: TYPE_OUTPUT, dataset_name: Optional[str] = None) -> TYPE_OUTPUT:
        """Gather one dataset's embeddings from all processes, compute and log its validation loss."""
        encoded_video, encoded_text = all_gather(self, output)

        batch_size = len(encoded_video)

        logit_scale = self.logit_scale.exp()
        scores = logit_scale * encoded_video @ encoded_text.T
        loss = self.loss(scores)

        # Note that we need to pass the batch size in the first step log
        # as it can't be easily inferred by PL in our case.
        key = "loss/val" + ("" if dataset_name is None else f"_{dataset_name}")
        self.log(key, loss, prog_bar=True, batch_size=batch_size, add_dataloader_idx=False)

        return encoded_video, encoded_text

    @overrides(check_signature=False)
    def validation_step_end(self, output: Tuple[TYPE_OUTPUT, int]) -> TYPE_OUTPUT:
        step_output, data_loader_idx = output
        assert self.multiple_datasets == (data_loader_idx is not None)
        dataset_name = self.dataset_names[data_loader_idx] if self.multiple_datasets else None
        return self._validation_dataset_step_end(step_output, dataset_name=dataset_name)

    def _validate_dataset(self, outputs: Sequence[TYPE_OUTPUT], dataset_name: Optional[str] = None) -> None:
        """Compute the retrieval metrics for one dataset over all its validation batches."""
        assert self.multiple_datasets == (dataset_name is not None)

        encoded_videos, encoded_texts = (torch.cat(x) for x in zip(*outputs))

        batch_size = len(encoded_videos)

        scores = encoded_texts @ encoded_videos.T
        # The i-th text's positive video is the i-th video.
        target = torch.arange(scores.shape[-1], device=scores.device)

        for name, metric in self.metrics.items():
            if not dataset_name or name.endswith(f"_{dataset_name}"):
                metric(scores, target)
                # Note that we need to pass the batch size in the first step log
                # as it can't be easily inferred by PL in our case.
                self.log(name, metric, prog_bar=True, batch_size=batch_size, add_dataloader_idx=False)

    @overrides(check_signature=False)
    def validation_epoch_end(self, outputs: Union[Sequence[TYPE_OUTPUT], Sequence[Sequence[TYPE_OUTPUT]]]) -> None:
        if self.multiple_datasets:
            for i, (name, dataset_output) in enumerate(zip(self.dataset_names, outputs)):
                # Necessary to set the current data loader ID so PL knows to which one the logged metrics belong
                # (because it returns the metrics by data loader).
                self._current_dataloader_idx = i
                self._validate_dataset(dataset_output, dataset_name=name)  # noqa
            self._current_dataloader_idx = None
        else:
            self._validate_dataset(outputs)

        if "rank" in self.metrics:
            self.print(self.metrics["rank"].compute().tolist())

    @overrides
    def load_state_dict(self, state_dict: "OrderedDict[str, torch.Tensor]", strict: bool = True):
        # If it's exactly this class, then ignore any teacher-related thing.
        # We do it here, so we can control it more, and avoid bugs with a general solution.
        if type(self) is TextVideoRetrievalLightningModule:
            # Load non-strictly first, then re-create the strict check ourselves minus the "teacher*" keys.
            incompatible_keys = super().load_state_dict(state_dict, strict=False)

            unexpected_keys = set(incompatible_keys.unexpected_keys)

            for key in incompatible_keys.unexpected_keys:
                if key.startswith("teacher"):
                    unexpected_keys.remove(key)

            # We then do as in super:
            if strict:
                error_msgs = []

                if unexpected_keys:
                    unexpected_key_str = ", ".join(f'"{k}"' for k in unexpected_keys)
                    error_msgs.append(f"Unexpected key(s) in state_dict: {unexpected_key_str}. ")
                if incompatible_keys.missing_keys:
                    missing_keys_str = ', '.join(f'"{k}"' for k in incompatible_keys.missing_keys)
                    error_msgs.append(f"Missing key(s) in state_dict: {missing_keys_str}. ")

                if error_msgs:
                    error_msgs_str = "\n\t".join(error_msgs)
                    raise RuntimeError(f"Error(s) in loading state_dict for {self.__class__.__name__}:\n\t"
                                       f"{error_msgs_str}")

            return incompatible_keys
        else:
            return super().load_state_dict(state_dict, strict)
| 6,325 | 46.924242 | 115 | py |
fitclip | fitclip-main/aligner/video_text_classification.py | import logging
import math
from typing import Any, Iterable, Mapping, Optional, Sequence, TypeVar
import torch
from overrides import overrides
from pytorch_lightning.callbacks import RichProgressBar
from pytorch_lightning.utilities.apply_func import apply_to_collection
from torch import nn
from torchmetrics import Accuracy, Metric
from aligner.encoder.video_text_encoder import VideoTextEncoder
from aligner.metrics import MedianRank
from aligner.video_text_module import VideoTextLightningModule
from util import iter_utils
LOGGER = logging.getLogger(__name__)
T = TypeVar("T")
def batch_tokenized_text(tokenized: Mapping[str, Sequence[T]], n: int) -> Iterable[Mapping[str, T]]:
    """Yield mappings whose values are successive size-`n` chunks of the corresponding values in `tokenized`."""
    chunk_iterators = {key: iter(iter_utils.batch_sequence(values, n)) for key, values in tokenized.items()}
    batch_count = math.ceil(len(next(iter(tokenized.values()))) / n)
    for _ in range(batch_count):
        yield {key: next(chunk_iterators[key]) for key in tokenized}
class VideoTextClassificationLightningModule(VideoTextLightningModule):  # noqa
    """Zero-shot video classification module: class labels (optionally expanded with text templates) are
    encoded as text, and a video is classified by its similarity to each encoded label."""

    def __init__(self, encoder: VideoTextEncoder, labels: Iterable[str], templates: Optional[Iterable[str]],
                 return_metrics_by_class: bool = False, **kwargs) -> None:
        super().__init__(encoder, **kwargs)

        labels = list(labels)
        label_count = len(labels)

        # If different templates are provided, we use them for each label
        # and reset the labels to be {labels} x {templates}.
        if templates:
            templates = list(templates)
            self.template_count = len(templates)
            labels = [template.format(label) for label in labels for template in templates]
        else:
            self.template_count = 1

        # We tokenize all the labels but defer the encoding until the model is in the device.
        tokenized_labels = encoder.get_tokenizer()(labels)
        device = next(encoder.parameters()).device
        tokenized_labels = apply_to_collection(tokenized_labels, torch.Tensor, torch.Tensor.to, device)
        self.tokenized_labels = nn.ParameterDict(apply_to_collection(tokenized_labels, torch.Tensor, nn.Parameter,
                                                                     requires_grad=False))

        # We encode just one label to allocate the size correctly.
        encoded_text = self.encoder.encode_text({k: v[:1] for k, v in tokenized_labels.items()})
        self.encoded_labels = nn.Parameter(torch.empty(label_count, encoded_text.shape[-1]), requires_grad=False)

        self.metrics: Mapping[str, Metric] = nn.ModuleDict({"a1": Accuracy(), "a5": Accuracy(top_k=5),
                                                            "mr": MedianRank()})
        if return_metrics_by_class:
            self.metrics_by_class = nn.ModuleDict({f"a1_{k}": Accuracy() for k in range(label_count)})
        else:
            self.metrics_by_class = None

    def _on_start(self) -> None:
        """Encode all the (tokenized) labels in batches and cache the result in `self.encoded_labels`."""
        # Note that for training they should be encoded at running time, not here.
        # But we aren't training any text classification model but evaluating them.
        #
        # We compute them here and not during init because here the model is already in the device.
        # This is especially much faster than in CPU (init) when using templates.
        batch_size = 32

        callback = next(callback for callback in self.trainer.callbacks if isinstance(callback, RichProgressBar))
        progress = callback.progress

        if self.trainer.is_global_zero:
            progress_task = progress.add_task(
                description="Encoding the labels",
                total=math.ceil(len(next(iter(self.tokenized_labels.values()))) / batch_size))
        else:
            progress_task = None

        encoded_label_list = []

        for tokenized_labels_batch in batch_tokenized_text(self.tokenized_labels, batch_size):
            encoded_label_list.append(self.encoder.encode_text(tokenized_labels_batch))

            if progress_task is not None:
                progress.update(progress_task, advance=1)

        encoded_labels = torch.cat(encoded_label_list)

        # Average the template embeddings of each label into a single embedding per label.
        encoded_labels = encoded_labels.reshape(-1, self.template_count, encoded_labels.shape[1]).mean(dim=1)
        self.encoded_labels.copy_(encoded_labels)

        if progress_task is not None:
            # If we remove it, it later fails, not sure why. So we just hide it.
            progress.update(progress_task, visible=False)

    @overrides
    def on_validation_start(self) -> None:
        self._on_start()

    @overrides
    def on_test_start(self) -> None:
        self._on_start()

    @overrides
    def on_predict_start(self) -> None:
        self._on_start()

    @overrides(check_signature=False)
    def forward(self, video: torch.Tensor) -> torch.Tensor:
        """Return the video-to-label similarity scores, shape (batch, label_count)."""
        return self.encoder.encode_video(video) @ self.encoded_labels.T

    @overrides(check_signature=False)
    def validation_step(self, batch: Mapping[str, Any], _batch_idx: int = 0) -> None:
        scores = self(batch["video"])
        label_id = batch["target"][1]

        for name, metric in self.metrics.items():
            metric(scores, label_id)
            # Note that we need to pass the batch size in the first step log
            # as it can't be easily inferred by PL in our case.
            self.log(name, metric, prog_bar=True, batch_size=len(batch["video"]))

        if self.metrics_by_class is not None:
            # Per-class accuracy has to be updated instance by instance.
            for scores_instance, label_id_instance in zip(scores, label_id):
                metric = self.metrics_by_class[f"a1_{label_id_instance}"]
                metric(scores_instance.unsqueeze(0), label_id_instance.unsqueeze(0))
                self.log(f"a1_{label_id_instance}", metric, batch_size=1)

    @overrides(check_signature=False)
    def predict_step(self, batch: Mapping[str, Any], _batch_idx: int = 0) -> Mapping[str, torch.Tensor]:
        return {
            "predictions": self(batch["video"]).argmax(dim=-1),
            "labels": batch["target"][1],
            "video_ids": batch["video_id"],
        }
| 6,045 | 41.879433 | 114 | py |
fitclip | fitclip-main/aligner/cli.py | #!/usr/bin/env python
import copy
import logging
import warnings
from types import MethodType
from typing import Any, Mapping, Optional, Tuple, Type
import hydra
import pytorch_lightning as pl
from cached_path import cached_path
from omegaconf import DictConfig
from pytorch_lightning import seed_everything
from torch.optim import Optimizer
from aligner.data.data_module_group import DataModuleStructuredGroup, EvalDataModuleGroup, MixedBatchDataModule, \
TrainAndEvalDataModules
from aligner.data.video_data_module import ENCODER_OR_ENCODER_MAP, VideoClassificationDataModule
from aligner.encoder.video_text_encoder import VideoTextEncoder
from aligner.video_text_classification import VideoTextClassificationLightningModule
from aligner.video_text_module import VideoTextLightningModule
LOGGER = logging.getLogger(__name__)
# This is because PL can't automatically infer the batch size, that's needed for logging. But we set it manually
# within the modules.
warnings.filterwarnings("ignore", message=r"^Trying to infer the `batch_size` from an ambiguous collection\. .+")
# From https://stackoverflow.com/a/2020083/1165181
def fullname(klass: Type[Any]) -> str:
    """Return the fully-qualified name (`module.qualname`) of `klass`."""
    return ".".join((klass.__module__, klass.__qualname__))
def set_logging_level(level: int) -> None:
    """Set `level` on the root logger and on every logger that already exists."""
    logging.basicConfig(level=level)
    # `basicConfig` will only work for new loggers, so we also need to set up the existing ones:
    existing_loggers = (maybe_logger for maybe_logger in logging.root.manager.loggerDict.values()
                        if isinstance(maybe_logger, logging.Logger))  # It could also be a `logging.PlaceHolder`.
    for existing_logger in existing_loggers:
        existing_logger.setLevel(level)
    logging.getLogger().setLevel(level)  # The root logger is not present in the previous iterable.
def init_cli(cfg: DictConfig) -> None:
    """Apply the global CLI settings from `cfg`: logging verbosity and, if given, the random seed."""
    level = logging.WARNING if cfg.get("silent") else logging.INFO
    set_logging_level(level)
    if "seed" in cfg:
        seed_everything(cfg.seed, workers=True)
def instantiate_data_module(cfg: DictConfig, encoder: ENCODER_OR_ENCODER_MAP) -> pl.LightningDataModule:
    """Recursively instantiate the data module described by `cfg`, injecting `encoder` where needed.

    Grouping data-module classes get their children instantiated first; leaf data modules receive the encoder
    directly.
    """
    kwargs = {}

    if cfg._target_ in {fullname(klass) for klass in [DataModuleStructuredGroup, EvalDataModuleGroup,
                                                      MixedBatchDataModule]}:
        if isinstance(cfg.data_modules, Mapping):
            kwargs["data_modules"] = {k: instantiate_data_module(v, encoder=encoder)  # noqa
                                      for k, v in cfg.data_modules.items()}
        else:
            # NOTE(review): this builds a *set* of data modules, which drops ordering and duplicates —
            # presumably a list/tuple was intended; confirm against the group classes' expectations.
            kwargs["data_modules"] = {instantiate_data_module(cfg_dm, encoder=encoder)
                                      for cfg_dm in cfg.data_modules}

        # Convert because otherwise the passed `data_modules` is a `DictConfig` instead of a `dict` and
        # `train_dataloader` can't respect the same collection type as `DictConfig` can't have normal classes.
        kwargs["_convert_"] = "all"
    elif cfg._target_ == fullname(TrainAndEvalDataModules):
        kwargs["train_data_module"] = instantiate_data_module(cfg.train_data_module, encoder=encoder)
        kwargs["eval_data_module"] = instantiate_data_module(cfg.eval_data_module, encoder=encoder)
    else:
        kwargs["encoder"] = encoder
        # Necessary as well when the encoder is a dict.
        kwargs["_convert_"] = "all"

    return hydra.utils.instantiate(cfg, **kwargs)
def create_model_data_module_trainer_and_ckpt_path(
        cfg: DictConfig, model_kwargs: Optional[Mapping[str, Any]] = None) -> Tuple[VideoTextLightningModule,
                                                                                    pl.LightningDataModule, pl.Trainer,
                                                                                    str]:
    """Instantiate the encoder, data module, model and trainer described by `cfg`.

    Returns `(model, data_module, trainer, ckpt_path)`, where `ckpt_path` is the cached local path of
    `cfg.checkpoint_path` or `None` if no checkpoint is configured.
    """
    model_kwargs = model_kwargs or {}

    LOGGER.info(f"Instantiating encoder <{getattr(cfg.encoder, '_target_', type(cfg.encoder).__name__)}>…")
    encoder: ENCODER_OR_ENCODER_MAP = hydra.utils.instantiate(cfg.encoder)
    # A mapping of encoders means teacher-student training; pick which one preprocesses the data.
    if isinstance(encoder, Mapping) and cfg.get("use_student_encoder_for_data_preprocessing"):
        encoder_for_data_preprocessing = encoder["student"]
    else:
        encoder_for_data_preprocessing = encoder
    LOGGER.info("Encoder instantiated.")

    LOGGER.info(f"Instantiating data module <{cfg.data._target_}>…")
    data_module = instantiate_data_module(cfg.data, encoder=encoder_for_data_preprocessing)
    LOGGER.info("Data module instantiated.")

    LOGGER.info(f"Instantiating model <{cfg.model._target_}>…")
    if isinstance(encoder, Mapping):
        model_kwargs.setdefault("encoder", encoder["student"])
        model_kwargs.setdefault("teacher", encoder["teacher"])
    else:
        model_kwargs.setdefault("encoder", encoder)
    if isinstance(data_module, VideoClassificationDataModule):
        # Classification needs the data module's label set, and a text-capable encoder to encode the labels.
        assert isinstance(encoder_for_data_preprocessing, VideoTextEncoder), \
            "Encoder can't be a mapping and has to support text when doing classification."
        cfg.model._target_ = fullname(VideoTextClassificationLightningModule)
        model_kwargs.setdefault("labels", data_module.categories)
        model_kwargs.setdefault("templates", data_module.templates)
    if prompts_path := cfg.get("prompts"):  # noqa
        with open(cached_path(prompts_path)) as file:
            model_kwargs.setdefault("prompts", [stripped_line
                                                for line in file
                                                if (stripped_line := line.strip())])  # noqa
    model: VideoTextLightningModule = hydra.utils.instantiate(cfg.model, **model_kwargs)
    LOGGER.info("Model instantiated.")

    if "optimizer" in cfg:
        LOGGER.info(f"Instantiating Optimizer <{cfg.optimizer._target_}>…")

        def configure_optimizers(self: pl.LightningModule) -> Optimizer:
            # Instantiate the configured optimizer lazily, over the model's parameters.
            if (lr_ := self.hparams.get("lr")) is not None:  # To be used by auto LR find.
                cfg.optimizer["lr"] = lr_
            return hydra.utils.instantiate(cfg.optimizer, self.parameters())

        model.configure_optimizers = MethodType(configure_optimizers, model)
        LOGGER.info("Optimizer instantiated.")

    LOGGER.info(f"Instantiating trainer <{cfg.trainer._target_}>…")
    trainer: pl.Trainer = hydra.utils.instantiate(cfg.trainer)
    LOGGER.info("Trainer instantiated.")

    # We do what `model.save_hyperparameters(cfg)` would do but without needing a current frame to get the args from.
    # It turns out that, even if you provide args, it still checks the current frame for args, and set those
    # conditioned by the provided args.
    model._log_hyperparams = trainer.logger
    model._set_hparams(cfg)  # noqa
    model._hparams_initial = copy.deepcopy(model._hparams)

    # FIX: the guard previously checked `cfg.get("path")` while reading `cfg.checkpoint_path`, so a configured
    # checkpoint was silently ignored unless an unrelated "path" key happened to be set.
    ckpt_path = cached_path(cfg.checkpoint_path) if cfg.get("checkpoint_path") else None

    return model, data_module, trainer, ckpt_path
| 6,809 | 44.099338 | 119 | py |
fitclip | fitclip-main/aligner/wise.py | import copy
from typing import Mapping, TypeVar
import torch
from torch import nn
T = TypeVar("T", bound=nn.Module)


def wise_state_dict(model1: T, model2: T, weight_for_2: float = 0.5) -> Mapping[str, torch.Tensor]:
    """Return the parameter-wise linear interpolation (WiSE) of the two models' named parameters.

    Both models must have exactly the same parameter names; `weight_for_2` is the weight given to `model2`.
    """
    params1 = dict(model1.named_parameters())
    params2 = dict(model2.named_parameters())

    assert set(params1) == set(params2)

    weight_for_1 = 1 - weight_for_2
    return {name: weight_for_1 * params1[name] + weight_for_2 * params2[name] for name in params1}
def wise(model1: T, model2: T, weight_for_2: float = 0.5, copy_model1: bool = True) -> T:
    """Return a deep copy of one of the models with its parameters set to the WiSE interpolation of both.

    `copy_model1` selects which model instance is cloned to host the interpolated weights.
    """
    assert type(model1) is type(model2)
    base = model1 if copy_model1 else model2
    interpolated = copy.deepcopy(base)
    interpolated.load_state_dict(wise_state_dict(model1, model2, weight_for_2=weight_for_2))  # noqa
    return interpolated
| 779 | 31.5 | 104 | py |
fitclip | fitclip-main/aligner/param_freezer.py | # Inspired from https://github.com/allenai/allennlp/blob/0d8c0fc/allennlp/training/optimizers.py
import logging
import re
from typing import Iterable, Optional, Union
import pytorch_lightning as pl
from overrides import overrides
LOGGER = logging.getLogger(__name__)
class ParamFreezer(pl.Callback):
    """Callback that disables gradients for every parameter whose name matches any of the given regexes."""

    def __init__(self, regexes: Iterable[Union[str, re.Pattern]]) -> None:
        super().__init__()
        self.regexes = [re.compile(regex) for regex in regexes]

    @overrides
    def setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: Optional[str] = None) -> None:
        # Track which patterns never matched anything, to warn about likely typos.
        unused_regexes = {pattern.pattern for pattern in self.regexes}

        params_to_tune = []
        frozen_params = []

        for name, param in pl_module.named_parameters():
            # First regex (in construction order) whose search matches the parameter name, if any.
            matching_regex = next((regex for regex in self.regexes if regex.search(name)), None)
            if matching_regex is None:
                params_to_tune.append(name)
            else:
                param.requires_grad = False
                unused_regexes.discard(matching_regex.pattern)
                frozen_params.append(name)

        LOGGER.debug(f"Params to tune: {params_to_tune}")
        LOGGER.debug(f"Frozen params: {frozen_params}")

        if unused_regexes:
            LOGGER.warning(f"The following param regexes used for freezing didn't match any param name: "
                           f"{unused_regexes}")
| 1,450 | 32.744186 | 113 | py |
fitclip | fitclip-main/aligner/metrics.py | import torch
from overrides import overrides
from torchmetrics import Metric
class Rank(Metric):
    """Accumulates, for each prediction row, the 0-based rank of its target among the descending-sorted scores."""

    is_differentiable: bool = False
    higher_is_better: bool = False
    full_state_update: bool = False

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        self.add_state("ranks", default=[], dist_reduce_fx="cat")

    @overrides(check_signature=False)
    def update(self, predictions: torch.Tensor, target: torch.Tensor) -> None:
        # Candidate indices ordered from the highest to the lowest predicted score.
        positions_by_score = predictions.argsort(dim=1, descending=True)
        # The rank is the position at which each row's target index appears.
        batch_ranks = torch.where(positions_by_score == target.unsqueeze(-1))[1]  # noqa
        self.ranks.append(batch_ranks)

    @overrides
    def compute(self) -> torch.Tensor:
        # The state may already be a reduced tensor, depending on when this is called (e.g., at the epoch end).
        return self.ranks if isinstance(self.ranks, torch.Tensor) else torch.cat(self.ranks)
class MeanRank(Rank):
    """`Rank` reduced to the mean 1-based rank."""

    @overrides
    def compute(self) -> torch.Tensor:
        zero_based_ranks = super().compute()
        return zero_based_ranks.mean() + 1
class MedianRank(Rank):
    """`Rank` reduced to the median 1-based rank."""

    @overrides
    def compute(self) -> torch.Tensor:
        zero_based_ranks = super().compute()
        return zero_based_ranks.median() + 1
| 1,162 | 30.432432 | 92 | py |
fitclip | fitclip-main/aligner/transforms.py | """From https://github.com/pytorch/vision/blob/993325d/references/video_classification/transforms.py"""
import random
from typing import Any
import torch
import torch.nn as nn
from overrides import overrides
from torchvision.transforms import InterpolationMode, RandomResizedCrop, functional as F
from util.tensor_utils import pad
class ConvertBHWCtoBCHW(nn.Module):
    """Convert tensor from (B, H, W, C) to (B, C, H, W)."""

    @overrides(check_signature=False)
    def forward(self, v: torch.Tensor) -> torch.Tensor:
        # Move the channel axis right after the batch axis.
        return torch.permute(v, (0, 3, 1, 2))
class ConvertBCHWtoCBHW(nn.Module):
    """Convert tensor from (B, C, H, W) to (C, B, H, W)."""

    @overrides(check_signature=False)
    def forward(self, v: torch.Tensor) -> torch.Tensor:
        # Swap the batch and channel axes; the spatial axes stay in place.
        return torch.permute(v, (1, 0, 2, 3))
# Added by me:
class ConvertBHWCtoCBHW(nn.Module):
    """Convert tensor from (B, H, W, C) to (C, B, H, W)."""

    @overrides(check_signature=False)
    def forward(self, v: torch.Tensor) -> torch.Tensor:
        # Channel first, then the original batch and spatial axes.
        return torch.permute(v, (3, 0, 1, 2))
class PadToMinFrames:
    """Pad a video along its frame dimension so it has at least `min_frames` frames."""

    def __init__(self, min_frames: int, frame_dim: int = 0, padding_value: Any = 0) -> None:
        self.min_frames = min_frames  # Minimum number of frames after the transform.
        self.frame_dim = frame_dim  # Dimension along which frames are laid out.
        self.padding_value = padding_value  # Fill value for the padded frames.

    def __call__(self, video: torch.Tensor) -> torch.Tensor:
        return pad(video, min_size=self.min_frames, dim=self.frame_dim, value=self.padding_value)
class MaxFrames:
    """Truncate a video along its frame dimension to at most `max_frames` frames."""

    def __init__(self, max_frames: int, frame_dim: int = 0) -> None:
        self.max_frames = max_frames  # Maximum number of frames to keep.
        self.frame_dim = frame_dim  # Dimension along which frames are laid out.

    def __call__(self, video: torch.Tensor) -> torch.Tensor:
        # Leave every leading dimension untouched and cap the frame dimension.
        index = [slice(None)] * self.frame_dim + [slice(self.max_frames)]
        return video[tuple(index)]
class RandomResizedCropWithRandomInterpolation(RandomResizedCrop):
    """`RandomResizedCrop` that additionally picks bilinear or bicubic interpolation at random on each call."""

    @overrides
    def forward(self, img: torch.Tensor) -> torch.Tensor:
        top, left, height, width = self.get_params(img, self.scale, self.ratio)  # noqa
        interpolation = random.choice([InterpolationMode.BILINEAR, InterpolationMode.BICUBIC])
        return F.resized_crop(img, top, left, height, width, self.size, interpolation)
| 2,123 | 33.258065 | 103 | py |
fitclip | fitclip-main/aligner/tests/data/multi_source_sampler_test.py | import string
from typing import Literal
from torch.utils.data import ConcatDataset, DataLoader, SequentialSampler
from aligner.data.multi_source_sampler import RoundRobinMultiSourceSampler
def _create_sample_data_loader(mode: Literal["min_size", "max_size_cycle"]) -> DataLoader:
    """Build a batch-less `DataLoader` over two concatenated toy datasets, sampled round-robin (4 then 3)."""
    letters = string.ascii_lowercase
    numbers = range(10)
    combined = ConcatDataset([letters, numbers])  # noqa
    sampler = RoundRobinMultiSourceSampler([SequentialSampler(letters), SequentialSampler(numbers)],
                                           sequence_sizes=[4, 3], mode=mode)
    # `batch_size=None` disables batching, so the loader yields individual items.
    return DataLoader(combined, sampler=sampler, batch_size=None)
def test_multi_source_sampler_min_size() -> None:
    """In `min_size` mode, iteration stops once the smaller source is exhausted."""
    data_loader = _create_sample_data_loader(mode="min_size")

    expected = ["a", "b", "c", "d", 0, 1, 2, "e", "f", "g", "h", 3, 4, 5, "i", "j", "k", "l", 6, 7, 8, "m", "n",
                "o", "p", 9]

    assert len(data_loader) == len(expected)
    assert list(data_loader) == expected
def test_multi_source_sampler_max_size_cycle() -> None:
    """In `max_size_cycle` mode, the smaller source restarts until the larger one is exhausted."""
    data_loader = _create_sample_data_loader(mode="max_size_cycle")

    expected = ["a", "b", "c", "d", 0, 1, 2, "e", "f", "g", "h", 3, 4, 5, "i", "j", "k", "l", 6, 7, 8, "m", "n",
                "o", "p", 9, 0, 1, "q", "r", "s", "t", 2, 3, 4, "u", "v", "w", "x", 5, 6, 7, "y", "z"]

    assert len(data_loader) == len(expected)
    assert list(data_loader) == expected
| 1,463 | 42.058824 | 117 | py |