# File: BasicVSR_PlusPlus-master/mmedit/apis/matting_inference.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmedit.datasets.pipelines import Compose
from mmedit.models import build_model
def init_model(config, checkpoint=None, device='cuda:0'):
"""Initialize a model from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
        device (str): The device on which to deploy the model.
            Default: 'cuda:0'.
Returns:
nn.Module: The constructed model.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
config.model.pretrained = None
config.test_cfg.metrics = None
model = build_model(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint)
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
def matting_inference(model, img, trimap):
"""Inference image(s) with the model.
Args:
model (nn.Module): The loaded model.
img (str): Image file path.
trimap (str): Trimap file path.
Returns:
np.ndarray: The predicted alpha matte.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# remove alpha from test_pipeline
keys_to_remove = ['alpha', 'ori_alpha']
for key in keys_to_remove:
for pipeline in list(cfg.test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
cfg.test_pipeline.remove(pipeline)
if 'keys' in pipeline and key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
cfg.test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
# build the data pipeline
test_pipeline = Compose(cfg.test_pipeline)
# prepare data
data = dict(merged_path=img, trimap_path=trimap)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# forward the model
with torch.no_grad():
result = model(test_mode=True, **data)
return result['pred_alpha']
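
# Example usage (a minimal sketch; the config, checkpoint and image paths
# below are hypothetical placeholders, not files shipped with this repo):
#
#     import numpy as np
#     model = init_model('configs/mattors/dim_config.py',
#                        checkpoint='work_dirs/dim/latest.pth',
#                        device='cuda:0')
#     pred_alpha = matting_inference(model, 'merged.png', 'trimap.png')
#     mmcv.imwrite((pred_alpha * 255).astype(np.uint8), 'pred_alpha.png')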

# File: BasicVSR_PlusPlus-master/mmedit/apis/video_interpolation_inference.py
# Copyright (c) OpenMMLab. All rights reserved.
import math
import os
import os.path as osp
import cv2
import mmcv
import numpy as np
import torch
from mmcv.fileio import FileClient
from mmcv.parallel import collate
from mmedit.datasets.pipelines import Compose
VIDEO_EXTENSIONS = ('.mp4', '.mov', '.avi')
FILE_CLIENT = FileClient('disk')
def read_image(filepath):
"""Read image from file.
Args:
filepath (str): File path.
Returns:
image (np.array): Image.
"""
img_bytes = FILE_CLIENT.get(filepath)
image = mmcv.imfrombytes(
img_bytes, flag='color', channel_order='rgb', backend='pillow')
return image
def read_frames(source, start_index, num_frames, from_video, end_index):
"""Read frames from file or video.
Args:
source (list | mmcv.VideoReader): Source of frames.
start_index (int): Start index of frames.
        num_frames (int): Number of frames to be read.
        from_video (bool): Whether to read frames from a video.
end_index (int): The end index of frames.
Returns:
images (np.array): Images.
"""
images = []
last_index = min(start_index + num_frames, end_index)
# read frames from video
if from_video:
for index in range(start_index, last_index):
if index >= source.frame_cnt:
break
images.append(np.flip(source.get_frame(index), axis=2))
else:
files = source[start_index:last_index]
images = [read_image(f) for f in files]
return images
def video_interpolation_inference(model,
input_dir,
output_dir,
start_idx=0,
end_idx=None,
batch_size=4,
fps_multiplier=0,
fps=0,
filename_tmpl='{:08d}.png'):
"""Inference image with the model.
Args:
model (nn.Module): The loaded model.
input_dir (str): Directory of the input video.
output_dir (str): Directory of the output video.
start_idx (int): The index corresponding to the first frame in the
sequence. Default: 0
end_idx (int | None): The index corresponding to the last interpolated
frame in the sequence. If it is None, interpolate to the last
frame of video or sequence. Default: None
batch_size (int): Batch size. Default: 4
        fps_multiplier (float): Multiply the fps of the input video by this
            factor to obtain the output fps. Default: 0.
        fps (float): Frame rate of the output video. Default: 0.
        filename_tmpl (str): Template of the file names. Default: '{:08d}.png'
    Returns:
        None: The interpolated result is written to ``output_dir``, either
            as a video or as an image sequence; nothing is returned.
"""
device = next(model.parameters()).device # model device
# build the data pipeline
if model.cfg.get('demo_pipeline', None):
test_pipeline = model.cfg.demo_pipeline
elif model.cfg.get('test_pipeline', None):
test_pipeline = model.cfg.test_pipeline
else:
test_pipeline = model.cfg.val_pipeline
# remove the data loading pipeline
tmp_pipeline = []
for pipeline in test_pipeline:
if pipeline['type'] not in [
'GenerateSegmentIndices', 'LoadImageFromFileList',
'LoadImageFromFile'
]:
tmp_pipeline.append(pipeline)
test_pipeline = tmp_pipeline
# compose the pipeline
test_pipeline = Compose(test_pipeline)
# check if the input is a video
input_file_extension = os.path.splitext(input_dir)[1]
if input_file_extension in VIDEO_EXTENSIONS:
source = mmcv.VideoReader(input_dir)
input_fps = source.fps
length = source.frame_cnt
from_video = True
h, w = source.height, source.width
if fps_multiplier:
            assert fps_multiplier > 0, '`fps_multiplier` must be positive'
output_fps = fps_multiplier * input_fps
else:
output_fps = fps if fps > 0 else input_fps * 2
else:
files = os.listdir(input_dir)
files = [osp.join(input_dir, f) for f in files]
files.sort()
source = files
        length = len(files)
from_video = False
example_frame = read_image(files[0])
h, w = example_frame.shape[:2]
output_fps = fps
# check if the output is a video
output_file_extension = os.path.splitext(output_dir)[1]
if output_file_extension in VIDEO_EXTENSIONS:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
target = cv2.VideoWriter(output_dir, fourcc, output_fps, (w, h))
to_video = True
else:
to_video = False
end_idx = min(end_idx, length) if end_idx is not None else length
# calculate step args
step_size = model.step_frames * batch_size
    length_per_step = model.required_frames + model.step_frames * (
batch_size - 1)
repeat_frame = model.required_frames - model.step_frames
prog_bar = mmcv.ProgressBar(
math.ceil(
            (end_idx + step_size - length_per_step - start_idx) / step_size))
output_index = start_idx
for start_index in range(start_idx, end_idx, step_size):
images = read_frames(
            source, start_index, length_per_step, from_video, end_index=end_idx)
# data prepare
data = dict(inputs=images, inputs_path=None, key=input_dir)
data = [test_pipeline(data)]
data = collate(data, samples_per_gpu=1)['inputs']
# data.shape: [1, t, c, h, w]
# forward the model
data = model.split_frames(data)
input_tensors = data.clone().detach()
with torch.no_grad():
output = model(data.to(device), test_mode=True)['output']
if len(output.shape) == 4:
output = output.unsqueeze(1)
output_tensors = output.cpu()
if len(output_tensors.shape) == 4:
output_tensors = output_tensors.unsqueeze(1)
result = model.merge_frames(input_tensors, output_tensors)
        if start_idx != start_index:
result = result[0 - repeat_frame:]
prog_bar.update()
# save frames
if to_video:
for frame in result:
target.write(frame)
else:
for frame in result:
save_path = osp.join(output_dir,
filename_tmpl.format(output_index))
mmcv.imwrite(frame, save_path)
output_index += 1
        if start_index + length_per_step >= end_idx:
break
print()
print(f'Output dir: {output_dir}')
if to_video:
target.release()
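
# Example usage (a minimal sketch; the model is assumed to be a frame
# interpolator loaded elsewhere, e.g. via `init_model`, and the paths are
# placeholders):
#
#     video_interpolation_inference(model, 'input.mp4', 'output.mp4',
#                                   fps_multiplier=2)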

# File: BasicVSR_PlusPlus-master/mmedit/apis/train.py
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import random
import warnings
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel
from mmcv.runner import HOOKS, IterBasedRunner, get_dist_info
from mmcv.utils import build_from_cfg
from mmedit.core import DistEvalIterHook, EvalIterHook, build_optimizers
from mmedit.core.distributed_wrapper import DistributedDataParallelWrapper
from mmedit.datasets.builder import build_dataloader, build_dataset
from mmedit.utils import get_root_logger
def init_random_seed(seed=None, device='cuda'):
"""Initialize random seed.
If the seed is not set, the seed will be automatically randomized,
and then broadcast to all processes to prevent some potential bugs.
Args:
seed (int, Optional): The seed. Default to None.
device (str): The device where the seed will be put on.
Default to 'cuda'.
Returns:
int: Seed to be used.
"""
if seed is not None:
return seed
# Make sure all ranks share the same random seed to prevent
# some potential bugs. Please refer to
# https://github.com/open-mmlab/mmdetection/issues/6339
rank, world_size = get_dist_info()
seed = np.random.randint(2**31)
if world_size == 1:
return seed
if rank == 0:
random_num = torch.tensor(seed, dtype=torch.int32, device=device)
else:
random_num = torch.tensor(0, dtype=torch.int32, device=device)
dist.broadcast(random_num, src=0)
return random_num.item()
def set_random_seed(seed, deterministic=False):
"""Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
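
# Typical usage when setting up training (a sketch; `args` is assumed to be
# the parsed command-line namespace of a training script):
#
#     seed = init_random_seed(args.seed)
#     set_random_seed(seed, deterministic=args.deterministic)
#     cfg.seed = seed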
def train_model(model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None):
"""Train model entry function.
Args:
model (nn.Module): The model to be trained.
dataset (:obj:`Dataset`): Train dataset.
cfg (dict): The config dict for training.
distributed (bool): Whether to use distributed training.
Default: False.
validate (bool): Whether to do evaluation. Default: False.
timestamp (str | None): Local time for runner. Default: None.
meta (dict | None): Meta dict to record some important information.
Default: None
"""
logger = get_root_logger(log_level=cfg.log_level)
# start training
if distributed:
_dist_train(
model,
dataset,
cfg,
validate=validate,
logger=logger,
timestamp=timestamp,
meta=meta)
else:
_non_dist_train(
model,
dataset,
cfg,
validate=validate,
logger=logger,
timestamp=timestamp,
meta=meta)
def _dist_train(model,
dataset,
cfg,
validate=False,
logger=None,
timestamp=None,
meta=None):
"""Distributed training function.
Args:
model (nn.Module): The model to be trained.
dataset (:obj:`Dataset`): Train dataset.
cfg (dict): The config dict for training.
validate (bool): Whether to do evaluation. Default: False.
logger (logging.Logger | None): Logger for training. Default: None.
timestamp (str | None): Local time for runner. Default: None.
meta (dict | None): Meta dict to record some important information.
Default: None.
"""
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
# step 1: give default values and override (if exist) from cfg.data
loader_cfg = {
**dict(seed=cfg.get('seed'), drop_last=False, dist=True),
**({} if torch.__version__ != 'parrots' else dict(
prefetch_num=2,
pin_memory=False,
)),
**dict((k, cfg.data[k]) for k in [
'samples_per_gpu',
'workers_per_gpu',
'shuffle',
'seed',
'drop_last',
'prefetch_num',
'pin_memory',
] if k in cfg.data)
}
# step 2: cfg.data.train_dataloader has highest priority
train_loader_cfg = dict(loader_cfg, **cfg.data.get('train_dataloader', {}))
data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]
# put model on gpus
find_unused_parameters = cfg.get('find_unused_parameters', False)
model = DistributedDataParallelWrapper(
model,
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
# build runner
optimizer = build_optimizers(model, cfg.optimizers)
runner = IterBasedRunner(
model,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta)
    # an ugly workaround to make the .log and .log.json filenames the same
runner.timestamp = timestamp
# register hooks
runner.register_training_hooks(
cfg.lr_config,
checkpoint_config=cfg.checkpoint_config,
log_config=cfg.log_config)
# visual hook
if cfg.get('visual_config', None) is not None:
cfg.visual_config['output_dir'] = os.path.join(
cfg.work_dir, cfg.visual_config['output_dir'])
runner.register_hook(mmcv.build_from_cfg(cfg.visual_config, HOOKS))
# evaluation hook
if validate and cfg.get('evaluation', None) is not None:
dataset = build_dataset(cfg.data.val)
if ('val_samples_per_gpu' in cfg.data
or 'val_workers_per_gpu' in cfg.data):
warnings.warn('"val_samples_per_gpu/val_workers_per_gpu" have '
'been deprecated. Please use '
'"val_dataloader=dict(samples_per_gpu=1)" instead. '
'Details see '
'https://github.com/open-mmlab/mmediting/pull/201')
val_loader_cfg = {
**loader_cfg,
**dict(shuffle=False, drop_last=False),
**dict((newk, cfg.data[oldk]) for oldk, newk in [
('val_samples_per_gpu', 'samples_per_gpu'),
('val_workers_per_gpu', 'workers_per_gpu'),
] if oldk in cfg.data),
**cfg.data.get('val_dataloader', {})
}
data_loader = build_dataloader(dataset, **val_loader_cfg)
save_path = osp.join(cfg.work_dir, 'val_visuals')
runner.register_hook(
DistEvalIterHook(
data_loader, save_path=save_path, **cfg.evaluation),
priority='LOW')
# user-defined hooks
if cfg.get('custom_hooks', None):
custom_hooks = cfg.custom_hooks
assert isinstance(custom_hooks, list), \
f'custom_hooks expect list type, but got {type(custom_hooks)}'
for hook_cfg in cfg.custom_hooks:
assert isinstance(hook_cfg, dict), \
'Each item in custom_hooks expects dict type, but got ' \
f'{type(hook_cfg)}'
hook_cfg = hook_cfg.copy()
priority = hook_cfg.pop('priority', 'NORMAL')
hook = build_from_cfg(hook_cfg, HOOKS)
runner.register_hook(hook, priority=priority)
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow, cfg.total_iters)
def _non_dist_train(model,
dataset,
cfg,
validate=False,
logger=None,
timestamp=None,
meta=None):
"""Non-Distributed training function.
Args:
model (nn.Module): The model to be trained.
dataset (:obj:`Dataset`): Train dataset.
cfg (dict): The config dict for training.
validate (bool): Whether to do evaluation. Default: False.
logger (logging.Logger | None): Logger for training. Default: None.
timestamp (str | None): Local time for runner. Default: None.
meta (dict | None): Meta dict to record some important information.
Default: None.
"""
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
# step 1: give default values and override (if exist) from cfg.data
loader_cfg = {
**dict(
seed=cfg.get('seed'),
drop_last=False,
dist=False,
num_gpus=cfg.gpus),
**({} if torch.__version__ != 'parrots' else dict(
prefetch_num=2,
pin_memory=False,
)),
**dict((k, cfg.data[k]) for k in [
'samples_per_gpu',
'workers_per_gpu',
'shuffle',
'seed',
'drop_last',
'prefetch_num',
'pin_memory',
] if k in cfg.data)
}
# step 2: cfg.data.train_dataloader has highest priority
train_loader_cfg = dict(loader_cfg, **cfg.data.get('train_dataloader', {}))
data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]
# put model on gpus/cpus
model = MMDataParallel(model, device_ids=range(cfg.gpus))
# build runner
optimizer = build_optimizers(model, cfg.optimizers)
runner = IterBasedRunner(
model,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta)
    # an ugly workaround to make the .log and .log.json filenames the same
runner.timestamp = timestamp
# register hooks
runner.register_training_hooks(
cfg.lr_config,
checkpoint_config=cfg.checkpoint_config,
log_config=cfg.log_config)
# visual hook
if cfg.get('visual_config', None) is not None:
cfg.visual_config['output_dir'] = os.path.join(
cfg.work_dir, cfg.visual_config['output_dir'])
runner.register_hook(mmcv.build_from_cfg(cfg.visual_config, HOOKS))
# evaluation hook
if validate and cfg.get('evaluation', None) is not None:
dataset = build_dataset(cfg.data.val)
if ('val_samples_per_gpu' in cfg.data
or 'val_workers_per_gpu' in cfg.data):
warnings.warn('"val_samples_per_gpu/val_workers_per_gpu" have '
'been deprecated. Please use '
'"val_dataloader=dict(samples_per_gpu=1)" instead. '
'Details see '
'https://github.com/open-mmlab/mmediting/pull/201')
val_loader_cfg = {
**loader_cfg,
**dict(shuffle=False, drop_last=False),
**dict((newk, cfg.data[oldk]) for oldk, newk in [
('val_samples_per_gpu', 'samples_per_gpu'),
('val_workers_per_gpu', 'workers_per_gpu'),
] if oldk in cfg.data),
**cfg.data.get('val_dataloader', {})
}
data_loader = build_dataloader(dataset, **val_loader_cfg)
save_path = osp.join(cfg.work_dir, 'val_visuals')
runner.register_hook(
EvalIterHook(data_loader, save_path=save_path, **cfg.evaluation),
priority='LOW')
# user-defined hooks
if cfg.get('custom_hooks', None):
custom_hooks = cfg.custom_hooks
assert isinstance(custom_hooks, list), \
f'custom_hooks expect list type, but got {type(custom_hooks)}'
for hook_cfg in cfg.custom_hooks:
assert isinstance(hook_cfg, dict), \
'Each item in custom_hooks expects dict type, but got ' \
f'{type(hook_cfg)}'
hook_cfg = hook_cfg.copy()
priority = hook_cfg.pop('priority', 'NORMAL')
hook = build_from_cfg(hook_cfg, HOOKS)
runner.register_hook(hook, priority=priority)
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow, cfg.total_iters)
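
# A sketch of a typical caller (roughly what a training script does; the
# exact `cfg` fields are assumptions):
#
#     from mmedit.models import build_model
#     model = build_model(cfg.model, train_cfg=cfg.train_cfg,
#                         test_cfg=cfg.test_cfg)
#     datasets = [build_dataset(cfg.data.train)]
#     train_model(model, datasets, cfg, distributed=False, validate=True,
#                 meta=dict(seed=cfg.seed))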

# File: BasicVSR_PlusPlus-master/mmedit/apis/restoration_video_inference.py
# Copyright (c) OpenMMLab. All rights reserved.
import glob
import os.path as osp
import re
from functools import reduce
import mmcv
import numpy as np
import torch
from mmedit.datasets.pipelines import Compose
VIDEO_EXTENSIONS = ('.mp4', '.mov')
def pad_sequence(data, window_size):
padding = window_size // 2
data = torch.cat([
data[:, 1 + padding:1 + 2 * padding].flip(1), data,
data[:, -1 - 2 * padding:-1 - padding].flip(1)
],
dim=1)
return data
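
# Shape sketch (toy tensor, assumed values): pad_sequence prepends and
# appends `window_size // 2` reflected frames, so (n, t, c, h, w) becomes
# (n, t + 2 * (window_size // 2), c, h, w):
#
#     data = torch.rand(1, 10, 3, 64, 64)
#     padded = pad_sequence(data, window_size=5)  # -> (1, 14, 3, 64, 64)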
def restoration_video_inference(model,
img_dir,
window_size,
start_idx,
filename_tmpl,
max_seq_len=None):
"""Inference image with the model.
Args:
model (nn.Module): The loaded model.
img_dir (str): Directory of the input video.
window_size (int): The window size used in sliding-window framework.
This value should be set according to the settings of the network.
            A value not greater than 0 means using the recurrent framework.
start_idx (int): The index corresponds to the first frame in the
sequence.
filename_tmpl (str): Template for file name.
max_seq_len (int | None): The maximum sequence length that the model
processes. If the sequence length is larger than this number,
the sequence is split into multiple segments. If it is None,
the entire sequence is processed at once.
Returns:
Tensor: The predicted restoration result.
"""
device = next(model.parameters()).device # model device
# build the data pipeline
if model.cfg.get('demo_pipeline', None):
test_pipeline = model.cfg.demo_pipeline
elif model.cfg.get('test_pipeline', None):
test_pipeline = model.cfg.test_pipeline
else:
test_pipeline = model.cfg.val_pipeline
# check if the input is a video
file_extension = osp.splitext(img_dir)[1]
if file_extension in VIDEO_EXTENSIONS:
video_reader = mmcv.VideoReader(img_dir)
# load the images
data = dict(lq=[], lq_path=None, key=img_dir)
for frame in video_reader:
data['lq'].append(np.flip(frame, axis=2))
# remove the data loading pipeline
tmp_pipeline = []
for pipeline in test_pipeline:
if pipeline['type'] not in [
'GenerateSegmentIndices', 'LoadImageFromFileList'
]:
tmp_pipeline.append(pipeline)
test_pipeline = tmp_pipeline
else:
# the first element in the pipeline must be 'GenerateSegmentIndices'
if test_pipeline[0]['type'] != 'GenerateSegmentIndices':
raise TypeError('The first element in the pipeline must be '
f'"GenerateSegmentIndices", but got '
f'"{test_pipeline[0]["type"]}".')
# specify start_idx and filename_tmpl
test_pipeline[0]['start_idx'] = start_idx
test_pipeline[0]['filename_tmpl'] = filename_tmpl
# prepare data
sequence_length = len(glob.glob(osp.join(img_dir, '*')))
img_dir_split = re.split(r'[\\/]', img_dir)
key = img_dir_split[-1]
lq_folder = reduce(osp.join, img_dir_split[:-1])
data = dict(
lq_path=lq_folder,
gt_path='',
key=key,
sequence_length=sequence_length)
# compose the pipeline
test_pipeline = Compose(test_pipeline)
data = test_pipeline(data)
data = data['lq'].unsqueeze(0) # in cpu
# forward the model
with torch.no_grad():
if window_size > 0: # sliding window framework
data = pad_sequence(data, window_size)
result = []
for i in range(0, data.size(1) - 2 * (window_size // 2)):
data_i = data[:, i:i + window_size].to(device)
result.append(model(lq=data_i, test_mode=True)['output'].cpu())
result = torch.stack(result, dim=1)
else: # recurrent framework
if max_seq_len is None:
result = model(
lq=data.to(device), test_mode=True)['output'].cpu()
else:
result = []
for i in range(0, data.size(1), max_seq_len):
result.append(
model(
lq=data[:, i:i + max_seq_len].to(device),
test_mode=True)['output'].cpu())
result = torch.cat(result, dim=1)
return result
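
# Example usage (a minimal sketch; the model is assumed to be a restorer
# loaded elsewhere, and the paths are placeholders):
#
#     output = restoration_video_inference(
#         model, 'data/lq_sequence', window_size=0, start_idx=0,
#         filename_tmpl='{:08d}.png')  # window_size <= 0: recurrent branch
#     # output: Tensor of shape (1, t, c, h, w)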

# File: BasicVSR_PlusPlus-master/mmedit/core/distributed_wrapper.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.parallel import MODULE_WRAPPERS, MMDistributedDataParallel
from mmcv.parallel.scatter_gather import scatter_kwargs
from torch.cuda._utils import _get_device_index
@MODULE_WRAPPERS.register_module()
class DistributedDataParallelWrapper(nn.Module):
"""A DistributedDataParallel wrapper for models in MMediting.
    In MMediting, there is a need to wrap different modules in the models
    with separate DistributedDataParallel. Otherwise, it will cause
    errors for GAN training.
    More specifically, a GAN model usually has two sub-modules:
    generator and discriminator. If we wrap both of them in one
    standard DistributedDataParallel, it will cause errors during training,
    because when we update the parameters of the generator (or discriminator),
    the parameters of the discriminator (or generator) are not updated, which
    is not allowed for DistributedDataParallel.
So we design this wrapper to separately wrap DistributedDataParallel
for generator and discriminator.
In this wrapper, we perform two operations:
1. Wrap the modules in the models with separate MMDistributedDataParallel.
Note that only modules with parameters will be wrapped.
2. Do scatter operation for 'forward', 'train_step' and 'val_step'.
    Note that the arguments of this wrapper are the same as those in
`torch.nn.parallel.distributed.DistributedDataParallel`.
Args:
module (nn.Module): Module that needs to be wrapped.
device_ids (list[int | `torch.device`]): Same as that in
`torch.nn.parallel.distributed.DistributedDataParallel`.
dim (int, optional): Same as that in the official scatter function in
pytorch. Defaults to 0.
broadcast_buffers (bool): Same as that in
`torch.nn.parallel.distributed.DistributedDataParallel`.
Defaults to False.
find_unused_parameters (bool, optional): Same as that in
`torch.nn.parallel.distributed.DistributedDataParallel`.
Traverse the autograd graph of all tensors contained in returned
value of the wrapped module’s forward function. Defaults to False.
kwargs (dict): Other arguments used in
`torch.nn.parallel.distributed.DistributedDataParallel`.
"""
def __init__(self,
module,
device_ids,
dim=0,
broadcast_buffers=False,
find_unused_parameters=False,
**kwargs):
super().__init__()
assert len(device_ids) == 1, (
'Currently, DistributedDataParallelWrapper only supports one'
'single CUDA device for each process.'
f'The length of device_ids must be 1, but got {len(device_ids)}.')
self.module = module
self.dim = dim
self.to_ddp(
device_ids=device_ids,
dim=dim,
broadcast_buffers=broadcast_buffers,
find_unused_parameters=find_unused_parameters,
**kwargs)
self.output_device = _get_device_index(device_ids[0], True)
def to_ddp(self, device_ids, dim, broadcast_buffers,
find_unused_parameters, **kwargs):
"""Wrap models with separate MMDistributedDataParallel.
It only wraps the modules with parameters.
"""
for name, module in self.module._modules.items():
if next(module.parameters(), None) is None:
module = module.cuda()
elif all(not p.requires_grad for p in module.parameters()):
module = module.cuda()
else:
module = MMDistributedDataParallel(
module.cuda(),
device_ids=device_ids,
dim=dim,
broadcast_buffers=broadcast_buffers,
find_unused_parameters=find_unused_parameters,
**kwargs)
self.module._modules[name] = module
def scatter(self, inputs, kwargs, device_ids):
"""Scatter function.
Args:
inputs (Tensor): Input Tensor.
kwargs (dict): Args for
``mmcv.parallel.scatter_gather.scatter_kwargs``.
device_ids (int): Device id.
"""
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def forward(self, *inputs, **kwargs):
"""Forward function.
Args:
inputs (tuple): Input data.
kwargs (dict): Args for
``mmcv.parallel.scatter_gather.scatter_kwargs``.
"""
inputs, kwargs = self.scatter(inputs, kwargs,
[torch.cuda.current_device()])
return self.module(*inputs[0], **kwargs[0])
def train_step(self, *inputs, **kwargs):
"""Train step function.
Args:
inputs (Tensor): Input Tensor.
kwargs (dict): Args for
``mmcv.parallel.scatter_gather.scatter_kwargs``.
"""
inputs, kwargs = self.scatter(inputs, kwargs,
[torch.cuda.current_device()])
output = self.module.train_step(*inputs[0], **kwargs[0])
return output
def val_step(self, *inputs, **kwargs):
"""Validation step function.
Args:
inputs (tuple): Input data.
kwargs (dict): Args for ``scatter_kwargs``.
"""
inputs, kwargs = self.scatter(inputs, kwargs,
[torch.cuda.current_device()])
output = self.module.val_step(*inputs[0], **kwargs[0])
return output
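
# Example usage (a minimal sketch, assuming a one-GPU-per-process
# distributed launch; see `_dist_train` in mmedit/apis/train.py above):
#
#     model = DistributedDataParallelWrapper(
#         model,
#         device_ids=[torch.cuda.current_device()],
#         broadcast_buffers=False,
#         find_unused_parameters=False)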

# File: BasicVSR_PlusPlus-master/mmedit/core/misc.py
# Copyright (c) OpenMMLab. All rights reserved.
import math
import numpy as np
import torch
from torchvision.utils import make_grid
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
"""Convert torch Tensors into image numpy arrays.
After clamping to (min, max), image values will be normalized to [0, 1].
For different tensor shapes, this function will have different behaviors:
1. 4D mini-batch Tensor of shape (N x 3/1 x H x W):
Use `make_grid` to stitch images in the batch dimension, and then
convert it to numpy array.
2. 3D Tensor of shape (3/1 x H x W) and 2D Tensor of shape (H x W):
Directly change to numpy array.
Note that the image channel in input tensors should be RGB order. This
function will convert it to cv2 convention, i.e., (H x W x C) with BGR
order.
Args:
tensor (Tensor | list[Tensor]): Input tensors.
out_type (numpy type): Output types. If ``np.uint8``, transform outputs
to uint8 type with range [0, 255]; otherwise, float type with
range [0, 1]. Default: ``np.uint8``.
min_max (tuple): min and max values for clamp.
Returns:
        (ndarray | list[ndarray]): 3D ndarray of shape (H x W x C) or 2D
            ndarray of shape (H x W).
"""
if not (torch.is_tensor(tensor) or
(isinstance(tensor, list)
and all(torch.is_tensor(t) for t in tensor))):
raise TypeError(
f'tensor or list of tensors expected, got {type(tensor)}')
if torch.is_tensor(tensor):
tensor = [tensor]
result = []
for _tensor in tensor:
        # Squeeze two times so that:
        # 1. (1, 1, h, w) -> (h, w) or
        # 2. (1, 3, h, w) -> (3, h, w) or
        # 3. (n>1, 3/1, h, w) -> (n>1, 3/1, h, w)
_tensor = _tensor.squeeze(0).squeeze(0)
_tensor = _tensor.float().detach().cpu().clamp_(*min_max)
_tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])
n_dim = _tensor.dim()
if n_dim == 4:
img_np = make_grid(
_tensor, nrow=int(math.sqrt(_tensor.size(0))),
normalize=False).numpy()
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))
elif n_dim == 3:
img_np = _tensor.numpy()
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))
elif n_dim == 2:
img_np = _tensor.numpy()
else:
raise ValueError('Only support 4D, 3D or 2D tensor. '
f'But received with dimension: {n_dim}')
if out_type == np.uint8:
            # Unlike MATLAB, numpy.uint8() will not round by default.
img_np = (img_np * 255.0).round()
img_np = img_np.astype(out_type)
result.append(img_np)
result = result[0] if len(result) == 1 else result
return result
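
# Worked example (toy tensor, assumed values): a (1, 3, 32, 32) tensor in
# [0, 1] becomes a (32, 32, 3) uint8 image in BGR order, ready for mmcv/cv2:
#
#     t = torch.rand(1, 3, 32, 32)
#     img = tensor2img(t)  # np.uint8, shape (32, 32, 3), BGR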

# File: BasicVSR_PlusPlus-master/mmedit/core/evaluation/eval_hooks.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from mmcv.runner import Hook
from torch.utils.data import DataLoader
class EvalIterHook(Hook):
"""Non-Distributed evaluation hook for iteration-based runner.
This hook will regularly perform evaluation in a given interval when
performing in non-distributed environment.
Args:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval. Default: 1.
eval_kwargs (dict): Other eval kwargs. It contains:
save_image (bool): Whether to save image.
save_path (str): The path to save image.
"""
def __init__(self, dataloader, interval=1, **eval_kwargs):
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, '
                            f'but got {type(dataloader)}')
self.dataloader = dataloader
self.interval = interval
self.eval_kwargs = eval_kwargs
self.save_image = self.eval_kwargs.pop('save_image', False)
self.save_path = self.eval_kwargs.pop('save_path', None)
def after_train_iter(self, runner):
"""The behavior after each train iteration.
Args:
runner (``mmcv.runner.BaseRunner``): The runner.
"""
if not self.every_n_iters(runner, self.interval):
return
runner.log_buffer.clear()
from mmedit.apis import single_gpu_test
results = single_gpu_test(
runner.model,
self.dataloader,
save_image=self.save_image,
save_path=self.save_path,
iteration=runner.iter)
self.evaluate(runner, results)
def evaluate(self, runner, results):
"""Evaluation function.
Args:
runner (``mmcv.runner.BaseRunner``): The runner.
results (dict): Model forward results.
"""
eval_res = self.dataloader.dataset.evaluate(
results, logger=runner.logger, **self.eval_kwargs)
for name, val in eval_res.items():
runner.log_buffer.output[name] = val
runner.log_buffer.ready = True
class DistEvalIterHook(EvalIterHook):
"""Distributed evaluation hook.
Args:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval. Default: 1.
tmpdir (str | None): Temporary directory to save the results of all
processes. Default: None.
gpu_collect (bool): Whether to use gpu or cpu to collect results.
Default: False.
eval_kwargs (dict): Other eval kwargs. It may contain:
            save_image (bool): Whether to save images.
save_path (str): The path to save image.
"""
def __init__(self,
dataloader,
interval=1,
gpu_collect=False,
**eval_kwargs):
super().__init__(dataloader, interval, **eval_kwargs)
self.gpu_collect = gpu_collect
def after_train_iter(self, runner):
"""The behavior after each train iteration.
Args:
runner (``mmcv.runner.BaseRunner``): The runner.
"""
if not self.every_n_iters(runner, self.interval):
return
runner.log_buffer.clear()
from mmedit.apis import multi_gpu_test
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=osp.join(runner.work_dir, '.eval_hook'),
gpu_collect=self.gpu_collect,
save_image=self.save_image,
save_path=self.save_path,
iteration=runner.iter)
if runner.rank == 0:
print('\n')
self.evaluate(runner, results)
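
# A sketch of how these hooks are attached (mirroring `_dist_train` and
# `_non_dist_train` in mmedit/apis/train.py; the `evaluation` dict stands in
# for `cfg.evaluation`):
#
#     evaluation = dict(interval=5000, save_image=False)
#     runner.register_hook(
#         EvalIterHook(data_loader, **evaluation), priority='LOW')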

# File: BasicVSR_PlusPlus-master/mmedit/core/export/wrappers.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import warnings
import numpy as np
import onnxruntime as ort
import torch
from torch import nn
from mmedit.models import BaseMattor, BasicRestorer, build_model
def inference_with_session(sess, io_binding, output_names, input_tensor):
device_type = input_tensor.device.type
device_id = input_tensor.device.index
device_id = 0 if device_id is None else device_id
io_binding.bind_input(
name='input',
device_type=device_type,
device_id=device_id,
element_type=np.float32,
shape=input_tensor.shape,
buffer_ptr=input_tensor.data_ptr())
for name in output_names:
io_binding.bind_output(name)
sess.run_with_iobinding(io_binding)
pred = io_binding.copy_outputs_to_cpu()
return pred
class ONNXRuntimeMattor(nn.Module):
def __init__(self, sess, io_binding, output_names, base_model):
super(ONNXRuntimeMattor, self).__init__()
self.sess = sess
self.io_binding = io_binding
self.output_names = output_names
self.base_model = base_model
def forward(self,
merged,
trimap,
meta,
test_mode=False,
save_image=False,
save_path=None,
iteration=None):
input_tensor = torch.cat((merged, trimap), 1).contiguous()
pred_alpha = inference_with_session(self.sess, self.io_binding,
self.output_names, input_tensor)[0]
pred_alpha = pred_alpha.squeeze()
pred_alpha = self.base_model.restore_shape(pred_alpha, meta)
eval_result = self.base_model.evaluate(pred_alpha, meta)
if save_image:
self.base_model.save_image(pred_alpha, meta, save_path, iteration)
return {'pred_alpha': pred_alpha, 'eval_result': eval_result}
class RestorerGenerator(nn.Module):
def __init__(self, sess, io_binding, output_names):
super(RestorerGenerator, self).__init__()
self.sess = sess
self.io_binding = io_binding
self.output_names = output_names
def forward(self, x):
pred = inference_with_session(self.sess, self.io_binding,
self.output_names, x)[0]
pred = torch.from_numpy(pred)
return pred
class ONNXRuntimeRestorer(nn.Module):
def __init__(self, sess, io_binding, output_names, base_model):
super(ONNXRuntimeRestorer, self).__init__()
self.sess = sess
self.io_binding = io_binding
self.output_names = output_names
self.base_model = base_model
restorer_generator = RestorerGenerator(self.sess, self.io_binding,
self.output_names)
base_model.generator = restorer_generator
def forward(self, lq, gt=None, test_mode=False, **kwargs):
return self.base_model(lq, gt=gt, test_mode=test_mode, **kwargs)
class ONNXRuntimeEditing(nn.Module):
def __init__(self, onnx_file, cfg, device_id):
super(ONNXRuntimeEditing, self).__init__()
ort_custom_op_path = ''
try:
from mmcv.ops import get_onnxruntime_op_path
ort_custom_op_path = get_onnxruntime_op_path()
except (ImportError, ModuleNotFoundError):
warnings.warn('If input model has custom op from mmcv, \
you may have to build mmcv with ONNXRuntime from source.')
session_options = ort.SessionOptions()
# register custom op for onnxruntime
if osp.exists(ort_custom_op_path):
session_options.register_custom_ops_library(ort_custom_op_path)
sess = ort.InferenceSession(onnx_file, session_options)
providers = ['CPUExecutionProvider']
options = [{}]
is_cuda_available = ort.get_device() == 'GPU'
if is_cuda_available:
providers.insert(0, 'CUDAExecutionProvider')
options.insert(0, {'device_id': device_id})
sess.set_providers(providers, options)
self.sess = sess
self.device_id = device_id
self.io_binding = sess.io_binding()
self.output_names = [_.name for _ in sess.get_outputs()]
base_model = build_model(
cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
if isinstance(base_model, BaseMattor):
WrapperClass = ONNXRuntimeMattor
elif isinstance(base_model, BasicRestorer):
WrapperClass = ONNXRuntimeRestorer
self.wrapper = WrapperClass(self.sess, self.io_binding,
self.output_names, base_model)
def forward(self, **kwargs):
return self.wrapper(**kwargs)
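
# Example usage (a minimal sketch; the ONNX file and config paths are
# placeholders):
#
#     import mmcv
#     cfg = mmcv.Config.fromfile('restorer_config.py')
#     model = ONNXRuntimeEditing('restorer.onnx', cfg, device_id=0)
#     result = model(lq=lq_tensor, test_mode=True)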

# File: BasicVSR_PlusPlus-master/mmedit/core/hooks/visualization.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import mmcv
import torch
from mmcv.runner import HOOKS, Hook
from mmcv.runner.dist_utils import master_only
from torchvision.utils import save_image
@HOOKS.register_module()
class VisualizationHook(Hook):
"""Visualization hook.
In this hook, we use the official api `save_image` in torchvision to save
the visualization results.
Args:
output_dir (str): The file path to store visualizations.
res_name_list (str): The list contains the name of results in outputs
dict. The results in outputs dict must be a torch.Tensor with shape
(n, c, h, w).
interval (int): The interval of calling this hook. If set to -1,
the visualization hook will not be called. Default: -1.
filename_tmpl (str): Format string used to save images. The output file
name will be formatted as this args. Default: 'iter_{}.png'.
rerange (bool): Whether to rerange the output value from [-1, 1] to
            [0, 1]. We highly recommend that users preprocess the
visualization results on their own. Here, we just provide a simple
interface. Default: True.
bgr2rgb (bool): Whether to reformat the channel dimension from BGR to
RGB. The final image we will save is following RGB style.
Default: True.
nrow (int): The number of samples in a row. Default: 1.
padding (int): The number of padding pixels between each samples.
Default: 4.
"""
def __init__(self,
output_dir,
res_name_list,
interval=-1,
filename_tmpl='iter_{}.png',
rerange=True,
bgr2rgb=True,
nrow=1,
padding=4):
assert mmcv.is_list_of(res_name_list, str)
self.output_dir = output_dir
self.res_name_list = res_name_list
self.interval = interval
self.filename_tmpl = filename_tmpl
self.bgr2rgb = bgr2rgb
self.rerange = rerange
self.nrow = nrow
self.padding = padding
mmcv.mkdir_or_exist(self.output_dir)
@master_only
def after_train_iter(self, runner):
"""The behavior after each train iteration.
Args:
runner (object): The runner.
"""
if not self.every_n_iters(runner, self.interval):
return
results = runner.outputs['results']
filename = self.filename_tmpl.format(runner.iter + 1)
img_list = [x for k, x in results.items() if k in self.res_name_list]
img_cat = torch.cat(img_list, dim=3).detach()
if self.rerange:
img_cat = ((img_cat + 1) / 2)
if self.bgr2rgb:
img_cat = img_cat[:, [2, 1, 0], ...]
img_cat = img_cat.clamp_(0, 1)
save_image(
img_cat,
osp.join(self.output_dir, filename),
nrow=self.nrow,
padding=self.padding)
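
# A sketch of the config entry consumed by mmedit/apis/train.py above
# (field values are illustrative assumptions):
#
#     visual_config = dict(
#         type='VisualizationHook',
#         output_dir='visual',
#         interval=1000,
#         res_name_list=['gt_img', 'fake_img'])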

# File: BasicVSR_PlusPlus-master/mmedit/core/hooks/ema.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from copy import deepcopy
from functools import partial
import mmcv
import torch
from mmcv.parallel import is_module_wrapper
from mmcv.runner import HOOKS, Hook
@HOOKS.register_module()
class ExponentialMovingAverageHook(Hook):
"""Exponential Moving Average Hook.
    Exponential moving average is a trick that is widely used in current GAN
    literature, e.g., PGGAN, StyleGAN, and BigGAN. The general idea is to
    maintain a model with the same architecture whose parameters are
    updated as a moving average of the trained weights in the original model.
In general, the model with moving averaged weights achieves better
performance.
Args:
module_keys (str | tuple[str]): The name of the ema model. Note that we
            require these keys to end with '_ema' so that we can easily
find the original model by discarding the last four characters.
interp_mode (str, optional): Mode of the interpolation method.
Defaults to 'lerp'.
interp_cfg (dict | None, optional): Set arguments of the interpolation
function. Defaults to None.
interval (int, optional): Evaluation interval (by iterations).
Default: -1.
start_iter (int, optional): Start iteration for ema. If the start
iteration is not reached, the weights of ema model will maintain
the same as the original one. Otherwise, its parameters are updated
as a moving average of the trained weights in the original model.
Default: 0.
"""
def __init__(self,
module_keys,
interp_mode='lerp',
interp_cfg=None,
interval=-1,
start_iter=0):
super().__init__()
assert isinstance(module_keys, str) or mmcv.is_tuple_of(
module_keys, str)
self.module_keys = (module_keys, ) if isinstance(module_keys,
str) else module_keys
# sanity check for the format of module keys
for k in self.module_keys:
assert k.endswith(
'_ema'), 'You should give keys that end with "_ema".'
self.interp_mode = interp_mode
self.interp_cfg = dict() if interp_cfg is None else deepcopy(
interp_cfg)
self.interval = interval
self.start_iter = start_iter
assert hasattr(
self, interp_mode
), f'Currently, we do not support {self.interp_mode} for EMA.'
self.interp_func = partial(
getattr(self, interp_mode), **self.interp_cfg)
@staticmethod
def lerp(a, b, momentum=0.999, momentum_nontrainable=0., trainable=True):
m = momentum if trainable else momentum_nontrainable
return a + (b - a) * m
def every_n_iters(self, runner, n):
if runner.iter < self.start_iter:
return True
return (runner.iter + 1 - self.start_iter) % n == 0 if n > 0 else False
@torch.no_grad()
def after_train_iter(self, runner):
if not self.every_n_iters(runner, self.interval):
return
model = runner.model.module if is_module_wrapper(
runner.model) else runner.model
for key in self.module_keys:
# get current ema states
ema_net = getattr(model, key)
states_ema = ema_net.state_dict(keep_vars=False)
# get currently original states
net = getattr(model, key[:-4])
states_orig = net.state_dict(keep_vars=True)
for k, v in states_orig.items():
if runner.iter < self.start_iter:
states_ema[k].data.copy_(v.data)
else:
states_ema[k] = self.interp_func(
v, states_ema[k], trainable=v.requires_grad).detach()
ema_net.load_state_dict(states_ema, strict=True)
def before_run(self, runner):
model = runner.model.module if is_module_wrapper(
runner.model) else runner.model
# sanity check for ema model
for k in self.module_keys:
if not hasattr(model, k) and not hasattr(model, k[:-4]):
raise RuntimeError(
f'Cannot find both {k[:-4]} and {k} network for EMA hook.')
if not hasattr(model, k) and hasattr(model, k[:-4]):
setattr(model, k, deepcopy(getattr(model, k[:-4])))
warnings.warn(
f'We do not suggest construct and initialize EMA model {k}'
' in hook. You may explicitly define it by yourself.')
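
# A sketch of attaching this hook via `custom_hooks` in a config (the module
# key 'generator_ema' is an assumption about the model definition):
#
#     custom_hooks = [
#         dict(
#             type='ExponentialMovingAverageHook',
#             module_keys=('generator_ema', ),
#             interval=1,
#             interp_cfg=dict(momentum=0.999))
#     ]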

# File: BasicVSR_PlusPlus-master/mmedit/core/utils/dist_utils.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info
def sync_random_seed(seed=None, device='cuda'):
"""Make sure different ranks share the same seed.
All workers must call this function, otherwise it will deadlock.
This method is generally used in `DistributedSampler`,
because the seed should be identical across all processes
in the distributed group.
Args:
seed (int, Optional): The seed. Default to None.
device (str): The device where the seed will be put on.
Default to 'cuda'.
Returns:
int: Seed to be used.
"""
if seed is None:
seed = np.random.randint(2**31)
assert isinstance(seed, int)
rank, world_size = get_dist_info()
if world_size == 1:
return seed
if rank == 0:
random_num = torch.tensor(seed, dtype=torch.int32, device=device)
else:
random_num = torch.tensor(0, dtype=torch.int32, device=device)
dist.broadcast(random_num, src=0)
return random_num.item()

# File: BasicVSR_PlusPlus-master/mmedit/core/optimizer/builder.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner import build_optimizer
def build_optimizers(model, cfgs):
"""Build multiple optimizers from configs.
If `cfgs` contains several dicts for optimizers, then a dict for each
constructed optimizers will be returned.
If `cfgs` only contains one optimizer config, the constructed optimizer
itself will be returned.
For example,
1) Multiple optimizer configs:
.. code-block:: python
optimizer_cfg = dict(
model1=dict(type='SGD', lr=lr),
model2=dict(type='SGD', lr=lr))
The return dict is
    ``{'model1': torch.optim.Optimizer, 'model2': torch.optim.Optimizer}``
2) Single optimizer config:
.. code-block:: python
optimizer_cfg = dict(type='SGD', lr=lr)
The return is ``torch.optim.Optimizer``.
Args:
model (:obj:`nn.Module`): The model with parameters to be optimized.
cfgs (dict): The config dict of the optimizer.
Returns:
dict[:obj:`torch.optim.Optimizer`] | :obj:`torch.optim.Optimizer`:
The initialized optimizers.
"""
optimizers = {}
if hasattr(model, 'module'):
model = model.module
# determine whether 'cfgs' has several dicts for optimizers
is_dict_of_dict = True
for key, cfg in cfgs.items():
if not isinstance(cfg, dict):
is_dict_of_dict = False
if is_dict_of_dict:
for key, cfg in cfgs.items():
cfg_ = cfg.copy()
module = getattr(model, key)
optimizers[key] = build_optimizer(module, cfg_)
return optimizers
return build_optimizer(model, cfgs)
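
# Example usage (a sketch; the multi-optimizer form assumes `model` exposes
# `generator` and `discriminator` submodules):
#
#     # single optimizer for the whole model
#     optim = build_optimizers(model, dict(type='Adam', lr=1e-4))
#
#     # one optimizer per submodule
#     optims = build_optimizers(
#         model,
#         dict(generator=dict(type='Adam', lr=1e-4),
#              discriminator=dict(type='Adam', lr=4e-4)))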

# File: BasicVSR_PlusPlus-master/mmedit/models/base.py
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import torch
import torch.nn as nn
class BaseModel(nn.Module, metaclass=ABCMeta):
"""Base model.
All models should subclass it.
    All subclasses should overwrite:
``init_weights``, supporting to initialize models.
``forward_train``, supporting to forward when training.
``forward_test``, supporting to forward when testing.
``train_step``, supporting to train one step when training.
"""
@abstractmethod
def init_weights(self):
"""Abstract method for initializing weight.
All subclass should overwrite it.
"""
@abstractmethod
def forward_train(self, imgs, labels):
"""Abstract method for training forward.
        All subclasses should overwrite it.
"""
@abstractmethod
def forward_test(self, imgs):
"""Abstract method for testing forward.
        All subclasses should overwrite it.
"""
def forward(self, imgs, labels, test_mode, **kwargs):
"""Forward function for base model.
Args:
imgs (Tensor): Input image(s).
labels (Tensor): Ground-truth label(s).
test_mode (bool): Whether in test mode.
kwargs (dict): Other arguments.
Returns:
Tensor: Forward results.
"""
if test_mode:
return self.forward_test(imgs, **kwargs)
return self.forward_train(imgs, labels, **kwargs)
@abstractmethod
def train_step(self, data_batch, optimizer):
"""Abstract method for one training step.
        All subclasses should overwrite it.
"""
def val_step(self, data_batch, **kwargs):
"""Abstract method for one validation step.
All subclass should overwrite it.
"""
output = self.forward_test(**data_batch, **kwargs)
return output
def parse_losses(self, losses):
"""Parse losses dict for different loss variants.
Args:
losses (dict): Loss dict.
Returns:
            loss (Tensor): Sum of the total loss.
            log_vars (dict): Loss dict for different variants.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
log_vars['loss'] = loss
for name in log_vars:
log_vars[name] = log_vars[name].item()
return loss, log_vars
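
# Worked example of the `parse_losses` contract (toy values, assuming
# `model` is an instance of a concrete subclass):
#
#     losses = dict(loss_pix=torch.tensor(0.5),
#                   loss_perceptual=[torch.tensor(0.1), torch.tensor(0.2)])
#     loss, log_vars = model.parse_losses(losses)
#     # loss -> 0.8; log_vars -> {'loss_pix': 0.5, 'loss_perceptual': 0.3,
#     #                           'loss': 0.8}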

# File: BasicVSR_PlusPlus-master/mmedit/models/builder.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv import build_from_cfg
from .registry import BACKBONES, COMPONENTS, LOSSES, MODELS
def build(cfg, registry, default_args=None):
"""Build module function.
Args:
cfg (dict): Configuration for building modules.
registry (obj): ``registry`` object.
default_args (dict, optional): Default arguments. Defaults to None.
"""
if isinstance(cfg, list):
modules = [
build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
]
return nn.Sequential(*modules)
return build_from_cfg(cfg, registry, default_args)
def build_backbone(cfg):
"""Build backbone.
Args:
cfg (dict): Configuration for building backbone.
"""
return build(cfg, BACKBONES)
def build_component(cfg):
"""Build component.
Args:
cfg (dict): Configuration for building component.
"""
return build(cfg, COMPONENTS)
def build_loss(cfg):
"""Build loss.
Args:
cfg (dict): Configuration for building loss.
"""
return build(cfg, LOSSES)
def build_model(cfg, train_cfg=None, test_cfg=None):
"""Build model.
Args:
cfg (dict): Configuration for building model.
train_cfg (dict): Training configuration. Default: None.
test_cfg (dict): Testing configuration. Default: None.
"""
return build(cfg, MODELS, dict(train_cfg=train_cfg, test_cfg=test_cfg))

# File: BasicVSR_PlusPlus-master/mmedit/models/restorers/basicvsr.py
# Copyright (c) OpenMMLab. All rights reserved.
import numbers
import os.path as osp
import mmcv
import numpy as np
import torch
from mmedit.core import tensor2img
from ..registry import MODELS
from .basic_restorer import BasicRestorer
@MODELS.register_module()
class BasicVSR(BasicRestorer):
"""BasicVSR model for video super-resolution.
Note that this model is used for IconVSR.
Paper:
BasicVSR: The Search for Essential Components in Video Super-Resolution
and Beyond, CVPR, 2021
Args:
generator (dict): Config for the generator structure.
pixel_loss (dict): Config for pixel-wise loss.
ensemble (dict): Config for ensemble. Default: None.
train_cfg (dict): Config for training. Default: None.
test_cfg (dict): Config for testing. Default: None.
pretrained (str): Path for pretrained model. Default: None.
"""
def __init__(self,
generator,
pixel_loss,
ensemble=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super().__init__(generator, pixel_loss, train_cfg, test_cfg,
pretrained)
# fix pre-trained networks
self.fix_iter = train_cfg.get('fix_iter', 0) if train_cfg else 0
self.is_weight_fixed = False
# count training steps
self.register_buffer('step_counter', torch.zeros(1))
# ensemble
self.forward_ensemble = None
if ensemble is not None:
if ensemble['type'] == 'SpatialTemporalEnsemble':
from mmedit.models.common.ensemble import \
SpatialTemporalEnsemble
is_temporal = ensemble.get('is_temporal_ensemble', False)
self.forward_ensemble = SpatialTemporalEnsemble(is_temporal)
else:
raise NotImplementedError(
'Currently support only '
'"SpatialTemporalEnsemble", but got type '
f'[{ensemble["type"]}]')
def check_if_mirror_extended(self, lrs):
"""Check whether the input is a mirror-extended sequence.
If mirror-extended, the i-th (i=0, ..., t-1) frame is equal to the
(t-1-i)-th frame.
Args:
lrs (tensor): Input LR images with shape (n, t, c, h, w)
"""
is_mirror_extended = False
if lrs.size(1) % 2 == 0:
lrs_1, lrs_2 = torch.chunk(lrs, 2, dim=1)
if torch.norm(lrs_1 - lrs_2.flip(1)) == 0:
is_mirror_extended = True
return is_mirror_extended
def train_step(self, data_batch, optimizer):
"""Train step.
Args:
data_batch (dict): A batch of data.
optimizer (obj): Optimizer.
Returns:
dict: Returned output.
"""
# fix SPyNet and EDVR at the beginning
if self.step_counter < self.fix_iter:
if not self.is_weight_fixed:
self.is_weight_fixed = True
for k, v in self.generator.named_parameters():
if 'spynet' in k or 'edvr' in k:
v.requires_grad_(False)
elif self.step_counter == self.fix_iter:
# train all the parameters
self.generator.requires_grad_(True)
outputs = self(**data_batch, test_mode=False)
loss, log_vars = self.parse_losses(outputs.pop('losses'))
# optimize
optimizer['generator'].zero_grad()
loss.backward()
optimizer['generator'].step()
self.step_counter += 1
outputs.update({'log_vars': log_vars})
return outputs
def evaluate(self, output, gt):
"""Evaluation function.
If the output contains multiple frames, we compute the metric
one by one and take an average.
Args:
output (Tensor): Model output with shape (n, t, c, h, w).
gt (Tensor): GT Tensor with shape (n, t, c, h, w).
Returns:
dict: Evaluation results.
"""
crop_border = self.test_cfg.crop_border
convert_to = self.test_cfg.get('convert_to', None)
eval_result = dict()
for metric in self.test_cfg.metrics:
if output.ndim == 5: # a sequence: (n, t, c, h, w)
avg = []
for i in range(0, output.size(1)):
output_i = tensor2img(output[:, i, :, :, :])
gt_i = tensor2img(gt[:, i, :, :, :])
avg.append(self.allowed_metrics[metric](
output_i, gt_i, crop_border, convert_to=convert_to))
eval_result[metric] = np.mean(avg)
            elif output.ndim == 4:  # an image: (n, c, h, w), for Vimeo-90K-T
output_img = tensor2img(output)
gt_img = tensor2img(gt)
value = self.allowed_metrics[metric](
output_img, gt_img, crop_border, convert_to=convert_to)
eval_result[metric] = value
return eval_result
def forward_test(self,
lq,
gt=None,
meta=None,
save_image=False,
save_path=None,
iteration=None):
"""Testing forward function.
Args:
lq (Tensor): LQ Tensor with shape (n, t, c, h, w).
gt (Tensor): GT Tensor with shape (n, t, c, h, w). Default: None.
save_image (bool): Whether to save image. Default: False.
save_path (str): Path to save image. Default: None.
iteration (int): Iteration for the saving image name.
Default: None.
Returns:
dict: Output results.
"""
with torch.no_grad():
if self.forward_ensemble is not None:
output = self.forward_ensemble(lq, self.generator)
else:
output = self.generator(lq)
# If the GT is an image (i.e. the center frame), the output sequence is
# turned to an image.
if gt is not None and gt.ndim == 4:
t = output.size(1)
if self.check_if_mirror_extended(lq): # with mirror extension
output = 0.5 * (output[:, t // 4] + output[:, -1 - t // 4])
else: # without mirror extension
output = output[:, t // 2]
if self.test_cfg is not None and self.test_cfg.get('metrics', None):
assert gt is not None, (
'evaluation with metrics must have gt images.')
results = dict(eval_result=self.evaluate(output, gt))
else:
results = dict(lq=lq.cpu(), output=output.cpu())
if gt is not None:
results['gt'] = gt.cpu()
# save image
if save_image:
if output.ndim == 4: # an image, key = 000001/0000 (Vimeo-90K)
img_name = meta[0]['key'].replace('/', '_')
if isinstance(iteration, numbers.Number):
save_path = osp.join(
save_path, f'{img_name}-{iteration + 1:06d}.png')
elif iteration is None:
save_path = osp.join(save_path, f'{img_name}.png')
else:
raise ValueError('iteration should be number or None, '
f'but got {type(iteration)}')
mmcv.imwrite(tensor2img(output), save_path)
elif output.ndim == 5: # a sequence, key = 000
folder_name = meta[0]['key'].split('/')[0]
for i in range(0, output.size(1)):
if isinstance(iteration, numbers.Number):
save_path_i = osp.join(
save_path, folder_name,
f'{i:08d}-{iteration + 1:06d}.png')
elif iteration is None:
save_path_i = osp.join(save_path, folder_name,
f'{i:08d}.png')
else:
raise ValueError('iteration should be number or None, '
f'but got {type(iteration)}')
mmcv.imwrite(
tensor2img(output[:, i, :, :, :]), save_path_i)
return results
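
# A sketch of a config dict that instantiates this model through the MODELS
# registry (field values are illustrative assumptions):
#
#     model = dict(
#         type='BasicVSR',
#         generator=dict(type='BasicVSRNet', mid_channels=64),
#         pixel_loss=dict(
#             type='CharbonnierLoss', loss_weight=1.0, reduction='mean'))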

# File: BasicVSR_PlusPlus-master/mmedit/models/common/flow_warp.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn.functional as F
def flow_warp(x,
flow,
interpolation='bilinear',
padding_mode='zeros',
align_corners=True):
"""Warp an image or a feature map with optical flow.
Args:
x (Tensor): Tensor with size (n, c, h, w).
        flow (Tensor): Tensor with size (n, h, w, 2). The last dimension
            holds two channels, denoting the offsets in width and height.
Note that the values are not normalized to [-1, 1].
interpolation (str): Interpolation mode: 'nearest' or 'bilinear'.
Default: 'bilinear'.
padding_mode (str): Padding mode: 'zeros' or 'border' or 'reflection'.
Default: 'zeros'.
align_corners (bool): Whether align corners. Default: True.
Returns:
Tensor: Warped image or feature map.
"""
if x.size()[-2:] != flow.size()[1:3]:
raise ValueError(f'The spatial sizes of input ({x.size()[-2:]}) and '
f'flow ({flow.size()[1:3]}) are not the same.')
_, _, h, w = x.size()
# create mesh grid
grid_y, grid_x = torch.meshgrid(torch.arange(0, h), torch.arange(0, w))
grid = torch.stack((grid_x, grid_y), 2).type_as(x) # (h, w, 2)
grid.requires_grad = False
grid_flow = grid + flow
# scale grid_flow to [-1,1]
grid_flow_x = 2.0 * grid_flow[:, :, :, 0] / max(w - 1, 1) - 1.0
grid_flow_y = 2.0 * grid_flow[:, :, :, 1] / max(h - 1, 1) - 1.0
grid_flow = torch.stack((grid_flow_x, grid_flow_y), dim=3)
output = F.grid_sample(
x,
grid_flow,
mode=interpolation,
padding_mode=padding_mode,
align_corners=align_corners)
return output
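
# Worked example (toy tensors): an all-zero flow is an identity warp, so the
# output matches the input up to floating-point error:
#
#     x = torch.rand(1, 3, 16, 16)
#     flow = torch.zeros(1, 16, 16, 2)
#     out = flow_warp(x, flow)  # shape (1, 3, 16, 16), close to x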

# File: BasicVSR_PlusPlus-master/mmedit/models/common/aspp.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import ConvModule
from torch import nn
from torch.nn import functional as F
from .separable_conv_module import DepthwiseSeparableConvModule
class ASPPPooling(nn.Sequential):
def __init__(self, in_channels, out_channels, conv_cfg, norm_cfg, act_cfg):
super().__init__(
nn.AdaptiveAvgPool2d(1),
ConvModule(
in_channels,
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def forward(self, x):
size = x.shape[-2:]
for mod in self:
x = mod(x)
return F.interpolate(
x, size=size, mode='bilinear', align_corners=False)
class ASPP(nn.Module):
"""ASPP module from DeepLabV3.
The code is adopted from
https://github.com/pytorch/vision/blob/master/torchvision/models/
segmentation/deeplabv3.py
For more information about the module:
`"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>`_.
Args:
in_channels (int): Input channels of the module.
out_channels (int): Output channels of the module.
mid_channels (int): Output channels of the intermediate ASPP conv
modules.
dilations (Sequence[int]): Dilation rate of three ASPP conv module.
Default: [12, 24, 36].
conv_cfg (dict): Config dict for convolution layer. If "None",
nn.Conv2d will be applied. Default: None.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
separable_conv (bool): Whether replace normal conv with depthwise
separable conv which is faster. Default: False.
"""
def __init__(self,
in_channels,
out_channels=256,
mid_channels=256,
dilations=(12, 24, 36),
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
separable_conv=False):
super().__init__()
if separable_conv:
conv_module = DepthwiseSeparableConvModule
else:
conv_module = ConvModule
modules = []
modules.append(
ConvModule(
in_channels,
mid_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
for dilation in dilations:
modules.append(
conv_module(
in_channels,
mid_channels,
3,
padding=dilation,
dilation=dilation,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
modules.append(
ASPPPooling(in_channels, mid_channels, conv_cfg, norm_cfg,
act_cfg))
self.convs = nn.ModuleList(modules)
self.project = nn.Sequential(
ConvModule(
5 * mid_channels,
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg), nn.Dropout(0.5))
def forward(self, x):
"""Forward function for ASPP module.
Args:
x (Tensor): Input tensor with shape (N, C, H, W).
Returns:
Tensor: Output tensor.
"""
res = []
for conv in self.convs:
res.append(conv(x))
res = torch.cat(res, dim=1)
return self.project(res)
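# Usage sketch (not part of the original file): an ASPP head on a
# hypothetical 512-channel backbone feature; the spatial size is preserved
# and the channel count becomes ``out_channels``.
if __name__ == '__main__':
    aspp = ASPP(in_channels=512, out_channels=256, mid_channels=64,
                dilations=(6, 12, 18))
    feat = torch.rand(2, 512, 32, 32)
    print(aspp(feat).shape)  # torch.Size([2, 256, 32, 32])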
| 3,861 | 29.650794 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/sr_backbone_utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import constant_init, kaiming_init
from mmcv.utils.parrots_wrapper import _BatchNorm
def default_init_weights(module, scale=1):
"""Initialize network weights.
Args:
        module (nn.Module): Module whose weights are to be initialized.
        scale (float): Scale factor applied to the initialized weights,
            especially useful for residual blocks. Default: 1.
"""
for m in module.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m, a=0, mode='fan_in', bias=0)
m.weight.data *= scale
elif isinstance(m, nn.Linear):
kaiming_init(m, a=0, mode='fan_in', bias=0)
m.weight.data *= scale
elif isinstance(m, _BatchNorm):
constant_init(m.weight, val=1, bias=0)
def make_layer(block, num_blocks, **kwargs):
    """Make layers by stacking the same blocks.
    Args:
        block (nn.Module): nn.Module class for the basic block.
        num_blocks (int): Number of blocks.
        kwargs (dict): Keyword arguments passed to each block.
    Returns:
        nn.Sequential: Stacked blocks in nn.Sequential.
    """
    layers = []
    for _ in range(num_blocks):
        layers.append(block(**kwargs))
return nn.Sequential(*layers)
class ResidualBlockNoBN(nn.Module):
"""Residual block without BN.
It has a style of:
::
---Conv-ReLU-Conv-+-
|________________|
Args:
mid_channels (int): Channel number of intermediate features.
Default: 64.
res_scale (float): Used to scale the residual before addition.
Default: 1.0.
"""
def __init__(self, mid_channels=64, res_scale=1.0):
super().__init__()
self.res_scale = res_scale
self.conv1 = nn.Conv2d(mid_channels, mid_channels, 3, 1, 1, bias=True)
self.conv2 = nn.Conv2d(mid_channels, mid_channels, 3, 1, 1, bias=True)
self.relu = nn.ReLU(inplace=True)
# if res_scale < 1.0, use the default initialization, as in EDSR.
# if res_scale = 1.0, use scaled kaiming_init, as in MSRResNet.
if res_scale == 1.0:
self.init_weights()
def init_weights(self):
"""Initialize weights for ResidualBlockNoBN.
Initialization methods like `kaiming_init` are for VGG-style
modules. For modules with residual paths, using smaller std is
better for stability and performance. We empirically use 0.1.
See more details in "ESRGAN: Enhanced Super-Resolution Generative
Adversarial Networks"
"""
for m in [self.conv1, self.conv2]:
default_init_weights(m, 0.1)
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
identity = x
out = self.conv2(self.relu(self.conv1(x)))
return identity + out * self.res_scale
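# Usage sketch (not part of the original file): stacking a few
# ``ResidualBlockNoBN`` blocks with ``make_layer``, as commonly done in SR
# trunks; channel count and spatial size are preserved.
if __name__ == '__main__':
    import torch
    trunk = make_layer(ResidualBlockNoBN, 4, mid_channels=64)
    feat = torch.rand(1, 64, 16, 16)
    print(trunk(feat).shape)  # torch.Size([1, 64, 16, 16])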
| 2,919 | 28.795918 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/model_utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
def set_requires_grad(nets, requires_grad=False):
"""Set requires_grad for all the networks.
Args:
nets (nn.Module | list[nn.Module]): A list of networks or a single
network.
requires_grad (bool): Whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def extract_bbox_patch(bbox, img, channel_first=True):
"""Extract patch from a given bbox
Args:
bbox (torch.Tensor | numpy.array): Bbox with (top, left, h, w). If
`img` has batch dimension, the `bbox` must be stacked at first
dimension. The shape should be (4,) or (n, 4).
img (torch.Tensor | numpy.array): Image data to be extracted. If
organized in batch dimension, the batch dimension must be the first
order like (n, h, w, c) or (n, c, h, w).
channel_first (bool): If True, the channel dimension of img is before
height and width, e.g. (c, h, w). Otherwise, the img shape (samples
in the batch) is like (h, w, c).
Returns:
(torch.Tensor | numpy.array): Extracted patches. The dimension of the \
output should be the same as `img`.
"""
def _extract(bbox, img):
assert len(bbox) == 4
t, l, h, w = bbox
if channel_first:
img_patch = img[..., t:t + h, l:l + w]
else:
img_patch = img[t:t + h, l:l + w, ...]
return img_patch
input_size = img.shape
assert len(input_size) == 3 or len(input_size) == 4
bbox_size = bbox.shape
assert bbox_size == (4, ) or (len(bbox_size) == 2
and bbox_size[0] == input_size[0])
# images with batch dimension
if len(input_size) == 4:
output_list = []
for i in range(input_size[0]):
img_patch_ = _extract(bbox[i], img[i:i + 1, ...])
output_list.append(img_patch_)
if isinstance(img, torch.Tensor):
img_patch = torch.cat(output_list, dim=0)
else:
img_patch = np.concatenate(output_list, axis=0)
# standardize image
else:
img_patch = _extract(bbox, img)
return img_patch
def scale_bbox(bbox, target_size):
"""Modify bbox to target size.
The original bbox will be enlarged to the target size with the original
bbox in the center of the new bbox.
Args:
bbox (np.ndarray | torch.Tensor): Bboxes to be modified. Bbox can
be in batch or not. The shape should be (4,) or (n, 4).
target_size (tuple[int]): Target size of final bbox.
Returns:
(np.ndarray | torch.Tensor): Modified bboxes.
"""
def _mod(bbox, target_size):
top_ori, left_ori, h_ori, w_ori = bbox
h, w = target_size
assert h >= h_ori and w >= w_ori
top = int(max(0, top_ori - (h - h_ori) // 2))
left = int(max(0, left_ori - (w - w_ori) // 2))
if isinstance(bbox, torch.Tensor):
bbox_new = torch.Tensor([top, left, h, w]).type_as(bbox)
else:
bbox_new = np.asarray([top, left, h, w])
return bbox_new
if isinstance(bbox, torch.Tensor):
bbox_new = torch.zeros_like(bbox)
elif isinstance(bbox, np.ndarray):
bbox_new = np.zeros_like(bbox)
else:
        raise TypeError('bbox must be torch.Tensor or numpy.ndarray, '
                        f'but got type {type(bbox)}')
bbox_shape = list(bbox.shape)
if len(bbox_shape) == 2:
for i in range(bbox_shape[0]):
bbox_new[i, :] = _mod(bbox[i], target_size)
else:
bbox_new = _mod(bbox, target_size)
return bbox_new
def extract_around_bbox(img, bbox, target_size, channel_first=True):
"""Extract patches around the given bbox.
    Args:
        img (torch.Tensor | numpy.array): Image data from which patches are
            extracted.
        bbox (np.ndarray | torch.Tensor): Bboxes to be modified. Bbox can
            be in batch or not.
        target_size (List(int)): Target size of final bbox.
        channel_first (bool): If True, the channel dimension of img is
            before height and width, e.g. (c, h, w). Default: True.
    Returns:
        tuple: 2-tuple of
            (torch.Tensor | numpy.array): Extracted patches. The dimension \
                of the output should be the same as `img`.
            (np.ndarray | torch.Tensor): The enlarged bboxes.
    """
bbox_new = scale_bbox(bbox, target_size)
img_patch = extract_bbox_patch(bbox_new, img, channel_first=channel_first)
return img_patch, bbox_new
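# Usage sketch (not part of the original file): enlarging two hypothetical
# 16x16 bboxes to 32x32 and cropping the corresponding patches from a
# batched image tensor.
if __name__ == '__main__':
    img = torch.rand(2, 3, 64, 64)
    bbox = torch.tensor([[10, 12, 16, 16], [20, 8, 16, 16]])  # (t, l, h, w)
    patch, bbox_new = extract_around_bbox(img, bbox, target_size=(32, 32))
    print(patch.shape)   # torch.Size([2, 3, 32, 32])
    print(bbox_new[0])   # tensor([ 2,  4, 32, 32])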
| 4,502 | 31.868613 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/separable_conv_module.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
class DepthwiseSeparableConvModule(nn.Module):
"""Depthwise separable convolution module.
See https://arxiv.org/pdf/1704.04861.pdf for details.
This module can replace a ConvModule with the conv block replaced by two
conv block: depthwise conv block and pointwise conv block. The depthwise
conv block contains depthwise-conv/norm/activation layers. The pointwise
conv block contains pointwise-conv/norm/activation layers. It should be
noted that there will be norm/activation layer in the depthwise conv block
if ``norm_cfg`` and ``act_cfg`` are specified.
Args:
in_channels (int): Same as nn.Conv2d.
out_channels (int): Same as nn.Conv2d.
kernel_size (int or tuple[int]): Same as nn.Conv2d.
stride (int or tuple[int]): Same as nn.Conv2d. Default: 1.
padding (int or tuple[int]): Same as nn.Conv2d. Default: 0.
dilation (int or tuple[int]): Same as nn.Conv2d. Default: 1.
norm_cfg (dict): Default norm config for both depthwise ConvModule and
pointwise ConvModule. Default: None.
act_cfg (dict): Default activation config for both depthwise ConvModule
and pointwise ConvModule. Default: dict(type='ReLU').
dw_norm_cfg (dict): Norm config of depthwise ConvModule. If it is
'default', it will be the same as ``norm_cfg``. Default: 'default'.
dw_act_cfg (dict): Activation config of depthwise ConvModule. If it is
'default', it will be the same as ``act_cfg``. Default: 'default'.
pw_norm_cfg (dict): Norm config of pointwise ConvModule. If it is
'default', it will be the same as `norm_cfg`. Default: 'default'.
pw_act_cfg (dict): Activation config of pointwise ConvModule. If it is
'default', it will be the same as ``act_cfg``. Default: 'default'.
kwargs (optional): Other shared arguments for depthwise and pointwise
ConvModule. See ConvModule for ref.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
norm_cfg=None,
act_cfg=dict(type='ReLU'),
dw_norm_cfg='default',
dw_act_cfg='default',
pw_norm_cfg='default',
pw_act_cfg='default',
**kwargs):
super().__init__()
assert 'groups' not in kwargs, 'groups should not be specified'
# if norm/activation config of depthwise/pointwise ConvModule is not
# specified, use default config.
dw_norm_cfg = dw_norm_cfg if dw_norm_cfg != 'default' else norm_cfg
dw_act_cfg = dw_act_cfg if dw_act_cfg != 'default' else act_cfg
pw_norm_cfg = pw_norm_cfg if pw_norm_cfg != 'default' else norm_cfg
pw_act_cfg = pw_act_cfg if pw_act_cfg != 'default' else act_cfg
# depthwise convolution
self.depthwise_conv = ConvModule(
in_channels,
in_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=in_channels,
norm_cfg=dw_norm_cfg,
act_cfg=dw_act_cfg,
**kwargs)
self.pointwise_conv = ConvModule(
in_channels,
out_channels,
1,
norm_cfg=pw_norm_cfg,
act_cfg=pw_act_cfg,
**kwargs)
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (N, C, H, W).
Returns:
Tensor: Output tensor.
"""
x = self.depthwise_conv(x)
x = self.pointwise_conv(x)
return x
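# Usage sketch (not part of the original file): comparing parameter counts
# of a hypothetical 3x3 regular conv and its depthwise separable
# counterpart; both map 64 -> 128 channels at the same output shape.
if __name__ == '__main__':
    import torch
    regular = ConvModule(64, 128, 3, padding=1)
    separable = DepthwiseSeparableConvModule(64, 128, 3, padding=1)
    print(sum(p.numel() for p in regular.parameters()))    # 73856
    print(sum(p.numel() for p in separable.parameters()))  # 8960
    print(separable(torch.rand(1, 64, 32, 32)).shape)  # (1, 128, 32, 32)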
| 3,907 | 38.877551 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/linear_module.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import build_activation_layer, kaiming_init
class LinearModule(nn.Module):
"""A linear block that contains linear/norm/activation layers.
For low level vision, we add spectral norm and padding layer.
Args:
in_features (int): Same as nn.Linear.
out_features (int): Same as nn.Linear.
bias (bool): Same as nn.Linear.
act_cfg (dict): Config dict for activation layer, "relu" by default.
inplace (bool): Whether to use inplace mode for activation.
with_spectral_norm (bool): Whether use spectral norm in linear module.
order (tuple[str]): The order of linear/activation layers. It is a
sequence of "linear", "norm" and "act". Examples are
("linear", "act") and ("act", "linear").
"""
def __init__(self,
in_features,
out_features,
bias=True,
act_cfg=dict(type='ReLU'),
inplace=True,
with_spectral_norm=False,
order=('linear', 'act')):
super().__init__()
assert act_cfg is None or isinstance(act_cfg, dict)
self.act_cfg = act_cfg
self.inplace = inplace
self.with_spectral_norm = with_spectral_norm
self.order = order
assert isinstance(self.order, tuple) and len(self.order) == 2
assert set(order) == set(['linear', 'act'])
self.with_activation = act_cfg is not None
self.with_bias = bias
# build linear layer
self.linear = nn.Linear(in_features, out_features, bias=bias)
# export the attributes of self.linear to a higher level for
# convenience
self.in_features = self.linear.in_features
self.out_features = self.linear.out_features
if self.with_spectral_norm:
self.linear = nn.utils.spectral_norm(self.linear)
# build activation layer
if self.with_activation:
act_cfg_ = act_cfg.copy()
act_cfg_.setdefault('inplace', inplace)
self.activate = build_activation_layer(act_cfg_)
# Use msra init by default
self.init_weights()
def init_weights(self):
if self.with_activation and self.act_cfg['type'] == 'LeakyReLU':
nonlinearity = 'leaky_relu'
a = self.act_cfg.get('negative_slope', 0.01)
else:
nonlinearity = 'relu'
a = 0
kaiming_init(self.linear, a=a, nonlinearity=nonlinearity)
def forward(self, x, activate=True):
"""Forward Function.
Args:
x (torch.Tensor): Input tensor with shape of :math:`(n, *, c)`.
Same as ``torch.nn.Linear``.
activate (bool, optional): Whether to use activation layer.
Defaults to True.
Returns:
torch.Tensor: Same as ``torch.nn.Linear``.
"""
for layer in self.order:
if layer == 'linear':
x = self.linear(x)
elif layer == 'act' and activate and self.with_activation:
x = self.activate(x)
return x
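# Usage sketch (not part of the original file): a LeakyReLU-activated
# linear block; ``init_weights`` picks the kaiming negative slope up from
# ``act_cfg`` automatically.
if __name__ == '__main__':
    import torch
    fc = LinearModule(
        128, 64, act_cfg=dict(type='LeakyReLU', negative_slope=0.2))
    print(fc(torch.rand(4, 128)).shape)  # torch.Size([4, 64])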
| 3,204 | 34.611111 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/contextual_attention.py | # Copyright (c) OpenMMLab. All rights reserved.
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
class ContextualAttentionModule(nn.Module):
"""Contexture attention module.
The details of this module can be found in:
Generative Image Inpainting with Contextual Attention
Args:
unfold_raw_kernel_size (int): Kernel size used in unfolding raw
feature. Default: 4.
unfold_raw_stride (int): Stride used in unfolding raw feature. Default:
2.
unfold_raw_padding (int): Padding used in unfolding raw feature.
Default: 1.
unfold_corr_kernel_size (int): Kernel size used in unfolding
context for computing correlation maps. Default: 3.
unfold_corr_stride (int): Stride used in unfolding context for
computing correlation maps. Default: 1.
unfold_corr_dilation (int): Dilation used in unfolding context for
computing correlation maps. Default: 1.
unfold_corr_padding (int): Padding used in unfolding context for
computing correlation maps. Default: 1.
scale (float): The resale factor used in resize input features.
Default: 0.5.
fuse_kernel_size (int): The kernel size used in fusion module.
Default: 3.
softmax_scale (float): The scale factor for softmax function.
Default: 10.
return_attention_score (bool): If True, the attention score will be
returned. Default: True.
"""
def __init__(self,
unfold_raw_kernel_size=4,
unfold_raw_stride=2,
unfold_raw_padding=1,
unfold_corr_kernel_size=3,
unfold_corr_stride=1,
unfold_corr_dilation=1,
unfold_corr_padding=1,
scale=0.5,
fuse_kernel_size=3,
softmax_scale=10,
return_attention_score=True):
super().__init__()
self.unfold_raw_kernel_size = unfold_raw_kernel_size
self.unfold_raw_stride = unfold_raw_stride
self.unfold_raw_padding = unfold_raw_padding
self.unfold_corr_kernel_size = unfold_corr_kernel_size
self.unfold_corr_stride = unfold_corr_stride
self.unfold_corr_dilation = unfold_corr_dilation
self.unfold_corr_padding = unfold_corr_padding
self.scale = scale
self.fuse_kernel_size = fuse_kernel_size
self.with_fuse_correlation = fuse_kernel_size > 1
self.softmax_scale = softmax_scale
self.return_attention_score = return_attention_score
if self.with_fuse_correlation:
assert fuse_kernel_size % 2 == 1
fuse_kernel = torch.eye(fuse_kernel_size).view(
1, 1, fuse_kernel_size, fuse_kernel_size)
self.register_buffer('fuse_kernel', fuse_kernel)
padding = int((fuse_kernel_size - 1) // 2)
self.fuse_conv = partial(F.conv2d, padding=padding, stride=1)
self.softmax = nn.Softmax(dim=1)
def forward(self, x, context, mask=None):
"""Forward Function.
Args:
x (torch.Tensor): Tensor with shape (n, c, h, w).
context (torch.Tensor): Tensor with shape (n, c, h, w).
mask (torch.Tensor): Tensor with shape (n, 1, h, w). Default: None.
Returns:
            tuple(torch.Tensor): Features after contextual attention.
"""
# raw features to be used in copy (deconv)
raw_context = context
raw_context_cols = self.im2col(
raw_context,
kernel_size=self.unfold_raw_kernel_size,
stride=self.unfold_raw_stride,
padding=self.unfold_raw_padding,
normalize=False,
return_cols=True)
# resize the feature to reduce computational cost
x = F.interpolate(x, scale_factor=self.scale)
context = F.interpolate(context, scale_factor=self.scale)
context_cols = self.im2col(
context,
kernel_size=self.unfold_corr_kernel_size,
stride=self.unfold_corr_stride,
padding=self.unfold_corr_padding,
dilation=self.unfold_corr_dilation,
normalize=True,
return_cols=True)
h_unfold, w_unfold = self.calculate_unfold_hw(
context.size()[-2:],
kernel_size=self.unfold_corr_kernel_size,
stride=self.unfold_corr_stride,
padding=self.unfold_corr_padding,
dilation=self.unfold_corr_dilation,
)
# reshape context_cols to
# (n*h_unfold*w_unfold, c, unfold_mks, unfold_mks)
# 'mks' is short for 'mask_kernel_size'
context_cols = context_cols.reshape(-1, *context_cols.shape[2:])
# the shape of correlation map should be:
# (n, h_unfold*w_unfold, h', w')
correlation_map = self.patch_correlation(x, context_cols)
# fuse correlation map to enlarge consistent attention region.
if self.with_fuse_correlation:
correlation_map = self.fuse_correlation_map(
correlation_map, h_unfold, w_unfold)
correlation_map = self.mask_correlation_map(correlation_map, mask=mask)
attention_score = self.softmax(correlation_map * self.softmax_scale)
raw_context_filter = raw_context_cols.reshape(
-1, *raw_context_cols.shape[2:])
output = self.patch_copy_deconv(attention_score, raw_context_filter)
# deconv will cause overlap and we need to remove the effects of that
overlap_factor = self.calculate_overlap_factor(attention_score)
output /= overlap_factor
if self.return_attention_score:
n, _, h_s, w_s = attention_score.size()
attention_score = attention_score.view(n, h_unfold, w_unfold, h_s,
w_s)
return output, attention_score
return output
def patch_correlation(self, x, kernel):
"""Calculate patch correlation.
Args:
x (torch.Tensor): Input tensor.
kernel (torch.Tensor): Kernel tensor.
Returns:
torch.Tensor: Tensor with shape of (n, l, h, w).
"""
n, _, h_in, w_in = x.size()
patch_corr = F.conv2d(
x.view(1, -1, h_in, w_in),
kernel,
stride=self.unfold_corr_stride,
padding=self.unfold_corr_padding,
dilation=self.unfold_corr_dilation,
groups=n)
h_out, w_out = patch_corr.size()[-2:]
return patch_corr.view(n, -1, h_out, w_out)
def patch_copy_deconv(self, attention_score, context_filter):
"""Copy patches using deconv.
Args:
attention_score (torch.Tensor): Tensor with shape of (n, l , h, w).
context_filter (torch.Tensor): Filter kernel.
Returns:
torch.Tensor: Tensor with shape of (n, c, h, w).
"""
n, _, h, w = attention_score.size()
attention_score = attention_score.view(1, -1, h, w)
output = F.conv_transpose2d(
attention_score,
context_filter,
stride=self.unfold_raw_stride,
padding=self.unfold_raw_padding,
groups=n)
h_out, w_out = output.size()[-2:]
return output.view(n, -1, h_out, w_out)
def fuse_correlation_map(self, correlation_map, h_unfold, w_unfold):
"""Fuse correlation map.
This operation is to fuse correlation map for increasing large
consistent correlation regions.
        The mechanism behind this op is simple: an identity ('eye') matrix
        is applied as a filter on the correlation map along the horizontal
        and vertical directions.
The shape of input correlation map is (n, h_unfold*w_unfold, h, w).
When adopting fusing, we will apply convolutional filter in the
reshaped feature map with shape of (n, 1, h_unfold*w_fold, h*w).
A simple specification for horizontal direction is shown below:
.. code-block:: python
(h, (h, (h, (h,
0) 1) 2) 3) ...
(h, 0)
(h, 1) 1
(h, 2) 1
(h, 3) 1
...
"""
# horizontal direction
n, _, h_map, w_map = correlation_map.size()
map_ = correlation_map.permute(0, 2, 3, 1)
map_ = map_.reshape(n, h_map * w_map, h_unfold * w_unfold, 1)
map_ = map_.permute(0, 3, 1, 2).contiguous()
map_ = self.fuse_conv(map_, self.fuse_kernel)
correlation_map = map_.view(n, h_unfold, w_unfold, h_map, w_map)
# vertical direction
map_ = correlation_map.permute(0, 2, 1, 4,
3).reshape(n, 1, h_unfold * w_unfold,
h_map * w_map)
map_ = self.fuse_conv(map_, self.fuse_kernel)
        # Note that the dimensions should be transposed since convolving with
        # the eye matrix puts the normed scores into the last few dimensions
correlation_map = map_.view(n, w_unfold, h_unfold, w_map,
h_map).permute(0, 4, 3, 2, 1)
correlation_map = correlation_map.reshape(n, -1, h_unfold, w_unfold)
return correlation_map
def calculate_unfold_hw(self,
input_size,
kernel_size=3,
stride=1,
dilation=1,
padding=0):
"""Calculate (h, w) after unfolding
The official implementation of `unfold` in pytorch will put the
dimension (h, w) into `L`. Thus, this function is just to calculate the
(h, w) according to the equation in:
https://pytorch.org/docs/stable/nn.html#torch.nn.Unfold
"""
h_in, w_in = input_size
h_unfold = int((h_in + 2 * padding - dilation *
(kernel_size - 1) - 1) / stride + 1)
w_unfold = int((w_in + 2 * padding - dilation *
(kernel_size - 1) - 1) / stride + 1)
return h_unfold, w_unfold
def calculate_overlap_factor(self, attention_score):
"""Calculate the overlap factor after applying deconv.
Args:
attention_score (torch.Tensor): The attention score with shape of
(n, c, h, w).
Returns:
torch.Tensor: The overlap factor will be returned.
"""
h, w = attention_score.shape[-2:]
kernel_size = self.unfold_raw_kernel_size
ones_input = torch.ones(1, 1, h, w).to(attention_score)
ones_filter = torch.ones(1, 1, kernel_size,
kernel_size).to(attention_score)
overlap = F.conv_transpose2d(
ones_input,
ones_filter,
stride=self.unfold_raw_stride,
padding=self.unfold_raw_padding)
# avoid division by zero
overlap[overlap == 0] = 1.
return overlap
def mask_correlation_map(self, correlation_map, mask):
"""Add mask weight for correlation map.
Add a negative infinity number to the masked regions so that softmax
function will result in 'zero' in those regions.
Args:
correlation_map (torch.Tensor): Correlation map with shape of
(n, h_unfold*w_unfold, h_map, w_map).
mask (torch.Tensor): Mask tensor with shape of (n, c, h, w). '1'
in the mask indicates masked region while '0' indicates valid
region.
Returns:
torch.Tensor: Updated correlation map with mask.
"""
if mask is not None:
mask = F.interpolate(mask, scale_factor=self.scale)
# if any pixel is masked in patch, the patch is considered to be
# masked
mask_cols = self.im2col(
mask,
kernel_size=self.unfold_corr_kernel_size,
stride=self.unfold_corr_stride,
padding=self.unfold_corr_padding,
dilation=self.unfold_corr_dilation)
mask_cols = (mask_cols.sum(dim=1, keepdim=True) > 0).float()
mask_cols = mask_cols.permute(0, 2,
1).reshape(mask.size(0), -1, 1, 1)
# add negative inf will bring zero in softmax
mask_cols[mask_cols == 1] = -float('inf')
correlation_map += mask_cols
return correlation_map
def im2col(self,
img,
kernel_size,
stride=1,
padding=0,
dilation=1,
normalize=False,
return_cols=False):
"""Reshape image-style feature to columns.
This function is used for unfold feature maps to columns. The
details of this function can be found in:
https://pytorch.org/docs/1.1.0/nn.html?highlight=unfold#torch.nn.Unfold
Args:
img (torch.Tensor): Features to be unfolded. The shape of this
feature should be (n, c, h, w).
kernel_size (int): In this function, we only support square kernel
with same height and width.
stride (int): Stride number in unfolding. Default: 1.
padding (int): Padding number in unfolding. Default: 0.
dilation (int): Dilation number in unfolding. Default: 1.
normalize (bool): If True, the unfolded feature will be normalized.
Default: False.
return_cols (bool): The official implementation in PyTorch of
unfolding will return features with shape of
(n, c*$prod{kernel_size}$, L). If True, the features will be
reshaped to (n, L, c, kernel_size, kernel_size). Otherwise,
the results will maintain the shape as the official
implementation.
Returns:
torch.Tensor: Unfolded columns. If `return_cols` is True, the \
shape of output tensor is \
`(n, L, c, kernel_size, kernel_size)`. Otherwise, the shape \
will be `(n, c*$prod{kernel_size}$, L)`.
"""
# unfold img to columns with shape (n, c*kernel_size**2, num_cols)
img_unfold = F.unfold(
img,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation)
# normalize the feature map
if normalize:
norm = torch.sqrt((img_unfold**2).sum(dim=1, keepdim=True))
eps = torch.tensor([1e-4]).to(img)
img_unfold = img_unfold / torch.max(norm, eps)
if return_cols:
img_unfold_ = img_unfold.permute(0, 2, 1)
n, num_cols = img_unfold_.size()[:2]
img_cols = img_unfold_.view(n, num_cols, img.size(1), kernel_size,
kernel_size)
return img_cols
return img_unfold
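# Usage sketch (not part of the original file): running the module on
# hypothetical 32-channel features with a centered square hole. With the
# default settings, the attention score has shape
# (n, h_unfold, w_unfold, h', w') on the 0.5x-scaled feature map.
if __name__ == '__main__':
    attention = ContextualAttentionModule()
    x = torch.rand(1, 32, 64, 64)
    context = torch.rand(1, 32, 64, 64)
    mask = torch.zeros(1, 1, 64, 64)
    mask[..., 16:48, 16:48] = 1.  # 1 marks the masked (hole) region
    out, score = attention(x, context, mask=mask)
    print(out.shape)    # torch.Size([1, 32, 64, 64])
    print(score.shape)  # torch.Size([1, 32, 32, 32, 32])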
| 15,214 | 39.039474 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/gated_conv_module.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, build_activation_layer
class SimpleGatedConvModule(nn.Module):
"""Simple Gated Convolutional Module.
This module is a simple gated convolutional module. The detailed formula
is:
.. math::
y = \\phi(conv1(x)) * \\sigma(conv2(x)),
where `phi` is the feature activation function and `sigma` is the gate
activation function. In default, the gate activation function is sigmoid.
Args:
in_channels (int): Same as nn.Conv2d.
out_channels (int): The number of channels of the output feature. Note
that `out_channels` in the conv module is doubled since this module
contains two convolutions for feature and gate separately.
kernel_size (int or tuple[int]): Same as nn.Conv2d.
feat_act_cfg (dict): Config dict for feature activation layer.
gate_act_cfg (dict): Config dict for gate activation layer.
kwargs (keyword arguments): Same as `ConvModule`.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
feat_act_cfg=dict(type='ELU'),
gate_act_cfg=dict(type='Sigmoid'),
**kwargs):
super().__init__()
# the activation function should specified outside conv module
kwargs_ = copy.deepcopy(kwargs)
kwargs_['act_cfg'] = None
self.with_feat_act = feat_act_cfg is not None
self.with_gate_act = gate_act_cfg is not None
self.conv = ConvModule(in_channels, out_channels * 2, kernel_size,
**kwargs_)
if self.with_feat_act:
self.feat_act = build_activation_layer(feat_act_cfg)
if self.with_gate_act:
self.gate_act = build_activation_layer(gate_act_cfg)
def forward(self, x):
"""Forward Function.
Args:
x (torch.Tensor): Input tensor with shape of (n, c, h, w).
Returns:
torch.Tensor: Output tensor with shape of (n, c, h', w').
"""
x = self.conv(x)
x, gate = torch.split(x, x.size(1) // 2, dim=1)
if self.with_feat_act:
x = self.feat_act(x)
if self.with_gate_act:
gate = self.gate_act(gate)
x = x * gate
return x
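# Usage sketch (not part of the original file): the internal conv produces
# ``2 * out_channels`` maps that are split into a feature half and a gate
# half, so the output keeps ``out_channels`` channels.
if __name__ == '__main__':
    gated_conv = SimpleGatedConvModule(3, 32, 3, padding=1)
    print(gated_conv(torch.rand(1, 3, 64, 64)).shape)  # (1, 32, 64, 64)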
| 2,423 | 32.205479 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/conv.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import CONV_LAYERS
from torch import nn
CONV_LAYERS.register_module('Deconv', module=nn.ConvTranspose2d)
# TODO: octave conv
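# Usage sketch (not part of the original file): once registered, a
# transposed conv can be built from a config dict with mmcv's
# ``build_conv_layer``.
if __name__ == '__main__':
    import torch
    from mmcv.cnn import build_conv_layer
    deconv = build_conv_layer(
        dict(type='Deconv'), 16, 8, kernel_size=4, stride=2, padding=1)
    print(deconv(torch.rand(1, 16, 32, 32)).shape)  # (1, 8, 64, 64)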
| 188 | 26 | 64 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/gca_module.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init, xavier_init
from torch.nn import functional as F
class GCAModule(nn.Module):
"""Guided Contextual Attention Module.
From https://arxiv.org/pdf/2001.04069.pdf.
Based on https://github.com/nbei/Deep-Flow-Guided-Video-Inpainting.
This module use image feature map to augment the alpha feature map with
guided contextual attention score.
    Image features and alpha features are unfolded into small patches that
    are later used as convolution kernels. Thus, we refer to the unfolding
    size as the kernel size.
Image feature patches have a default kernel size 3 while the kernel size of
alpha feature patches could be specified by `rate` (see `rate` below). The
image feature patches are used to convolve with the image feature itself
to calculate the contextual attention. Then the attention feature map is
convolved by alpha feature patches to obtain the attention alpha feature.
At last, the attention alpha feature is added to the input alpha feature.
Args:
in_channels (int): Input channels of the guided contextual attention
module.
out_channels (int): Output channels of the guided contextual attention
module.
kernel_size (int): Kernel size of image feature patches. Default 3.
stride (int): Stride when unfolding the image feature. Default 1.
rate (int): The downsample rate of image feature map. The corresponding
kernel size and stride of alpha feature patches will be `rate x 2`
and `rate`. It could be regarded as the granularity of the gca
module. Default: 2.
pad_args (dict): Parameters of padding when convolve image feature with
image feature patches or alpha feature patches. Allowed keys are
`mode` and `value`. See torch.nn.functional.pad() for more
information. Default: dict(mode='reflect').
interpolation (str): Interpolation method in upsampling and
downsampling.
penalty (float): Punishment hyperparameter to avoid a large correlation
between each unknown patch and itself.
eps (float): A small number to avoid dividing by 0 when calculating
the normed image feature patch. Default: 1e-4.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
stride=1,
rate=2,
pad_args=dict(mode='reflect'),
interpolation='nearest',
penalty=-1e4,
eps=1e-4):
super().__init__()
self.kernel_size = kernel_size
self.stride = stride
self.rate = rate
self.pad_args = pad_args
self.interpolation = interpolation
self.penalty = penalty
self.eps = eps
# reduced the channels of input image feature.
self.guidance_conv = nn.Conv2d(in_channels, in_channels // 2, 1)
# convolution after the attention alpha feature
self.out_conv = ConvModule(
out_channels,
out_channels,
1,
norm_cfg=dict(type='BN'),
act_cfg=None)
self.init_weights()
def init_weights(self):
xavier_init(self.guidance_conv, distribution='uniform')
xavier_init(self.out_conv.conv, distribution='uniform')
constant_init(self.out_conv.norm, 1e-3)
def forward(self, img_feat, alpha_feat, unknown=None, softmax_scale=1.):
"""Forward function of GCAModule.
Args:
img_feat (Tensor): Image feature map of shape
(N, ori_c, ori_h, ori_w).
alpha_feat (Tensor): Alpha feature map of shape
(N, alpha_c, ori_h, ori_w).
unknown (Tensor, optional): Unknown area map generated by trimap.
If specified, this tensor should have shape
(N, 1, ori_h, ori_w).
softmax_scale (float, optional): The softmax scale of the attention
if unknown area is not provided in forward. Default: 1.
Returns:
Tensor: The augmented alpha feature.
"""
if alpha_feat.shape[2:4] != img_feat.shape[2:4]:
raise ValueError(
'image feature size does not align with alpha feature size: '
f'image feature size {img_feat.shape[2:4]}, '
f'alpha feature size {alpha_feat.shape[2:4]}')
if unknown is not None and unknown.shape[2:4] != img_feat.shape[2:4]:
raise ValueError(
'image feature size does not align with unknown mask size: '
f'image feature size {img_feat.shape[2:4]}, '
f'unknown mask size {unknown.shape[2:4]}')
# preprocess image feature
img_feat = self.guidance_conv(img_feat)
img_feat = F.interpolate(
img_feat, scale_factor=1 / self.rate, mode=self.interpolation)
# preprocess unknown mask
unknown, softmax_scale = self.process_unknown_mask(
unknown, img_feat, softmax_scale)
img_ps, alpha_ps, unknown_ps = self.extract_feature_maps_patches(
img_feat, alpha_feat, unknown)
# create self correlation mask with shape:
# (N, img_h*img_w, img_h, img_w)
self_mask = self.get_self_correlation_mask(img_feat)
# split tensors by batch dimension; tuple is returned
img_groups = torch.split(img_feat, 1, dim=0)
img_ps_groups = torch.split(img_ps, 1, dim=0)
alpha_ps_groups = torch.split(alpha_ps, 1, dim=0)
unknown_ps_groups = torch.split(unknown_ps, 1, dim=0)
scale_groups = torch.split(softmax_scale, 1, dim=0)
groups = (img_groups, img_ps_groups, alpha_ps_groups,
unknown_ps_groups, scale_groups)
out = []
# i is the virtual index of the sample in the current batch
for img_i, img_ps_i, alpha_ps_i, unknown_ps_i, scale_i in zip(*groups):
similarity_map = self.compute_similarity_map(img_i, img_ps_i)
gca_score = self.compute_guided_attention_score(
similarity_map, unknown_ps_i, scale_i, self_mask)
out_i = self.propagate_alpha_feature(gca_score, alpha_ps_i)
out.append(out_i)
out = torch.cat(out, dim=0)
        out = out.reshape_as(alpha_feat)
out = self.out_conv(out) + alpha_feat
return out
def extract_feature_maps_patches(self, img_feat, alpha_feat, unknown):
"""Extract image feature, alpha feature unknown patches.
Args:
img_feat (Tensor): Image feature map of shape
(N, img_c, img_h, img_w).
alpha_feat (Tensor): Alpha feature map of shape
(N, alpha_c, ori_h, ori_w).
unknown (Tensor, optional): Unknown area map generated by trimap of
shape (N, 1, img_h, img_w).
Returns:
tuple: 3-tuple of
``Tensor``: Image feature patches of shape \
(N, img_h*img_w, img_c, img_ks, img_ks).
``Tensor``: Guided contextual attention alpha feature map. \
(N, img_h*img_w, alpha_c, alpha_ks, alpha_ks).
``Tensor``: Unknown mask of shape (N, img_h*img_w, 1, 1).
"""
# extract image feature patches with shape:
# (N, img_h*img_w, img_c, img_ks, img_ks)
img_ks = self.kernel_size
img_ps = self.extract_patches(img_feat, img_ks, self.stride)
# extract alpha feature patches with shape:
# (N, img_h*img_w, alpha_c, alpha_ks, alpha_ks)
alpha_ps = self.extract_patches(alpha_feat, self.rate * 2, self.rate)
# extract unknown mask patches with shape: (N, img_h*img_w, 1, 1)
unknown_ps = self.extract_patches(unknown, img_ks, self.stride)
unknown_ps = unknown_ps.squeeze(dim=2) # squeeze channel dimension
unknown_ps = unknown_ps.mean(dim=[2, 3], keepdim=True)
return img_ps, alpha_ps, unknown_ps
def compute_similarity_map(self, img_feat, img_ps):
"""Compute similarity between image feature patches.
Args:
img_feat (Tensor): Image feature map of shape
(1, img_c, img_h, img_w).
img_ps (Tensor): Image feature patches tensor of shape
(1, img_h*img_w, img_c, img_ks, img_ks).
Returns:
Tensor: Similarity map between image feature patches with shape \
(1, img_h*img_w, img_h, img_w).
"""
img_ps = img_ps[0] # squeeze dim 0
# convolve the feature to get correlation (similarity) map
escape_NaN = torch.FloatTensor([self.eps]).to(img_feat)
img_ps_normed = img_ps / torch.max(self.l2_norm(img_ps), escape_NaN)
img_feat = self.pad(img_feat, self.kernel_size, self.stride)
similarity_map = F.conv2d(img_feat, img_ps_normed)
return similarity_map
def compute_guided_attention_score(self, similarity_map, unknown_ps, scale,
self_mask):
"""Compute guided attention score.
Args:
similarity_map (Tensor): Similarity map of image feature with shape
(1, img_h*img_w, img_h, img_w).
unknown_ps (Tensor): Unknown area patches tensor of shape
(1, img_h*img_w, 1, 1).
scale (Tensor): Softmax scale of known and unknown area:
[unknown_scale, known_scale].
self_mask (Tensor): Self correlation mask of shape
(1, img_h*img_w, img_h, img_w). At (1, i*i, i, i) mask value
equals -1e4 for i in [1, img_h*img_w] and other area is all
zero.
Returns:
Tensor: Similarity map between image feature patches with shape \
(1, img_h*img_w, img_h, img_w).
"""
# scale the correlation with predicted scale factor for known and
# unknown area
unknown_scale, known_scale = scale[0]
out = similarity_map * (
unknown_scale * unknown_ps.gt(0.).float() +
known_scale * unknown_ps.le(0.).float())
# mask itself, self-mask only applied to unknown area
out = out + self_mask * unknown_ps
gca_score = F.softmax(out, dim=1)
return gca_score
def propagate_alpha_feature(self, gca_score, alpha_ps):
"""Propagate alpha feature based on guided attention score.
Args:
gca_score (Tensor): Guided attention score map of shape
(1, img_h*img_w, img_h, img_w).
alpha_ps (Tensor): Alpha feature patches tensor of shape
(1, img_h*img_w, alpha_c, alpha_ks, alpha_ks).
Returns:
Tensor: Propagated alpha feature map of shape \
(1, alpha_c, alpha_h, alpha_w).
"""
alpha_ps = alpha_ps[0] # squeeze dim 0
if self.rate == 1:
gca_score = self.pad(gca_score, kernel_size=2, stride=1)
alpha_ps = alpha_ps.permute(1, 0, 2, 3)
out = F.conv2d(gca_score, alpha_ps) / 4.
else:
out = F.conv_transpose2d(
gca_score, alpha_ps, stride=self.rate, padding=1) / 4.
return out
def process_unknown_mask(self, unknown, img_feat, softmax_scale):
"""Process unknown mask.
Args:
unknown (Tensor, optional): Unknown area map generated by trimap of
shape (N, 1, ori_h, ori_w)
img_feat (Tensor): The interpolated image feature map of shape
(N, img_c, img_h, img_w).
softmax_scale (float, optional): The softmax scale of the attention
if unknown area is not provided in forward. Default: 1.
Returns:
tuple: 2-tuple of
``Tensor``: Interpolated unknown area map of shape \
(N, img_h*img_w, img_h, img_w).
``Tensor``: Softmax scale tensor of known and unknown area of \
shape (N, 2).
"""
n, _, h, w = img_feat.shape
if unknown is not None:
unknown = unknown.clone()
unknown = F.interpolate(
unknown, scale_factor=1 / self.rate, mode=self.interpolation)
unknown_mean = unknown.mean(dim=[2, 3])
known_mean = 1 - unknown_mean
unknown_scale = torch.clamp(
torch.sqrt(unknown_mean / known_mean), 0.1, 10).to(img_feat)
known_scale = torch.clamp(
torch.sqrt(known_mean / unknown_mean), 0.1, 10).to(img_feat)
softmax_scale = torch.cat([unknown_scale, known_scale], dim=1)
else:
unknown = torch.ones((n, 1, h, w)).to(img_feat)
softmax_scale = torch.FloatTensor(
[softmax_scale,
softmax_scale]).view(1, 2).repeat(n, 1).to(img_feat)
return unknown, softmax_scale
def extract_patches(self, x, kernel_size, stride):
"""Extract feature patches.
The feature map will be padded automatically to make sure the number of
patches is equal to `(H / stride) * (W / stride)`.
Args:
x (Tensor): Feature map of shape (N, C, H, W).
kernel_size (int): Size of each patches.
stride (int): Stride between patches.
Returns:
Tensor: Extracted patches of shape \
(N, (H / stride) * (W / stride) , C, kernel_size, kernel_size).
"""
n, c, _, _ = x.shape
x = self.pad(x, kernel_size, stride)
x = F.unfold(x, (kernel_size, kernel_size), stride=(stride, stride))
x = x.permute(0, 2, 1)
x = x.reshape(n, -1, c, kernel_size, kernel_size)
return x
def pad(self, x, kernel_size, stride):
left = (kernel_size - stride + 1) // 2
right = (kernel_size - stride) // 2
pad = (left, right, left, right)
return F.pad(x, pad, **self.pad_args)
def get_self_correlation_mask(self, img_feat):
_, _, h, w = img_feat.shape
# As ONNX does not support dynamic num_classes, we have to convert it
# into an integer
self_mask = F.one_hot(
torch.arange(h * w).view(h, w), num_classes=int(h * w))
self_mask = self_mask.permute(2, 0, 1).view(1, h * w, h, w)
# use large negative value to mask out self-correlation before softmax
self_mask = self_mask * self.penalty
return self_mask.to(img_feat)
@staticmethod
def l2_norm(x):
x = x**2
x = x.sum(dim=[1, 2, 3], keepdim=True)
return torch.sqrt(x)
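# Usage sketch (not part of the original file): augmenting a hypothetical
# 128-channel alpha feature with guidance from an image feature of the
# same spatial size; the unknown mask here is a random binary map.
if __name__ == '__main__':
    gca = GCAModule(in_channels=128, out_channels=128, rate=2)
    img_feat = torch.rand(1, 128, 64, 64)
    alpha_feat = torch.rand(1, 128, 64, 64)
    unknown = (torch.rand(1, 1, 64, 64) > 0.5).float()
    print(gca(img_feat, alpha_feat, unknown).shape)  # (1, 128, 64, 64)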
| 14,808 | 40.250696 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/generation_model_utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, kaiming_init, normal_init, xavier_init
from torch.nn import init
def generation_init_weights(module, init_type='normal', init_gain=0.02):
"""Default initialization of network weights for image generation.
By default, we use normal init, but xavier and kaiming might work
better for some applications.
Args:
module (nn.Module): Module to be initialized.
init_type (str): The name of an initialization method:
normal | xavier | kaiming | orthogonal.
init_gain (float): Scaling factor for normal, xavier and
orthogonal.
"""
def init_func(m):
"""Initialization function.
Args:
m (nn.Module): Module to be initialized.
"""
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1
or classname.find('Linear') != -1):
if init_type == 'normal':
normal_init(m, 0.0, init_gain)
elif init_type == 'xavier':
xavier_init(m, gain=init_gain, distribution='normal')
elif init_type == 'kaiming':
kaiming_init(
m,
a=0,
mode='fan_in',
nonlinearity='leaky_relu',
distribution='normal')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight, gain=init_gain)
init.constant_(m.bias.data, 0.0)
else:
raise NotImplementedError(
f"Initialization method '{init_type}' is not implemented")
elif classname.find('BatchNorm2d') != -1:
# BatchNorm Layer's weight is not a matrix;
# only normal distribution applies.
normal_init(m, 1.0, init_gain)
module.apply(init_func)
class GANImageBuffer:
"""This class implements an image buffer that stores previously
generated images.
This buffer allows us to update the discriminator using a history of
generated images rather than the ones produced by the latest generator
to reduce model oscillation.
Args:
buffer_size (int): The size of image buffer. If buffer_size = 0,
no buffer will be created.
        buffer_ratio (float): The probability of returning an image
            previously stored in the buffer.
"""
def __init__(self, buffer_size, buffer_ratio=0.5):
self.buffer_size = buffer_size
# create an empty buffer
if self.buffer_size > 0:
self.img_num = 0
self.image_buffer = []
self.buffer_ratio = buffer_ratio
def query(self, images):
"""Query current image batch using a history of generated images.
Args:
images (Tensor): Current image batch without history information.
"""
if self.buffer_size == 0: # if the buffer size is 0, do nothing
return images
return_images = []
for image in images:
image = torch.unsqueeze(image.data, 0)
# if the buffer is not full, keep inserting current images
if self.img_num < self.buffer_size:
self.img_num = self.img_num + 1
self.image_buffer.append(image)
return_images.append(image)
else:
use_buffer = np.random.random() < self.buffer_ratio
                # with probability self.buffer_ratio, the buffer returns a
                # previously stored image and inserts the current image
                # into the buffer
if use_buffer:
random_id = np.random.randint(0, self.buffer_size)
image_tmp = self.image_buffer[random_id].clone()
self.image_buffer[random_id] = image
return_images.append(image_tmp)
                # with probability (1 - self.buffer_ratio), the buffer
                # returns the current image
else:
return_images.append(image)
# collect all the images and return
return_images = torch.cat(return_images, 0)
return return_images
class UnetSkipConnectionBlock(nn.Module):
"""Construct a Unet submodule with skip connections, with the following
structure: downsampling - `submodule` - upsampling.
Args:
outer_channels (int): Number of channels at the outer conv layer.
inner_channels (int): Number of channels at the inner conv layer.
in_channels (int): Number of channels in input images/features. If is
None, equals to `outer_channels`. Default: None.
submodule (UnetSkipConnectionBlock): Previously constructed submodule.
Default: None.
is_outermost (bool): Whether this module is the outermost module.
Default: False.
is_innermost (bool): Whether this module is the innermost module.
Default: False.
norm_cfg (dict): Config dict to build norm layer. Default:
`dict(type='BN')`.
use_dropout (bool): Whether to use dropout layers. Default: False.
"""
def __init__(self,
outer_channels,
inner_channels,
in_channels=None,
submodule=None,
is_outermost=False,
is_innermost=False,
norm_cfg=dict(type='BN'),
use_dropout=False):
super().__init__()
# cannot be both outermost and innermost
assert not (is_outermost and is_innermost), (
"'is_outermost' and 'is_innermost' cannot be True"
'at the same time.')
self.is_outermost = is_outermost
assert isinstance(norm_cfg, dict), ("'norm_cfg' should be dict, but"
f'got {type(norm_cfg)}')
assert 'type' in norm_cfg, "'norm_cfg' must have key 'type'"
# We use norm layers in the unet skip connection block.
# Only for IN, use bias since it does not have affine parameters.
use_bias = norm_cfg['type'] == 'IN'
kernel_size = 4
stride = 2
padding = 1
if in_channels is None:
in_channels = outer_channels
down_conv_cfg = dict(type='Conv2d')
down_norm_cfg = norm_cfg
down_act_cfg = dict(type='LeakyReLU', negative_slope=0.2)
up_conv_cfg = dict(type='Deconv')
up_norm_cfg = norm_cfg
up_act_cfg = dict(type='ReLU')
up_in_channels = inner_channels * 2
up_bias = use_bias
middle = [submodule]
upper = []
if is_outermost:
down_act_cfg = None
down_norm_cfg = None
up_bias = True
up_norm_cfg = None
upper = [nn.Tanh()]
elif is_innermost:
down_norm_cfg = None
up_in_channels = inner_channels
middle = []
else:
upper = [nn.Dropout(0.5)] if use_dropout else []
down = [
ConvModule(
in_channels=in_channels,
out_channels=inner_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=use_bias,
conv_cfg=down_conv_cfg,
norm_cfg=down_norm_cfg,
act_cfg=down_act_cfg,
order=('act', 'conv', 'norm'))
]
up = [
ConvModule(
in_channels=up_in_channels,
out_channels=outer_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=up_bias,
conv_cfg=up_conv_cfg,
norm_cfg=up_norm_cfg,
act_cfg=up_act_cfg,
order=('act', 'conv', 'norm'))
]
model = down + middle + up + upper
self.model = nn.Sequential(*model)
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
if self.is_outermost:
return self.model(x)
# add skip connections
return torch.cat([x, self.model(x)], 1)
class ResidualBlockWithDropout(nn.Module):
"""Define a Residual Block with dropout layers.
Ref:
Deep Residual Learning for Image Recognition
A residual block is a conv block with skip connections. A dropout layer is
added between two common conv modules.
Args:
channels (int): Number of channels in the conv layer.
padding_mode (str): The name of padding layer:
'reflect' | 'replicate' | 'zeros'.
norm_cfg (dict): Config dict to build norm layer. Default:
`dict(type='IN')`.
use_dropout (bool): Whether to use dropout layers. Default: True.
"""
def __init__(self,
channels,
padding_mode,
norm_cfg=dict(type='BN'),
use_dropout=True):
super().__init__()
assert isinstance(norm_cfg, dict), ("'norm_cfg' should be dict, but"
f'got {type(norm_cfg)}')
assert 'type' in norm_cfg, "'norm_cfg' must have key 'type'"
# We use norm layers in the residual block with dropout layers.
# Only for IN, use bias since it does not have affine parameters.
use_bias = norm_cfg['type'] == 'IN'
block = [
ConvModule(
in_channels=channels,
out_channels=channels,
kernel_size=3,
padding=1,
bias=use_bias,
norm_cfg=norm_cfg,
padding_mode=padding_mode)
]
if use_dropout:
block += [nn.Dropout(0.5)]
block += [
ConvModule(
in_channels=channels,
out_channels=channels,
kernel_size=3,
padding=1,
bias=use_bias,
norm_cfg=norm_cfg,
act_cfg=None,
padding_mode=padding_mode)
]
self.block = nn.Sequential(*block)
def forward(self, x):
"""Forward function. Add skip connections without final ReLU.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
out = x + self.block(x)
return out
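# Usage sketch (not part of the original file): initializing a residual
# block and querying a GAN image buffer; while the buffer is not yet full,
# the query returns the input batch unchanged.
if __name__ == '__main__':
    block = ResidualBlockWithDropout(16, padding_mode='reflect')
    generation_init_weights(block, init_type='normal', init_gain=0.02)
    print(block(torch.rand(1, 16, 32, 32)).shape)  # (1, 16, 32, 32)
    buffer = GANImageBuffer(buffer_size=50)
    fakes = torch.rand(4, 3, 32, 32)
    print(buffer.query(fakes).shape)  # torch.Size([4, 3, 32, 32])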
| 10,699 | 34.430464 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/ensemble.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
class SpatialTemporalEnsemble(nn.Module):
""" Apply spatial and temporal ensemble and compute outputs
Args:
is_temporal_ensemble (bool, optional): Whether to apply ensemble
temporally. If True, the sequence will also be flipped temporally.
If the input is an image, this argument must be set to False.
Default: False.
"""
def __init__(self, is_temporal_ensemble=False):
super().__init__()
self.is_temporal_ensemble = is_temporal_ensemble
def _transform(self, imgs, mode):
"""Apply spatial transform (flip, rotate) to the images.
Args:
            imgs (torch.Tensor): The images to be transformed.
mode (str): The mode of transform. Supported values are 'vertical',
'horizontal', and 'transpose', corresponding to vertical flip,
horizontal flip, and rotation, respectively.
Returns:
            torch.Tensor: The transformed images.
"""
is_single_image = False
if imgs.ndim == 4:
if self.is_temporal_ensemble:
raise ValueError('"is_temporal_ensemble" must be False if '
'the input is an image.')
is_single_image = True
imgs = imgs.unsqueeze(1)
if mode == 'vertical':
imgs = imgs.flip(4).clone()
elif mode == 'horizontal':
imgs = imgs.flip(3).clone()
elif mode == 'transpose':
imgs = imgs.permute(0, 1, 2, 4, 3).clone()
if is_single_image:
imgs = imgs.squeeze(1)
return imgs
def spatial_ensemble(self, imgs, model):
"""Apply spatial ensemble.
Args:
imgs (torch.Tensor): The images to be processed by the model. Its
size should be either (n, t, c, h, w) or (n, c, h, w).
model (nn.Module): The model to process the images.
Returns:
torch.Tensor: Output of the model with spatial ensemble applied.
"""
img_list = [imgs.cpu()]
for mode in ['vertical', 'horizontal', 'transpose']:
img_list.extend([self._transform(t, mode) for t in img_list])
output_list = [model(t.to(imgs.device)).cpu() for t in img_list]
for i in range(len(output_list)):
if i > 3:
output_list[i] = self._transform(output_list[i], 'transpose')
if i % 4 > 1:
output_list[i] = self._transform(output_list[i], 'horizontal')
if (i % 4) % 2 == 1:
output_list[i] = self._transform(output_list[i], 'vertical')
outputs = torch.stack(output_list, dim=0)
outputs = outputs.mean(dim=0, keepdim=False)
return outputs.to(imgs.device)
def forward(self, imgs, model):
"""Apply spatial and temporal ensemble.
Args:
imgs (torch.Tensor): The images to be processed by the model. Its
size should be either (n, t, c, h, w) or (n, c, h, w).
model (nn.Module): The model to process the images.
Returns:
            torch.Tensor: Output of the model with spatial and, if enabled,
                temporal ensemble applied.
"""
outputs = self.spatial_ensemble(imgs, model)
if self.is_temporal_ensemble:
outputs += self.spatial_ensemble(imgs.flip(1), model).flip(1)
outputs *= 0.5
return outputs
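# Usage sketch (not part of the original file): with an identity "model",
# every flipped/rotated pass is undone exactly, so the ensembled output
# equals the input (square frames are used so the transpose branch keeps
# the shape).
if __name__ == '__main__':
    ensemble = SpatialTemporalEnsemble(is_temporal_ensemble=True)
    model = nn.Identity()
    imgs = torch.rand(1, 5, 3, 16, 16)  # (n, t, c, h, w)
    out = ensemble(imgs, model)
    assert torch.allclose(out, imgs, atol=1e-5)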
| 3,541 | 32.415094 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/upsample.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from .sr_backbone_utils import default_init_weights
class PixelShufflePack(nn.Module):
""" Pixel Shuffle upsample layer.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
scale_factor (int): Upsample ratio.
upsample_kernel (int): Kernel size of Conv layer to expand channels.
Returns:
Upsampled feature map.
"""
def __init__(self, in_channels, out_channels, scale_factor,
upsample_kernel):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.scale_factor = scale_factor
self.upsample_kernel = upsample_kernel
self.upsample_conv = nn.Conv2d(
self.in_channels,
self.out_channels * scale_factor * scale_factor,
self.upsample_kernel,
padding=(self.upsample_kernel - 1) // 2)
self.init_weights()
def init_weights(self):
"""Initialize weights for PixelShufflePack.
"""
default_init_weights(self, 1)
def forward(self, x):
"""Forward function for PixelShufflePack.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
x = self.upsample_conv(x)
x = F.pixel_shuffle(x, self.scale_factor)
return x
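# Usage sketch (not part of the original file): a 4x upsampler mapping a
# hypothetical 64-channel feature map to a 3-channel image, as used at the
# tail of many SR networks.
if __name__ == '__main__':
    import torch
    upsampler = PixelShufflePack(64, 3, scale_factor=4, upsample_kernel=3)
    print(upsampler(torch.rand(1, 64, 16, 16)).shape)  # (1, 3, 64, 64)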
| 1,517 | 28.192308 | 76 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/img_normalize.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
class ImgNormalize(nn.Conv2d):
"""Normalize images with the given mean and std value.
Based on Conv2d layer, can work in GPU.
Args:
pixel_range (float): Pixel range of feature.
img_mean (Tuple[float]): Image mean of each channel.
img_std (Tuple[float]): Image std of each channel.
sign (int): Sign of bias. Default -1.
"""
def __init__(self, pixel_range, img_mean, img_std, sign=-1):
assert len(img_mean) == len(img_std)
num_channels = len(img_mean)
super().__init__(num_channels, num_channels, kernel_size=1)
std = torch.Tensor(img_std)
self.weight.data = torch.eye(num_channels).view(
num_channels, num_channels, 1, 1)
self.weight.data.div_(std.view(num_channels, 1, 1, 1))
self.bias.data = sign * pixel_range * torch.Tensor(img_mean)
self.bias.data.div_(std)
self.weight.requires_grad = False
self.bias.requires_grad = False
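# Usage sketch (not part of the original file): with ``sign=-1`` and a unit
# pixel range, the layer matches per-channel ``(img - mean) / std`` while
# running as a frozen 1x1 conv.
if __name__ == '__main__':
    mean, std = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
    norm = ImgNormalize(pixel_range=1., img_mean=mean, img_std=std)
    img = torch.rand(1, 3, 8, 8)
    expected = (img - torch.tensor(mean).view(1, 3, 1, 1)) \
        / torch.tensor(std).view(1, 3, 1, 1)
    assert torch.allclose(norm(img), expected, atol=1e-5)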
| 1,063 | 31.242424 | 68 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/mask_conv_module.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule
class MaskConvModule(ConvModule):
"""Mask convolution module.
    This is a simple wrapper for mask convolutions such as partial conv.
Convolutions in this module always need a mask as extra input.
Args:
in_channels (int): Same as nn.Conv2d.
out_channels (int): Same as nn.Conv2d.
kernel_size (int or tuple[int]): Same as nn.Conv2d.
stride (int or tuple[int]): Same as nn.Conv2d.
padding (int or tuple[int]): Same as nn.Conv2d.
dilation (int or tuple[int]): Same as nn.Conv2d.
groups (int): Same as nn.Conv2d.
bias (bool or str): If specified as `auto`, it will be decided by the
norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
False.
conv_cfg (dict): Config dict for convolution layer.
norm_cfg (dict): Config dict for normalization layer.
act_cfg (dict): Config dict for activation layer, "relu" by default.
inplace (bool): Whether to use inplace mode for activation.
with_spectral_norm (bool): Whether use spectral norm in conv module.
padding_mode (str): If the `padding_mode` has not been supported by
current `Conv2d` in Pytorch, we will use our own padding layer
instead. Currently, we support ['zeros', 'circular'] with official
implementation and ['reflect'] with our own implementation.
Default: 'zeros'.
order (tuple[str]): The order of conv/norm/activation layers. It is a
sequence of "conv", "norm" and "act". Examples are
("conv", "norm", "act") and ("act", "conv", "norm").
"""
supported_conv_list = ['PConv']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
assert self.conv_cfg['type'] in self.supported_conv_list
self.init_weights()
def forward(self,
x,
mask=None,
activate=True,
norm=True,
return_mask=True):
"""Forward function for partial conv2d.
Args:
            x (torch.Tensor): Tensor with shape of (n, c, h, w).
mask (torch.Tensor): Tensor with shape of (n, c, h, w) or
(n, 1, h, w). If mask is not given, the function will
work as standard conv2d. Default: None.
activate (bool): Whether use activation layer.
norm (bool): Whether use norm layer.
return_mask (bool): If True and mask is not None, the updated
mask will be returned. Default: True.
Returns:
Tensor or tuple: Result Tensor or 2-tuple of
``Tensor``: Results after partial conv.
``Tensor``: Updated mask will be returned if mask is given \
and `return_mask` is True.
"""
for layer in self.order:
if layer == 'conv':
if self.with_explicit_padding:
x = self.padding_layer(x)
mask = self.padding_layer(mask)
if return_mask:
x, updated_mask = self.conv(
x, mask, return_mask=return_mask)
else:
x = self.conv(x, mask, return_mask=False)
elif layer == 'norm' and norm and self.with_norm:
x = self.norm(x)
elif layer == 'act' and activate and self.with_activation:
x = self.activate(x)
if return_mask:
return x, updated_mask
return x
| 3,649 | 40.011236 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/partial_conv.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import CONV_LAYERS
@CONV_LAYERS.register_module(name='PConv')
class PartialConv2d(nn.Conv2d):
"""Implementation for partial convolution.
Image Inpainting for Irregular Holes Using Partial Convolutions
[https://arxiv.org/abs/1804.07723]
Args:
multi_channel (bool): If True, the mask is multi-channel. Otherwise,
the mask is single-channel.
        eps (float): A small value added to the denominator to avoid
            division by zero. For mixed precision training, change the
            default 1e-8 to 1e-6.
"""
def __init__(self, *args, multi_channel=False, eps=1e-8, **kwargs):
super().__init__(*args, **kwargs)
# whether the mask is multi-channel or not
self.multi_channel = multi_channel
self.eps = eps
if self.multi_channel:
out_channels, in_channels = self.out_channels, self.in_channels
else:
out_channels, in_channels = 1, 1
self.register_buffer(
'weight_mask_updater',
torch.ones(out_channels, in_channels, self.kernel_size[0],
self.kernel_size[1]))
        self.mask_kernel_numel = np.prod(
            self.weight_mask_updater.shape[1:4]).item()
def forward(self, input, mask=None, return_mask=True):
"""Forward function for partial conv2d.
Args:
input (torch.Tensor): Tensor with shape of (n, c, h, w).
mask (torch.Tensor): Tensor with shape of (n, c, h, w) or
(n, 1, h, w). If mask is not given, the function will
work as standard conv2d. Default: None.
return_mask (bool): If True and mask is not None, the updated
mask will be returned. Default: True.
Returns:
            torch.Tensor: Result after partial conv.
            torch.Tensor: Updated mask, returned if mask is given and
                ``return_mask`` is True.
"""
assert input.dim() == 4
if mask is not None:
assert mask.dim() == 4
if self.multi_channel:
assert mask.shape[1] == input.shape[1]
else:
assert mask.shape[1] == 1
# update mask and compute mask ratio
if mask is not None:
with torch.no_grad():
updated_mask = F.conv2d(
mask,
self.weight_mask_updater,
bias=None,
stride=self.stride,
padding=self.padding,
dilation=self.dilation)
mask_ratio = self.mask_kernel_numel / (updated_mask + self.eps)
updated_mask = torch.clamp(updated_mask, 0, 1)
mask_ratio = mask_ratio * updated_mask
# standard conv2d
if mask is not None:
input = input * mask
raw_out = super().forward(input)
if mask is not None:
if self.bias is None:
output = raw_out * mask_ratio
else:
# compute new bias when mask is given
bias_view = self.bias.view(1, self.out_channels, 1, 1)
output = (raw_out - bias_view) * mask_ratio + bias_view
output = output * updated_mask
else:
output = raw_out
if return_mask and mask is not None:
return output, updated_mask
return output
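
if __name__ == '__main__':
    # Minimal usage sketch (editor's addition): shows how the valid region
    # of the mask grows after one partial convolution. A pixel of the
    # updated mask becomes 1 as soon as its receptive field touches at
    # least one valid input pixel.
    pconv = PartialConv2d(3, 8, kernel_size=3, padding=1, multi_channel=False)
    img = torch.rand(1, 3, 32, 32)
    mask = torch.zeros(1, 1, 32, 32)
    mask[..., 8:24, 8:24] = 1.  # only the centre 16x16 patch is valid
    out, updated_mask = pconv(img, mask)
    # the valid region grows by one pixel on each side with a 3x3 kernel
    print(out.shape, int(updated_mask.sum()))  # (1, 8, 32, 32) and 324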
| 3,605 | 34.009709 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/losses/pixelwise_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..registry import LOSSES
from .utils import masked_loss
_reduction_modes = ['none', 'mean', 'sum']
@masked_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated L1 loss.
"""
return F.l1_loss(pred, target, reduction='none')
@masked_loss
def mse_loss(pred, target):
"""MSE loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated MSE loss.
"""
return F.mse_loss(pred, target, reduction='none')
@masked_loss
def charbonnier_loss(pred, target, eps=1e-12):
"""Charbonnier loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
        eps (float): A value used to control the curvature near zero.
            Default: 1e-12.
Returns:
Tensor: Calculated Charbonnier loss.
"""
return torch.sqrt((pred - target)**2 + eps)
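
# A quick numerical check (editor's addition): with the ``masked_loss``
# decorator applied, ``charbonnier_loss`` accepts the same ``weight`` and
# ``reduction`` arguments as ``l1_loss``. For eps -> 0 it approaches L1:
#
#     >>> pred = torch.Tensor([0., 2.])
#     >>> target = torch.Tensor([1., 1.])
#     >>> charbonnier_loss(pred, target)  # ~= mean(|pred - target|)
#     tensor(1.0000)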
@LOSSES.register_module()
class L1Loss(nn.Module):
"""L1 (mean absolute error, MAE) loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
sample_wise (bool): Whether calculate the loss sample-wise. This
argument only takes effect when `reduction` is 'mean' and `weight`
(argument of `forward()`) is not None. It will first reduce loss
            with 'mean' per sample, and then average over all the samples.
Default: False.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(f'Unsupported reduction mode: {reduction}. '
f'Supported ones are: {_reduction_modes}')
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
def forward(self, pred, target, weight=None, **kwargs):
"""Forward Function.
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
weight (Tensor, optional): of shape (N, C, H, W). Element-wise
weights. Default: None.
"""
return self.loss_weight * l1_loss(
pred,
target,
weight,
reduction=self.reduction,
sample_wise=self.sample_wise)
@LOSSES.register_module()
class MSELoss(nn.Module):
"""MSE (L2) loss.
Args:
loss_weight (float): Loss weight for MSE loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
sample_wise (bool): Whether calculate the loss sample-wise. This
argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It will first reduce loss
            with 'mean' per sample, and then average over all the samples.
Default: False.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(f'Unsupported reduction mode: {reduction}. '
f'Supported ones are: {_reduction_modes}')
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
def forward(self, pred, target, weight=None, **kwargs):
"""Forward Function.
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
weight (Tensor, optional): of shape (N, C, H, W). Element-wise
weights. Default: None.
"""
return self.loss_weight * mse_loss(
pred,
target,
weight,
reduction=self.reduction,
sample_wise=self.sample_wise)
@LOSSES.register_module()
class CharbonnierLoss(nn.Module):
"""Charbonnier loss (one variant of Robust L1Loss, a differentiable
variant of L1Loss).
Described in "Deep Laplacian Pyramid Networks for Fast and Accurate
Super-Resolution".
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
sample_wise (bool): Whether calculate the loss sample-wise. This
argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It will first reduce loss
            with 'mean' per sample, and then average over all the samples.
Default: False.
eps (float): A value used to control the curvature near zero.
Default: 1e-12.
"""
def __init__(self,
loss_weight=1.0,
reduction='mean',
sample_wise=False,
eps=1e-12):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(f'Unsupported reduction mode: {reduction}. '
f'Supported ones are: {_reduction_modes}')
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
self.eps = eps
def forward(self, pred, target, weight=None, **kwargs):
"""Forward Function.
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
weight (Tensor, optional): of shape (N, C, H, W). Element-wise
weights. Default: None.
"""
return self.loss_weight * charbonnier_loss(
pred,
target,
weight,
eps=self.eps,
reduction=self.reduction,
sample_wise=self.sample_wise)
@LOSSES.register_module()
class MaskedTVLoss(L1Loss):
"""Masked TV loss.
Args:
loss_weight (float, optional): Loss weight. Defaults to 1.0.
"""
def __init__(self, loss_weight=1.0):
super().__init__(loss_weight=loss_weight)
def forward(self, pred, mask=None):
"""Forward function.
Args:
pred (torch.Tensor): Tensor with shape of (n, c, h, w).
            mask (torch.Tensor, optional): Tensor with shape of (n, 1, h, w).
                Defaults to None; note that a mask is currently required by
                the implementation.
        Returns:
            Tensor: Calculated masked TV loss.
"""
y_diff = super().forward(
pred[:, :, :-1, :], pred[:, :, 1:, :], weight=mask[:, :, :-1, :])
x_diff = super().forward(
pred[:, :, :, :-1], pred[:, :, :, 1:], weight=mask[:, :, :, :-1])
loss = x_diff + y_diff
return loss
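
if __name__ == '__main__':
    # Minimal usage sketch (editor's addition): total-variation loss
    # restricted to the masked region. Note that ``mask`` is required by
    # the current implementation despite its ``None`` default.
    pred = torch.rand(1, 3, 16, 16)
    mask = torch.ones(1, 1, 16, 16)
    tv_loss = MaskedTVLoss(loss_weight=1.0)
    print(tv_loss(pred, mask))  # a scalar tensor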
| 7,356 | 32.13964 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/losses/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import functools
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
if reduction_enum == 1:
return loss.mean()
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
sample_wise (bool): Whether calculate the loss sample-wise. This
argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It will first reduce loss
            with 'mean' per sample, and then average over all the samples.
Default: False.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
# if weight is not specified or reduction is sum, just reduce the loss
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
# if reduction is mean, then compute mean over masked region
elif reduction == 'mean':
# expand weight from N1HW to NCHW
if weight.size(1) == 1:
weight = weight.expand_as(loss)
# small value to prevent division by zero
eps = 1e-12
# perform sample-wise mean
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True) # NCHW to N111
loss = (loss / (weight + eps)).sum() / weight.size(0)
# perform pixel-wise mean
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
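
# Worked example (editor's addition) of the ``sample_wise`` switch, where
# the first sample is fully valid and the second fully masked out:
#
#     >>> import torch
#     >>> loss = torch.ones(2, 1, 2, 2)
#     >>> weight = torch.zeros(2, 1, 2, 2)
#     >>> weight[0, ...] = 1.
#     >>> mask_reduce_loss(loss, weight, 'mean', sample_wise=False)
#     tensor(1.)   # sum over valid elements / number of valid elements
#     >>> mask_reduce_loss(loss, weight, 'mean', sample_wise=True)
#     tensor(0.5000)   # per-sample means (1. and 0.) averaged over samples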
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    sample_wise=False, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
sample_wise=False,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
| 3,743 | 31.275862 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/losses/gan_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import conv2d
from ..registry import LOSSES
@LOSSES.register_module()
class GANLoss(nn.Module):
"""Define GAN loss.
Args:
        gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge', 'smgan'.
real_label_val (float): The value for real label. Default: 1.0.
fake_label_val (float): The value for fake label. Default: 0.0.
loss_weight (float): Loss weight. Default: 1.0.
Note that loss_weight is only for generators; and it is always 1.0
for discriminators.
"""
def __init__(self,
gan_type,
real_label_val=1.0,
fake_label_val=0.0,
loss_weight=1.0):
super().__init__()
self.gan_type = gan_type
self.real_label_val = real_label_val
self.fake_label_val = fake_label_val
self.loss_weight = loss_weight
if self.gan_type == 'smgan':
self.gaussian_blur = GaussianBlur()
if self.gan_type == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif self.gan_type == 'lsgan' or self.gan_type == 'smgan':
self.loss = nn.MSELoss()
elif self.gan_type == 'wgan':
self.loss = self._wgan_loss
elif self.gan_type == 'hinge':
self.loss = nn.ReLU()
else:
raise NotImplementedError(
f'GAN type {self.gan_type} is not implemented.')
def _wgan_loss(self, input, target):
"""wgan loss.
Args:
input (Tensor): Input tensor.
target (bool): Target label.
Returns:
Tensor: wgan loss.
"""
return -input.mean() if target else input.mean()
def get_target_label(self, input, target_is_real):
"""Get target label.
Args:
input (Tensor): Input tensor.
target_is_real (bool): Whether the target is real or fake.
Returns:
(bool | Tensor): Target tensor. Return bool for wgan, otherwise,
return Tensor.
"""
if self.gan_type == 'wgan':
return target_is_real
target_val = (
self.real_label_val if target_is_real else self.fake_label_val)
return input.new_ones(input.size()) * target_val
def forward(self, input, target_is_real, is_disc=False, mask=None):
"""
Args:
input (Tensor): The input for the loss module, i.e., the network
prediction.
target_is_real (bool): Whether the target is real or fake.
            is_disc (bool): Whether the loss is for discriminators or not.
                Default: False.
            mask (Tensor, optional): Mask used by the 'smgan' loss.
                Default: None.
Returns:
Tensor: GAN loss value.
"""
target_label = self.get_target_label(input, target_is_real)
if self.gan_type == 'hinge':
if is_disc: # for discriminators in hinge-gan
input = -input if target_is_real else input
loss = self.loss(1 + input).mean()
else: # for generators in hinge-gan
loss = -input.mean()
elif self.gan_type == 'smgan':
input_height, input_width = input.shape[2:]
mask_height, mask_width = mask.shape[2:]
# Handle inconsistent size between outputs and masks
if input_height != mask_height or input_width != mask_width:
input = F.interpolate(
input,
size=(mask_height, mask_width),
mode='bilinear',
align_corners=True)
target_label = self.get_target_label(input, target_is_real)
if is_disc:
                if not target_is_real:
                    # use a blurred mask as the soft target for fake inputs
                    target_label = self.gaussian_blur(mask).detach()
                    target_label = (
                        target_label.cuda()
                        if mask.is_cuda else target_label.cpu())
loss = self.loss(input, target_label)
else:
loss = self.loss(input, target_label) * mask / mask.mean()
loss = loss.mean()
else: # other gan types
loss = self.loss(input, target_label)
# loss_weight is always 1.0 for discriminators
return loss if is_disc else loss * self.loss_weight
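
# Usage sketch (editor's addition): typical generator / discriminator calls.
#
#     >>> gan_loss = GANLoss('vanilla', loss_weight=0.1)
#     >>> fake_pred = torch.randn(2, 1)
#     >>> loss_g = gan_loss(fake_pred, target_is_real=True, is_disc=False)
#     >>> loss_d = gan_loss(fake_pred, target_is_real=False, is_disc=True)
#
# ``loss_g`` is scaled by ``loss_weight`` while ``loss_d`` is not, matching
# the note in the class docstring.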
@LOSSES.register_module()
class GaussianBlur(nn.Module):
"""A Gaussian filter which blurs a given tensor with a two-dimensional
gaussian kernel by convolving it along each channel. Batch operation
is supported.
This function is modified from kornia.filters.gaussian:
`<https://kornia.readthedocs.io/en/latest/_modules/kornia/filters/gaussian.html>`.
Args:
kernel_size (tuple[int]): The size of the kernel. Default: (71, 71).
sigma (tuple[float]): The standard deviation of the kernel.
Default (10.0, 10.0)
Returns:
Tensor: The Gaussian-blurred tensor.
Shape:
- input: Tensor with shape of (n, c, h, w)
- output: Tensor with shape of (n, c, h, w)
"""
def __init__(self, kernel_size=(71, 71), sigma=(10.0, 10.0)):
super(GaussianBlur, self).__init__()
self.kernel_size = kernel_size
self.sigma = sigma
self.padding = self.compute_zero_padding(kernel_size)
self.kernel = self.get_2d_gaussian_kernel(kernel_size, sigma)
@staticmethod
def compute_zero_padding(kernel_size):
"""Compute zero padding tuple."""
padding = [(ks - 1) // 2 for ks in kernel_size]
return padding[0], padding[1]
def get_2d_gaussian_kernel(self, kernel_size, sigma):
"""Get the two-dimensional Gaussian filter matrix coefficients.
Args:
kernel_size (tuple[int]): Kernel filter size in the x and y
direction. The kernel sizes
should be odd and positive.
sigma (tuple[int]): Gaussian standard deviation in
the x and y direction.
Returns:
kernel_2d (Tensor): A 2D torch tensor with gaussian filter
matrix coefficients.
"""
if not isinstance(kernel_size, tuple) or len(kernel_size) != 2:
raise TypeError(
'kernel_size must be a tuple of length two. Got {}'.format(
kernel_size))
if not isinstance(sigma, tuple) or len(sigma) != 2:
raise TypeError(
'sigma must be a tuple of length two. Got {}'.format(sigma))
kernel_size_x, kernel_size_y = kernel_size
sigma_x, sigma_y = sigma
kernel_x = self.get_1d_gaussian_kernel(kernel_size_x, sigma_x)
kernel_y = self.get_1d_gaussian_kernel(kernel_size_y, sigma_y)
kernel_2d = torch.matmul(
kernel_x.unsqueeze(-1),
kernel_y.unsqueeze(-1).t())
return kernel_2d
def get_1d_gaussian_kernel(self, kernel_size, sigma):
"""Get the Gaussian filter coefficients in one dimension (x or y direction).
Args:
kernel_size (int): Kernel filter size in x or y direction.
Should be odd and positive.
sigma (float): Gaussian standard deviation in x or y direction.
Returns:
kernel_1d (Tensor): A 1D torch tensor with gaussian filter
coefficients in x or y direction.
"""
if not isinstance(kernel_size,
int) or kernel_size % 2 == 0 or kernel_size <= 0:
raise TypeError(
'kernel_size must be an odd positive integer. Got {}'.format(
kernel_size))
kernel_1d = self.gaussian(kernel_size, sigma)
return kernel_1d
def gaussian(self, kernel_size, sigma):
def gauss_arg(x):
return -(x - kernel_size // 2)**2 / float(2 * sigma**2)
gauss = torch.stack([
torch.exp(torch.tensor(gauss_arg(x))) for x in range(kernel_size)
])
return gauss / gauss.sum()
def forward(self, x):
if not torch.is_tensor(x):
raise TypeError(
'Input x type is not a torch.Tensor. Got {}'.format(type(x)))
if not len(x.shape) == 4:
raise ValueError(
'Invalid input shape, we expect BxCxHxW. Got: {}'.format(
x.shape))
_, c, _, _ = x.shape
tmp_kernel = self.kernel.to(x.device).to(x.dtype)
kernel = tmp_kernel.repeat(c, 1, 1, 1)
return conv2d(x, kernel, padding=self.padding, stride=1, groups=c)
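
# Usage sketch (editor's addition): blur a batched single-channel mask with
# the default (71, 71) kernel; the spatial size is preserved by padding.
#
#     >>> blur = GaussianBlur()
#     >>> soft = blur(torch.ones(1, 1, 128, 128))  # (1, 1, 128, 128)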
def gradient_penalty_loss(discriminator, real_data, fake_data, mask=None):
"""Calculate gradient penalty for wgan-gp.
Args:
discriminator (nn.Module): Network for the discriminator.
real_data (Tensor): Real input data.
fake_data (Tensor): Fake input data.
mask (Tensor): Masks for inpainting. Default: None.
Returns:
Tensor: A tensor for gradient penalty.
"""
batch_size = real_data.size(0)
alpha = torch.rand(batch_size, 1, 1, 1).to(real_data)
# interpolate between real_data and fake_data
interpolates = alpha * real_data + (1. - alpha) * fake_data
interpolates = autograd.Variable(interpolates, requires_grad=True)
disc_interpolates = discriminator(interpolates)
gradients = autograd.grad(
outputs=disc_interpolates,
inputs=interpolates,
grad_outputs=torch.ones_like(disc_interpolates),
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
if mask is not None:
gradients = gradients * mask
gradients_penalty = ((gradients.norm(2, dim=1) - 1)**2).mean()
if mask is not None:
gradients_penalty /= torch.mean(mask)
return gradients_penalty
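
# Usage sketch (editor's addition), assuming a toy discriminator that maps
# (n, c, h, w) to a per-pixel score:
#
#     >>> disc = nn.Sequential(nn.Conv2d(3, 1, 3, padding=1))
#     >>> real = torch.rand(4, 3, 16, 16)
#     >>> fake = torch.rand(4, 3, 16, 16)
#     >>> gp = gradient_penalty_loss(disc, real, fake)  # scalar tensor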
@LOSSES.register_module()
class GradientPenaltyLoss(nn.Module):
"""Gradient penalty loss for wgan-gp.
Args:
loss_weight (float): Loss weight. Default: 1.0.
"""
def __init__(self, loss_weight=1.):
super().__init__()
self.loss_weight = loss_weight
def forward(self, discriminator, real_data, fake_data, mask=None):
"""Forward function.
Args:
discriminator (nn.Module): Network for the discriminator.
real_data (Tensor): Real input data.
fake_data (Tensor): Fake input data.
mask (Tensor): Masks for inpainting. Default: None.
Returns:
Tensor: Loss.
"""
loss = gradient_penalty_loss(
discriminator, real_data, fake_data, mask=mask)
return loss * self.loss_weight
@LOSSES.register_module()
class DiscShiftLoss(nn.Module):
"""Disc shift loss.
Args:
loss_weight (float, optional): Loss weight. Defaults to 1.0.
"""
def __init__(self, loss_weight=0.1):
super().__init__()
self.loss_weight = loss_weight
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Tensor with shape (n, c, h, w)
Returns:
Tensor: Loss.
"""
loss = torch.mean(x**2)
return loss * self.loss_weight
| 11,506 | 32.353623 | 86 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/losses/perceptual_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torchvision.models.vgg as vgg
from mmcv.runner import load_checkpoint
from torch.nn import functional as F
from mmedit.utils import get_root_logger
from ..registry import LOSSES
class PerceptualVGG(nn.Module):
"""VGG network used in calculating perceptual loss.
    In this implementation, we allow users to choose whether to normalize the
    input feature and which type of vgg network to use. Note that the
    pretrained path must fit the vgg type.
Args:
layer_name_list (list[str]): According to the name in this list,
forward function will return the corresponding features. This
            list contains the name of each layer in `vgg.features`. An example
of this list is ['4', '10'].
vgg_type (str): Set the type of vgg network. Default: 'vgg19'.
use_input_norm (bool): If True, normalize the input image.
            Importantly, the input feature must be in the range [0, 1].
Default: True.
pretrained (str): Path for pretrained weights. Default:
'torchvision://vgg19'
"""
def __init__(self,
layer_name_list,
vgg_type='vgg19',
use_input_norm=True,
pretrained='torchvision://vgg19'):
super().__init__()
if pretrained.startswith('torchvision://'):
assert vgg_type in pretrained
self.layer_name_list = layer_name_list
self.use_input_norm = use_input_norm
# get vgg model and load pretrained vgg weight
# remove _vgg from attributes to avoid `find_unused_parameters` bug
_vgg = getattr(vgg, vgg_type)()
self.init_weights(_vgg, pretrained)
num_layers = max(map(int, layer_name_list)) + 1
assert len(_vgg.features) >= num_layers
# only borrow layers that will be used from _vgg to avoid unused params
self.vgg_layers = _vgg.features[:num_layers]
if self.use_input_norm:
# the mean is for image with range [0, 1]
self.register_buffer(
'mean',
torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
            # the std is for image with range [0, 1]
self.register_buffer(
'std',
torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
for v in self.vgg_layers.parameters():
v.requires_grad = False
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
if self.use_input_norm:
x = (x - self.mean) / self.std
output = {}
for name, module in self.vgg_layers.named_children():
x = module(x)
if name in self.layer_name_list:
output[name] = x.clone()
return output
def init_weights(self, model, pretrained):
"""Init weights.
Args:
model (nn.Module): Models to be inited.
pretrained (str): Path for pretrained weights.
"""
logger = get_root_logger()
load_checkpoint(model, pretrained, logger=logger)
@LOSSES.register_module()
class PerceptualLoss(nn.Module):
"""Perceptual loss with commonly used style loss.
Args:
        layer_weights (dict): The weight for each layer of vgg feature for
perceptual loss. Here is an example: {'4': 1., '9': 1., '18': 1.},
which means the 5th, 10th and 18th feature layer will be
extracted with weight 1.0 in calculating losses.
        layer_weights_style (dict): The weight for each layer of vgg feature
            for style loss. If set to None, the weights are set equal to
the weights for perceptual loss. Default: None.
vgg_type (str): The type of vgg network used as feature extractor.
Default: 'vgg19'.
use_input_norm (bool): If True, normalize the input image in vgg.
Default: True.
perceptual_weight (float): If `perceptual_weight > 0`, the perceptual
            loss will be calculated and the loss will be multiplied by the
weight. Default: 1.0.
style_weight (float): If `style_weight > 0`, the style loss will be
            calculated and the loss will be multiplied by the weight.
Default: 1.0.
        norm_img (bool): If True, the image will be normalized to [0, 1].
            Note that this is different from `use_input_norm`, which norms
            the input inside the forward function of vgg according to the
            dataset statistics.
Importantly, the input image must be in range [-1, 1].
pretrained (str): Path for pretrained weights. Default:
'torchvision://vgg19'.
criterion (str): Criterion type. Options are 'l1' and 'mse'.
Default: 'l1'.
"""
def __init__(self,
layer_weights,
layer_weights_style=None,
vgg_type='vgg19',
use_input_norm=True,
perceptual_weight=1.0,
style_weight=1.0,
norm_img=True,
pretrained='torchvision://vgg19',
criterion='l1'):
super().__init__()
self.norm_img = norm_img
self.perceptual_weight = perceptual_weight
self.style_weight = style_weight
self.layer_weights = layer_weights
self.layer_weights_style = layer_weights_style
self.vgg = PerceptualVGG(
layer_name_list=list(self.layer_weights.keys()),
vgg_type=vgg_type,
use_input_norm=use_input_norm,
pretrained=pretrained)
if self.layer_weights_style is not None and \
self.layer_weights_style != self.layer_weights:
self.vgg_style = PerceptualVGG(
layer_name_list=list(self.layer_weights_style.keys()),
vgg_type=vgg_type,
use_input_norm=use_input_norm,
pretrained=pretrained)
else:
self.layer_weights_style = self.layer_weights
self.vgg_style = None
criterion = criterion.lower()
if criterion == 'l1':
self.criterion = torch.nn.L1Loss()
elif criterion == 'mse':
self.criterion = torch.nn.MSELoss()
else:
raise NotImplementedError(
f'{criterion} criterion has not been supported in'
' this version.')
def forward(self, x, gt):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
gt (Tensor): Ground-truth tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
if self.norm_img:
x = (x + 1.) * 0.5
gt = (gt + 1.) * 0.5
# extract vgg features
x_features = self.vgg(x)
gt_features = self.vgg(gt.detach())
# calculate perceptual loss
if self.perceptual_weight > 0:
percep_loss = 0
for k in x_features.keys():
percep_loss += self.criterion(
x_features[k], gt_features[k]) * self.layer_weights[k]
percep_loss *= self.perceptual_weight
else:
percep_loss = None
# calculate style loss
if self.style_weight > 0:
if self.vgg_style is not None:
x_features = self.vgg_style(x)
gt_features = self.vgg_style(gt.detach())
style_loss = 0
for k in x_features.keys():
style_loss += self.criterion(
self._gram_mat(x_features[k]),
self._gram_mat(
gt_features[k])) * self.layer_weights_style[k]
style_loss *= self.style_weight
else:
style_loss = None
return percep_loss, style_loss
def _gram_mat(self, x):
"""Calculate Gram matrix.
Args:
x (torch.Tensor): Tensor with shape of (n, c, h, w).
Returns:
torch.Tensor: Gram matrix.
"""
(n, c, h, w) = x.size()
features = x.view(n, c, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (c * h * w)
return gram
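
# Usage sketch (editor's addition): perceptual and style losses in one call.
# The torchvision VGG19 weights are downloaded on first use.
#
#     >>> loss_fn = PerceptualLoss(layer_weights={'4': 1., '9': 1., '18': 1.})
#     >>> x = torch.rand(1, 3, 64, 64) * 2 - 1   # norm_img expects [-1, 1]
#     >>> gt = torch.rand(1, 3, 64, 64) * 2 - 1
#     >>> percep, style = loss_fn(x, gt)          # two scalar tensors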
@LOSSES.register_module()
class TransferalPerceptualLoss(nn.Module):
"""Transferal perceptual loss.
Args:
loss_weight (float): Loss weight. Default: 1.0.
        use_attention (bool): If True, use soft-attention tensor.
            Default: True.
criterion (str): Criterion type. Options are 'l1' and 'mse'.
            Default: 'mse'.
"""
def __init__(self, loss_weight=1.0, use_attention=True, criterion='mse'):
super().__init__()
self.use_attention = use_attention
self.loss_weight = loss_weight
criterion = criterion.lower()
if criterion == 'l1':
self.loss_function = torch.nn.L1Loss()
elif criterion == 'mse':
self.loss_function = torch.nn.MSELoss()
else:
raise ValueError(
f"criterion should be 'l1' or 'mse', but got {criterion}")
def forward(self, maps, soft_attention, textures):
"""Forward function.
Args:
maps (Tuple[Tensor]): Input tensors.
soft_attention (Tensor): Soft-attention tensor.
textures (Tuple[Tensor]): Ground-truth tensors.
Returns:
Tensor: Forward results.
"""
if self.use_attention:
h, w = soft_attention.shape[-2:]
softs = [torch.sigmoid(soft_attention)]
for i in range(1, len(maps)):
softs.append(
F.interpolate(
soft_attention,
size=(h * pow(2, i), w * pow(2, i)),
mode='bicubic',
align_corners=False))
else:
softs = [1., 1., 1.]
loss_texture = 0
for map, soft, texture in zip(maps, softs, textures):
loss_texture += self.loss_function(map * soft, texture * soft)
return loss_texture * self.loss_weight
| 10,350 | 34.940972 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/backbones/sr_backbones/basicvsr_pp.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import constant_init
from mmcv.ops import ModulatedDeformConv2d, modulated_deform_conv2d
from mmcv.runner import load_checkpoint
from mmedit.models.backbones.sr_backbones.basicvsr_net import (
ResidualBlocksWithInputConv, SPyNet)
from mmedit.models.common import PixelShufflePack, flow_warp
from mmedit.models.registry import BACKBONES
from mmedit.utils import get_root_logger
@BACKBONES.register_module()
class BasicVSRPlusPlus(nn.Module):
"""BasicVSR++ network structure.
Support either x4 upsampling or same size output.
Paper:
BasicVSR++: Improving Video Super-Resolution with Enhanced Propagation
and Alignment
Args:
mid_channels (int, optional): Channel number of the intermediate
features. Default: 64.
num_blocks (int, optional): The number of residual blocks in each
propagation branch. Default: 7.
max_residue_magnitude (int): The maximum magnitude of the offset
residue (Eq. 6 in paper). Default: 10.
is_low_res_input (bool, optional): Whether the input is low-resolution
or not. If False, the output resolution is equal to the input
resolution. Default: True.
spynet_pretrained (str, optional): Pre-trained model path of SPyNet.
Default: None.
cpu_cache_length (int, optional): When the length of sequence is larger
than this value, the intermediate features are sent to CPU. This
saves GPU memory, but slows down the inference speed. You can
increase this number if you have a GPU with large memory.
Default: 100.
"""
def __init__(self,
mid_channels=64,
num_blocks=7,
max_residue_magnitude=10,
is_low_res_input=True,
spynet_pretrained=None,
cpu_cache_length=100):
super().__init__()
self.mid_channels = mid_channels
self.is_low_res_input = is_low_res_input
self.cpu_cache_length = cpu_cache_length
# optical flow
self.spynet = SPyNet(pretrained=spynet_pretrained)
# feature extraction module
if is_low_res_input:
self.feat_extract = ResidualBlocksWithInputConv(3, mid_channels, 5)
else:
self.feat_extract = nn.Sequential(
nn.Conv2d(3, mid_channels, 3, 2, 1),
nn.LeakyReLU(negative_slope=0.1, inplace=True),
nn.Conv2d(mid_channels, mid_channels, 3, 2, 1),
nn.LeakyReLU(negative_slope=0.1, inplace=True),
ResidualBlocksWithInputConv(mid_channels, mid_channels, 5))
# propagation branches
self.deform_align = nn.ModuleDict()
self.backbone = nn.ModuleDict()
modules = ['backward_1', 'forward_1', 'backward_2', 'forward_2']
for i, module in enumerate(modules):
self.deform_align[module] = SecondOrderDeformableAlignment(
2 * mid_channels,
mid_channels,
3,
padding=1,
deform_groups=16,
max_residue_magnitude=max_residue_magnitude)
self.backbone[module] = ResidualBlocksWithInputConv(
(2 + i) * mid_channels, mid_channels, num_blocks)
# upsampling module
self.reconstruction = ResidualBlocksWithInputConv(
5 * mid_channels, mid_channels, 5)
self.upsample1 = PixelShufflePack(
mid_channels, mid_channels, 2, upsample_kernel=3)
self.upsample2 = PixelShufflePack(
mid_channels, 64, 2, upsample_kernel=3)
self.conv_hr = nn.Conv2d(64, 64, 3, 1, 1)
self.conv_last = nn.Conv2d(64, 3, 3, 1, 1)
self.img_upsample = nn.Upsample(
scale_factor=4, mode='bilinear', align_corners=False)
# activation function
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
# check if the sequence is augmented by flipping
self.is_mirror_extended = False
def check_if_mirror_extended(self, lqs):
"""Check whether the input is a mirror-extended sequence.
If mirror-extended, the i-th (i=0, ..., t-1) frame is equal to the
(t-1-i)-th frame.
Args:
lqs (tensor): Input low quality (LQ) sequence with
shape (n, t, c, h, w).
"""
if lqs.size(1) % 2 == 0:
lqs_1, lqs_2 = torch.chunk(lqs, 2, dim=1)
if torch.norm(lqs_1 - lqs_2.flip(1)) == 0:
self.is_mirror_extended = True
def compute_flow(self, lqs):
"""Compute optical flow using SPyNet for feature alignment.
        Note that if the input is a mirror-extended sequence, 'flows_forward'
is not needed, since it is equal to 'flows_backward.flip(1)'.
Args:
lqs (tensor): Input low quality (LQ) sequence with
shape (n, t, c, h, w).
Return:
tuple(Tensor): Optical flow. 'flows_forward' corresponds to the
flows used for forward-time propagation (current to previous).
'flows_backward' corresponds to the flows used for
backward-time propagation (current to next).
"""
n, t, c, h, w = lqs.size()
lqs_1 = lqs[:, :-1, :, :, :].reshape(-1, c, h, w)
lqs_2 = lqs[:, 1:, :, :, :].reshape(-1, c, h, w)
flows_backward = self.spynet(lqs_1, lqs_2).view(n, t - 1, 2, h, w)
if self.is_mirror_extended: # flows_forward = flows_backward.flip(1)
flows_forward = None
else:
flows_forward = self.spynet(lqs_2, lqs_1).view(n, t - 1, 2, h, w)
if self.cpu_cache:
flows_backward = flows_backward.cpu()
flows_forward = flows_forward.cpu()
return flows_forward, flows_backward
def propagate(self, feats, flows, module_name):
"""Propagate the latent features throughout the sequence.
Args:
            feats (dict[str, list[tensor]]): Features from previous branches.
                Each component is a list of tensors with shape (n, c, h, w).
flows (tensor): Optical flows with shape (n, t - 1, 2, h, w).
            module_name (str): The name of the propagation branch. Can either
                be 'backward_1', 'forward_1', 'backward_2', or 'forward_2'.
Return:
dict(list[tensor]): A dictionary containing all the propagated
features. Each key in the dictionary corresponds to a
propagation branch, which is represented by a list of tensors.
"""
n, t, _, h, w = flows.size()
frame_idx = range(0, t + 1)
flow_idx = range(-1, t)
mapping_idx = list(range(0, len(feats['spatial'])))
mapping_idx += mapping_idx[::-1]
if 'backward' in module_name:
frame_idx = frame_idx[::-1]
flow_idx = frame_idx
feat_prop = flows.new_zeros(n, self.mid_channels, h, w)
for i, idx in enumerate(frame_idx):
feat_current = feats['spatial'][mapping_idx[idx]]
if self.cpu_cache:
feat_current = feat_current.cuda()
feat_prop = feat_prop.cuda()
# second-order deformable alignment
if i > 0:
flow_n1 = flows[:, flow_idx[i], :, :, :]
if self.cpu_cache:
flow_n1 = flow_n1.cuda()
cond_n1 = flow_warp(feat_prop, flow_n1.permute(0, 2, 3, 1))
# initialize second-order features
feat_n2 = torch.zeros_like(feat_prop)
flow_n2 = torch.zeros_like(flow_n1)
cond_n2 = torch.zeros_like(cond_n1)
if i > 1: # second-order features
feat_n2 = feats[module_name][-2]
if self.cpu_cache:
feat_n2 = feat_n2.cuda()
flow_n2 = flows[:, flow_idx[i - 1], :, :, :]
if self.cpu_cache:
flow_n2 = flow_n2.cuda()
flow_n2 = flow_n1 + flow_warp(flow_n2,
flow_n1.permute(0, 2, 3, 1))
cond_n2 = flow_warp(feat_n2, flow_n2.permute(0, 2, 3, 1))
# flow-guided deformable convolution
cond = torch.cat([cond_n1, feat_current, cond_n2], dim=1)
feat_prop = torch.cat([feat_prop, feat_n2], dim=1)
feat_prop = self.deform_align[module_name](feat_prop, cond,
flow_n1, flow_n2)
# concatenate and residual blocks
feat = [feat_current] + [
feats[k][idx]
for k in feats if k not in ['spatial', module_name]
] + [feat_prop]
if self.cpu_cache:
feat = [f.cuda() for f in feat]
feat = torch.cat(feat, dim=1)
feat_prop = feat_prop + self.backbone[module_name](feat)
feats[module_name].append(feat_prop)
if self.cpu_cache:
feats[module_name][-1] = feats[module_name][-1].cpu()
torch.cuda.empty_cache()
if 'backward' in module_name:
feats[module_name] = feats[module_name][::-1]
return feats
def upsample(self, lqs, feats):
"""Compute the output image given the features.
Args:
lqs (tensor): Input low quality (LQ) sequence with
shape (n, t, c, h, w).
            feats (dict): The features from the propagation branches.
Returns:
Tensor: Output HR sequence with shape (n, t, c, 4h, 4w).
"""
outputs = []
num_outputs = len(feats['spatial'])
mapping_idx = list(range(0, num_outputs))
mapping_idx += mapping_idx[::-1]
for i in range(0, lqs.size(1)):
hr = [feats[k].pop(0) for k in feats if k != 'spatial']
hr.insert(0, feats['spatial'][mapping_idx[i]])
hr = torch.cat(hr, dim=1)
if self.cpu_cache:
hr = hr.cuda()
hr = self.reconstruction(hr)
hr = self.lrelu(self.upsample1(hr))
hr = self.lrelu(self.upsample2(hr))
hr = self.lrelu(self.conv_hr(hr))
hr = self.conv_last(hr)
if self.is_low_res_input:
hr += self.img_upsample(lqs[:, i, :, :, :])
else:
hr += lqs[:, i, :, :, :]
if self.cpu_cache:
hr = hr.cpu()
torch.cuda.empty_cache()
outputs.append(hr)
return torch.stack(outputs, dim=1)
def forward(self, lqs):
"""Forward function for BasicVSR++.
Args:
lqs (tensor): Input low quality (LQ) sequence with
shape (n, t, c, h, w).
Returns:
Tensor: Output HR sequence with shape (n, t, c, 4h, 4w).
"""
n, t, c, h, w = lqs.size()
# whether to cache the features in CPU (no effect if using CPU)
if t > self.cpu_cache_length and lqs.is_cuda:
self.cpu_cache = True
else:
self.cpu_cache = False
if self.is_low_res_input:
lqs_downsample = lqs.clone()
else:
lqs_downsample = F.interpolate(
lqs.view(-1, c, h, w), scale_factor=0.25,
mode='bicubic').view(n, t, c, h // 4, w // 4)
# check whether the input is an extended sequence
self.check_if_mirror_extended(lqs)
feats = {}
# compute spatial features
if self.cpu_cache:
feats['spatial'] = []
for i in range(0, t):
feat = self.feat_extract(lqs[:, i, :, :, :]).cpu()
feats['spatial'].append(feat)
torch.cuda.empty_cache()
else:
feats_ = self.feat_extract(lqs.view(-1, c, h, w))
h, w = feats_.shape[2:]
feats_ = feats_.view(n, t, -1, h, w)
feats['spatial'] = [feats_[:, i, :, :, :] for i in range(0, t)]
# compute optical flow using the low-res inputs
        assert lqs_downsample.size(3) >= 64 and lqs_downsample.size(4) >= 64, (
            'The height and width of low-res inputs must be at least 64, '
            f'but got {lqs_downsample.size(3)} and {lqs_downsample.size(4)}.')
flows_forward, flows_backward = self.compute_flow(lqs_downsample)
        # feature propagation
for iter_ in [1, 2]:
for direction in ['backward', 'forward']:
module = f'{direction}_{iter_}'
feats[module] = []
if direction == 'backward':
flows = flows_backward
elif flows_forward is not None:
flows = flows_forward
else:
flows = flows_backward.flip(1)
feats = self.propagate(feats, flows, module)
if self.cpu_cache:
del flows
torch.cuda.empty_cache()
return self.upsample(lqs, feats)
def init_weights(self, pretrained=None, strict=True):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Default: None.
strict (bool, optional): Whether strictly load the pretrained
model. Default: True.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=strict, logger=logger)
elif pretrained is not None:
raise TypeError(f'"pretrained" must be a str or None. '
f'But received {type(pretrained)}.')
class SecondOrderDeformableAlignment(ModulatedDeformConv2d):
"""Second-order deformable alignment module.
Args:
in_channels (int): Same as nn.Conv2d.
out_channels (int): Same as nn.Conv2d.
kernel_size (int or tuple[int]): Same as nn.Conv2d.
stride (int or tuple[int]): Same as nn.Conv2d.
padding (int or tuple[int]): Same as nn.Conv2d.
dilation (int or tuple[int]): Same as nn.Conv2d.
groups (int): Same as nn.Conv2d.
bias (bool or str): If specified as `auto`, it will be decided by the
norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
False.
max_residue_magnitude (int): The maximum magnitude of the offset
residue (Eq. 6 in paper). Default: 10.
"""
def __init__(self, *args, **kwargs):
self.max_residue_magnitude = kwargs.pop('max_residue_magnitude', 10)
super(SecondOrderDeformableAlignment, self).__init__(*args, **kwargs)
self.conv_offset = nn.Sequential(
nn.Conv2d(3 * self.out_channels + 4, self.out_channels, 3, 1, 1),
nn.LeakyReLU(negative_slope=0.1, inplace=True),
nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1),
nn.LeakyReLU(negative_slope=0.1, inplace=True),
nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1),
nn.LeakyReLU(negative_slope=0.1, inplace=True),
nn.Conv2d(self.out_channels, 27 * self.deform_groups, 3, 1, 1),
)
self.init_offset()
def init_offset(self):
constant_init(self.conv_offset[-1], val=0, bias=0)
def forward(self, x, extra_feat, flow_1, flow_2):
extra_feat = torch.cat([extra_feat, flow_1, flow_2], dim=1)
out = self.conv_offset(extra_feat)
o1, o2, mask = torch.chunk(out, 3, dim=1)
# offset
offset = self.max_residue_magnitude * torch.tanh(
torch.cat((o1, o2), dim=1))
offset_1, offset_2 = torch.chunk(offset, 2, dim=1)
offset_1 = offset_1 + flow_1.flip(1).repeat(1,
offset_1.size(1) // 2, 1,
1)
offset_2 = offset_2 + flow_2.flip(1).repeat(1,
offset_2.size(1) // 2, 1,
1)
offset = torch.cat([offset_1, offset_2], dim=1)
# mask
mask = torch.sigmoid(mask)
return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias,
self.stride, self.padding,
self.dilation, self.groups,
self.deform_groups)
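
if __name__ == '__main__':
    # Minimal smoke test (editor's addition): runs BasicVSR++ on a random
    # low-resolution clip. ``spynet_pretrained=None`` leaves the flow
    # network randomly initialized, so this only checks shapes, not
    # restoration quality. The deformable convolution may require a CUDA
    # build of mmcv.
    model = BasicVSRPlusPlus(mid_channels=64, num_blocks=7)
    lqs = torch.rand(1, 5, 3, 64, 64)  # (n, t, c, h, w) with h, w >= 64
    with torch.no_grad():
        out = model(lqs)
    print(out.shape)  # torch.Size([1, 5, 3, 256, 256])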
| 16,773 | 37.56092 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/backbones/sr_backbones/basicvsr_net.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import load_checkpoint
from mmedit.models.common import (PixelShufflePack, ResidualBlockNoBN,
flow_warp, make_layer)
from mmedit.models.registry import BACKBONES
from mmedit.utils import get_root_logger
@BACKBONES.register_module()
class BasicVSRNet(nn.Module):
"""BasicVSR network structure for video super-resolution.
Support only x4 upsampling.
Paper:
BasicVSR: The Search for Essential Components in Video Super-Resolution
and Beyond, CVPR, 2021
Args:
mid_channels (int): Channel number of the intermediate features.
Default: 64.
num_blocks (int): Number of residual blocks in each propagation branch.
Default: 30.
spynet_pretrained (str): Pre-trained model path of SPyNet.
Default: None.
"""
def __init__(self, mid_channels=64, num_blocks=30, spynet_pretrained=None):
super().__init__()
self.mid_channels = mid_channels
# optical flow network for feature alignment
self.spynet = SPyNet(pretrained=spynet_pretrained)
# propagation branches
self.backward_resblocks = ResidualBlocksWithInputConv(
mid_channels + 3, mid_channels, num_blocks)
self.forward_resblocks = ResidualBlocksWithInputConv(
mid_channels + 3, mid_channels, num_blocks)
# upsample
self.fusion = nn.Conv2d(
mid_channels * 2, mid_channels, 1, 1, 0, bias=True)
self.upsample1 = PixelShufflePack(
mid_channels, mid_channels, 2, upsample_kernel=3)
self.upsample2 = PixelShufflePack(
mid_channels, 64, 2, upsample_kernel=3)
self.conv_hr = nn.Conv2d(64, 64, 3, 1, 1)
self.conv_last = nn.Conv2d(64, 3, 3, 1, 1)
self.img_upsample = nn.Upsample(
scale_factor=4, mode='bilinear', align_corners=False)
# activation function
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def check_if_mirror_extended(self, lrs):
"""Check whether the input is a mirror-extended sequence.
If mirror-extended, the i-th (i=0, ..., t-1) frame is equal to the
(t-1-i)-th frame.
Args:
lrs (tensor): Input LR images with shape (n, t, c, h, w)
"""
self.is_mirror_extended = False
if lrs.size(1) % 2 == 0:
lrs_1, lrs_2 = torch.chunk(lrs, 2, dim=1)
if torch.norm(lrs_1 - lrs_2.flip(1)) == 0:
self.is_mirror_extended = True
def compute_flow(self, lrs):
"""Compute optical flow using SPyNet for feature warping.
        Note that if the input is a mirror-extended sequence, 'flows_forward'
is not needed, since it is equal to 'flows_backward.flip(1)'.
Args:
lrs (tensor): Input LR images with shape (n, t, c, h, w)
Return:
tuple(Tensor): Optical flow. 'flows_forward' corresponds to the
flows used for forward-time propagation (current to previous).
'flows_backward' corresponds to the flows used for
backward-time propagation (current to next).
"""
n, t, c, h, w = lrs.size()
lrs_1 = lrs[:, :-1, :, :, :].reshape(-1, c, h, w)
lrs_2 = lrs[:, 1:, :, :, :].reshape(-1, c, h, w)
flows_backward = self.spynet(lrs_1, lrs_2).view(n, t - 1, 2, h, w)
if self.is_mirror_extended: # flows_forward = flows_backward.flip(1)
flows_forward = None
else:
flows_forward = self.spynet(lrs_2, lrs_1).view(n, t - 1, 2, h, w)
return flows_forward, flows_backward
def forward(self, lrs):
"""Forward function for BasicVSR.
Args:
lrs (Tensor): Input LR sequence with shape (n, t, c, h, w).
Returns:
Tensor: Output HR sequence with shape (n, t, c, 4h, 4w).
"""
n, t, c, h, w = lrs.size()
assert h >= 64 and w >= 64, (
'The height and width of inputs should be at least 64, '
f'but got {h} and {w}.')
# check whether the input is an extended sequence
self.check_if_mirror_extended(lrs)
# compute optical flow
flows_forward, flows_backward = self.compute_flow(lrs)
        # backward-time propagation
outputs = []
feat_prop = lrs.new_zeros(n, self.mid_channels, h, w)
for i in range(t - 1, -1, -1):
if i < t - 1: # no warping required for the last timestep
flow = flows_backward[:, i, :, :, :]
feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
feat_prop = torch.cat([lrs[:, i, :, :, :], feat_prop], dim=1)
feat_prop = self.backward_resblocks(feat_prop)
outputs.append(feat_prop)
outputs = outputs[::-1]
# forward-time propagation and upsampling
feat_prop = torch.zeros_like(feat_prop)
for i in range(0, t):
lr_curr = lrs[:, i, :, :, :]
if i > 0: # no warping required for the first timestep
if flows_forward is not None:
flow = flows_forward[:, i - 1, :, :, :]
else:
flow = flows_backward[:, -i, :, :, :]
feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
feat_prop = torch.cat([lr_curr, feat_prop], dim=1)
feat_prop = self.forward_resblocks(feat_prop)
# upsampling given the backward and forward features
out = torch.cat([outputs[i], feat_prop], dim=1)
out = self.lrelu(self.fusion(out))
out = self.lrelu(self.upsample1(out))
out = self.lrelu(self.upsample2(out))
out = self.lrelu(self.conv_hr(out))
out = self.conv_last(out)
base = self.img_upsample(lr_curr)
out += base
outputs[i] = out
return torch.stack(outputs, dim=1)
def init_weights(self, pretrained=None, strict=True):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Default: None.
            strict (bool, optional): Whether to strictly load the pretrained
                model. Default: True.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=strict, logger=logger)
elif pretrained is not None:
raise TypeError(f'"pretrained" must be a str or None. '
f'But received {type(pretrained)}.')
class ResidualBlocksWithInputConv(nn.Module):
"""Residual blocks with a convolution in front.
Args:
in_channels (int): Number of input channels of the first conv.
out_channels (int): Number of channels of the residual blocks.
Default: 64.
num_blocks (int): Number of residual blocks. Default: 30.
"""
def __init__(self, in_channels, out_channels=64, num_blocks=30):
super().__init__()
main = []
# a convolution used to match the channels of the residual blocks
main.append(nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=True))
main.append(nn.LeakyReLU(negative_slope=0.1, inplace=True))
# residual blocks
main.append(
make_layer(
ResidualBlockNoBN, num_blocks, mid_channels=out_channels))
self.main = nn.Sequential(*main)
def forward(self, feat):
"""
Forward function for ResidualBlocksWithInputConv.
Args:
feat (Tensor): Input feature with shape (n, in_channels, h, w)
Returns:
Tensor: Output feature with shape (n, out_channels, h, w)
"""
return self.main(feat)
class SPyNet(nn.Module):
"""SPyNet network structure.
    The differences to the SPyNet in [tof.py] are that
    1. more SPyNetBasicModules are used in this version, and
2. no batch normalization is used in this version.
Paper:
Optical Flow Estimation using a Spatial Pyramid Network, CVPR, 2017
Args:
pretrained (str): path for pre-trained SPyNet. Default: None.
"""
def __init__(self, pretrained):
super().__init__()
self.basic_module = nn.ModuleList(
[SPyNetBasicModule() for _ in range(6)])
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=True, logger=logger)
elif pretrained is not None:
raise TypeError('[pretrained] should be str or None, '
f'but got {type(pretrained)}.')
self.register_buffer(
'mean',
torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
self.register_buffer(
'std',
torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
def compute_flow(self, ref, supp):
"""Compute flow from ref to supp.
Note that in this function, the images are already resized to a
multiple of 32.
Args:
ref (Tensor): Reference image with shape of (n, 3, h, w).
supp (Tensor): Supporting image with shape of (n, 3, h, w).
Returns:
Tensor: Estimated optical flow: (n, 2, h, w).
"""
n, _, h, w = ref.size()
# normalize the input images
ref = [(ref - self.mean) / self.std]
supp = [(supp - self.mean) / self.std]
# generate downsampled frames
for level in range(5):
ref.append(
F.avg_pool2d(
input=ref[-1],
kernel_size=2,
stride=2,
count_include_pad=False))
supp.append(
F.avg_pool2d(
input=supp[-1],
kernel_size=2,
stride=2,
count_include_pad=False))
ref = ref[::-1]
supp = supp[::-1]
# flow computation
flow = ref[0].new_zeros(n, 2, h // 32, w // 32)
for level in range(len(ref)):
if level == 0:
flow_up = flow
else:
flow_up = F.interpolate(
input=flow,
scale_factor=2,
mode='bilinear',
align_corners=True) * 2.0
# add the residue to the upsampled flow
flow = flow_up + self.basic_module[level](
torch.cat([
ref[level],
flow_warp(
supp[level],
flow_up.permute(0, 2, 3, 1),
padding_mode='border'), flow_up
], 1))
return flow
def forward(self, ref, supp):
"""Forward function of SPyNet.
This function computes the optical flow from ref to supp.
Args:
ref (Tensor): Reference image with shape of (n, 3, h, w).
supp (Tensor): Supporting image with shape of (n, 3, h, w).
Returns:
Tensor: Estimated optical flow: (n, 2, h, w).
"""
# upsize to a multiple of 32
h, w = ref.shape[2:4]
w_up = w if (w % 32) == 0 else 32 * (w // 32 + 1)
h_up = h if (h % 32) == 0 else 32 * (h // 32 + 1)
ref = F.interpolate(
input=ref, size=(h_up, w_up), mode='bilinear', align_corners=False)
supp = F.interpolate(
input=supp,
size=(h_up, w_up),
mode='bilinear',
align_corners=False)
# compute flow, and resize back to the original resolution
flow = F.interpolate(
input=self.compute_flow(ref, supp),
size=(h, w),
mode='bilinear',
align_corners=False)
# adjust the flow values
flow[:, 0, :, :] *= float(w) / float(w_up)
flow[:, 1, :, :] *= float(h) / float(h_up)
return flow
class SPyNetBasicModule(nn.Module):
"""Basic Module for SPyNet.
Paper:
Optical Flow Estimation using a Spatial Pyramid Network, CVPR, 2017
"""
def __init__(self):
super().__init__()
self.basic_module = nn.Sequential(
ConvModule(
in_channels=8,
out_channels=32,
kernel_size=7,
stride=1,
padding=3,
norm_cfg=None,
act_cfg=dict(type='ReLU')),
ConvModule(
in_channels=32,
out_channels=64,
kernel_size=7,
stride=1,
padding=3,
norm_cfg=None,
act_cfg=dict(type='ReLU')),
ConvModule(
in_channels=64,
out_channels=32,
kernel_size=7,
stride=1,
padding=3,
norm_cfg=None,
act_cfg=dict(type='ReLU')),
ConvModule(
in_channels=32,
out_channels=16,
kernel_size=7,
stride=1,
padding=3,
norm_cfg=None,
act_cfg=dict(type='ReLU')),
ConvModule(
in_channels=16,
out_channels=2,
kernel_size=7,
stride=1,
padding=3,
norm_cfg=None,
act_cfg=None))
def forward(self, tensor_input):
"""
Args:
tensor_input (Tensor): Input tensor with shape (b, 8, h, w).
8 channels contain:
[reference image (3), neighbor image (3), initial flow (2)].
Returns:
Tensor: Refined flow with shape (b, 2, h, w)
"""
return self.basic_module(tensor_input)
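
if __name__ == '__main__':
    # Minimal smoke test (editor's addition): BasicVSRNet upsamples a clip
    # by x4. Inputs whose height/width are not multiples of 32 are handled
    # by SPyNet's internal resizing. Weights are random here, so only the
    # output shape is meaningful.
    model = BasicVSRNet(mid_channels=64, num_blocks=5, spynet_pretrained=None)
    lrs = torch.rand(1, 4, 3, 64, 96)
    with torch.no_grad():
        out = model(lrs)
    print(out.shape)  # torch.Size([1, 4, 3, 256, 384])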
| 14,148 | 32.608076 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/base_dataset.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from abc import ABCMeta, abstractmethod
from torch.utils.data import Dataset
from .pipelines import Compose
class BaseDataset(Dataset, metaclass=ABCMeta):
"""Base class for datasets.
All datasets should subclass it.
All subclasses should overwrite:
        ``load_annotations``, which loads annotation information and
        generates the list of samples.
Args:
pipeline (list[dict | callable]): A sequence of data transforms.
test_mode (bool): If True, the dataset will work in test mode.
Otherwise, in train mode.
"""
def __init__(self, pipeline, test_mode=False):
super().__init__()
self.test_mode = test_mode
self.pipeline = Compose(pipeline)
@abstractmethod
def load_annotations(self):
"""Abstract function for loading annotation.
All subclasses should overwrite this function
"""
def prepare_train_data(self, idx):
"""Prepare training data.
Args:
idx (int): Index of the training batch data.
Returns:
dict: Returned training batch.
"""
results = copy.deepcopy(self.data_infos[idx])
return self.pipeline(results)
def prepare_test_data(self, idx):
"""Prepare testing data.
Args:
idx (int): Index for getting each testing batch.
Returns:
            dict: Returned testing batch.
"""
results = copy.deepcopy(self.data_infos[idx])
return self.pipeline(results)
def __len__(self):
"""Length of the dataset.
Returns:
int: Length of the dataset.
"""
return len(self.data_infos)
def __getitem__(self, idx):
"""Get item at each call.
Args:
idx (int): Index for getting each item.
"""
if self.test_mode:
return self.prepare_test_data(idx)
return self.prepare_train_data(idx)
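
if __name__ == '__main__':
    # Sketch (editor's addition) of the minimal subclass contract: concrete
    # datasets populate ``self.data_infos`` via ``load_annotations``, and
    # the pipeline consumes a deep copy of each info dict.
    class ToyDataset(BaseDataset):

        def __init__(self, pipeline, test_mode=False):
            super().__init__(pipeline, test_mode)
            self.data_infos = self.load_annotations()

        def load_annotations(self):
            return [dict(idx=i) for i in range(3)]

    dataset = ToyDataset(pipeline=[], test_mode=True)
    print(len(dataset), dataset[0])  # 3 {'idx': 0}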
| 2,006 | 24.405063 | 73 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/builder.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import platform
import random
from functools import partial
import numpy as np
import torch
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import build_from_cfg
from packaging import version
from torch.utils.data import ConcatDataset, DataLoader
from .dataset_wrappers import RepeatDataset
from .registry import DATASETS
from .samplers import DistributedSampler
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
soft_limit = min(max(4096, base_soft_limit), hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
def _concat_dataset(cfg, default_args=None):
"""Concat datasets with different ann_file but the same type.
Args:
cfg (dict): The config of dataset.
default_args (dict, optional): Default initialization arguments.
Default: None.
Returns:
Dataset: The concatenated dataset.
"""
ann_files = cfg['ann_file']
datasets = []
num_dset = len(ann_files)
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
data_cfg['ann_file'] = ann_files[i]
datasets.append(build_dataset(data_cfg, default_args))
return ConcatDataset(datasets)
def build_dataset(cfg, default_args=None):
"""Build a dataset from config dict.
    It supports a variety of dataset configs. If ``cfg`` is a sequence (list
    or tuple), it will be a concatenated dataset of the datasets specified by
    the sequence. If it is a ``RepeatDataset``, then it will repeat the
    dataset ``cfg['dataset']`` for ``cfg['times']`` times. If the ``ann_file``
    of the dataset is a sequence, then it will build a concatenated dataset
    with the same dataset type but different ``ann_file``.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
default_args (dict, optional): Default initialization arguments.
Default: None.
Returns:
Dataset: The constructed dataset.
"""
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif cfg['type'] == 'RepeatDataset':
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
elif isinstance(cfg.get('ann_file'), (list, tuple)):
dataset = _concat_dataset(cfg, default_args)
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
def build_dataloader(dataset,
samples_per_gpu,
workers_per_gpu,
num_gpus=1,
dist=True,
shuffle=True,
seed=None,
drop_last=False,
pin_memory=True,
persistent_workers=True,
**kwargs):
"""Build PyTorch DataLoader.
In distributed training, each GPU/process has a dataloader.
In non-distributed training, there is only one dataloader for all GPUs.
Args:
dataset (:obj:`Dataset`): A PyTorch dataset.
samples_per_gpu (int): Number of samples on each GPU, i.e.,
batch size of each GPU.
workers_per_gpu (int): How many subprocesses to use for data
loading for each GPU.
num_gpus (int): Number of GPUs. Only used in non-distributed
training. Default: 1.
dist (bool): Distributed training/test or not. Default: True.
shuffle (bool): Whether to shuffle the data at every epoch.
Default: True.
seed (int | None): Seed to be used. Default: None.
drop_last (bool): Whether to drop the last incomplete batch in epoch.
Default: False
pin_memory (bool): Whether to use pin_memory in DataLoader.
Default: True
persistent_workers (bool): If True, the data loader will not shutdown
the worker processes after a dataset has been consumed once.
This allows to maintain the workers Dataset instances alive.
The argument also has effect in PyTorch>=1.7.0.
Default: True
kwargs (dict, optional): Any keyword argument to be used to initialize
DataLoader.
Returns:
DataLoader: A PyTorch dataloader.
"""
rank, world_size = get_dist_info()
if dist:
sampler = DistributedSampler(
dataset,
world_size,
rank,
shuffle=shuffle,
samples_per_gpu=samples_per_gpu,
seed=seed)
shuffle = False
batch_size = samples_per_gpu
num_workers = workers_per_gpu
else:
sampler = None
batch_size = num_gpus * samples_per_gpu
num_workers = num_gpus * workers_per_gpu
init_fn = partial(
worker_init_fn, num_workers=num_workers, rank=rank,
seed=seed) if seed is not None else None
if version.parse(torch.__version__) >= version.parse('1.7.0'):
kwargs['persistent_workers'] = persistent_workers
data_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
pin_memory=pin_memory,
shuffle=shuffle,
worker_init_fn=init_fn,
drop_last=drop_last,
**kwargs)
return data_loader
def worker_init_fn(worker_id, num_workers, rank, seed):
"""Function to initialize each worker.
The seed of each worker equals to
``num_worker * rank + worker_id + user_seed``.
Args:
worker_id (int): Id for each worker.
num_workers (int): Number of workers.
rank (int): Rank in distributed training.
seed (int): Random seed.
"""
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
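# Hedged illustration (added; the numbers below are made-up assumptions, not
# from the original file): a minimal check of the per-worker seed formula
# documented above.
def _example_worker_seed():
    # worker_seed = num_workers * rank + worker_id + seed
    # e.g. 4 workers on rank 1, worker 2, user seed 10 -> 4 * 1 + 2 + 10 = 16
    worker_init_fn(worker_id=2, num_workers=4, rank=1, seed=10)
    assert 4 * 1 + 2 + 10 == 16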
| 6,177 | 32.945055 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/samplers/distributed_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
from __future__ import division
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmedit.core.utils import sync_random_seed
class DistributedSampler(_DistributedSampler):
"""DistributedSampler inheriting from `torch.utils.data.DistributedSampler`.
    In older versions of PyTorch, `DistributedSampler` has no `shuffle`
    argument; this subclass adds one.
"""
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
samples_per_gpu=1,
seed=0):
super().__init__(dataset, num_replicas=num_replicas, rank=rank)
self.shuffle = shuffle
self.samples_per_gpu = samples_per_gpu
# fix the bug of the official implementation
self.num_samples_per_replica = int(
math.ceil(
len(self.dataset) * 1.0 / self.num_replicas / samples_per_gpu))
self.num_samples = self.num_samples_per_replica * self.samples_per_gpu
self.total_size = self.num_samples * self.num_replicas
# In distributed sampling, different ranks should sample
# non-overlapped data in the dataset. Therefore, this function
# is used to make sure that each rank shuffles the data indices
# in the same order based on the same seed. Then different ranks
# could use different indices to select non-overlapped data from the
# same data list.
self.seed = sync_random_seed(seed)
        # to avoid the padding bug when the dataset is too small
if len(dataset) < self.num_replicas * samples_per_gpu:
raise ValueError(
                'The dataset may be too small for the distributed sampler '
                'to pad it correctly. We highly recommend using fewer GPUs '
                'to finish your work')
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
# When :attr:`shuffle=True`, this ensures all replicas
# use a different random ordering for each epoch.
# Otherwise, the next iteration of this sampler will
# yield the same ordering.
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
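# Hedged sketch (illustration only, plain lists instead of a real sampler):
# the padding and rank-wise subsampling performed in __iter__ above. With 10
# samples, 4 replicas and samples_per_gpu=1, num_samples = ceil(10 / 4) = 3,
# total_size = 12, so two indices repeat and each rank reads with stride 4.
def _example_rank_subsample():
    indices = list(range(10))
    total_size, num_replicas, rank = 12, 4, 1
    indices += indices[:(total_size - len(indices))]  # pad -> [..., 9, 0, 1]
    assert indices[rank:total_size:num_replicas] == [1, 5, 9]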
| 2,892 | 39.180556 | 80 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/crop.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import random
import mmcv
import numpy as np
from torch.nn.modules.utils import _pair
from ..registry import PIPELINES
from .utils import random_choose_unknown
@PIPELINES.register_module()
class Crop:
"""Crop data to specific size for training.
Args:
keys (Sequence[str]): The images to be cropped.
crop_size (Tuple[int]): Target spatial size (h, w).
random_crop (bool): If set to True, it will random crop
image. Otherwise, it will work as center crop.
is_pad_zeros (bool, optional): Whether to pad the image with 0 if
crop_size is greater than image size. Default: False.
"""
def __init__(self, keys, crop_size, random_crop=True, is_pad_zeros=False):
if not mmcv.is_tuple_of(crop_size, int):
raise TypeError(
'Elements of crop_size must be int and crop_size must be'
f' tuple, but got {type(crop_size[0])} in {type(crop_size)}')
self.keys = keys
self.crop_size = crop_size
self.random_crop = random_crop
self.is_pad_zeros = is_pad_zeros
def _crop(self, data):
if not isinstance(data, list):
data_list = [data]
else:
data_list = data
crop_bbox_list = []
data_list_ = []
for item in data_list:
data_h, data_w = item.shape[:2]
crop_h, crop_w = self.crop_size
if self.is_pad_zeros:
crop_y_offset, crop_x_offset = 0, 0
if crop_h > data_h:
crop_y_offset = (crop_h - data_h) // 2
if crop_w > data_w:
crop_x_offset = (crop_w - data_w) // 2
if crop_y_offset > 0 or crop_x_offset > 0:
pad_width = [(2 * crop_y_offset, 2 * crop_y_offset),
(2 * crop_x_offset, 2 * crop_x_offset)]
if item.ndim == 3:
pad_width.append((0, 0))
item = np.pad(
item,
tuple(pad_width),
mode='constant',
constant_values=0)
data_h, data_w = item.shape[:2]
crop_h = min(data_h, crop_h)
crop_w = min(data_w, crop_w)
if self.random_crop:
x_offset = np.random.randint(0, data_w - crop_w + 1)
y_offset = np.random.randint(0, data_h - crop_h + 1)
else:
x_offset = max(0, (data_w - crop_w)) // 2
y_offset = max(0, (data_h - crop_h)) // 2
crop_bbox = [x_offset, y_offset, crop_w, crop_h]
item_ = item[y_offset:y_offset + crop_h,
x_offset:x_offset + crop_w, ...]
crop_bbox_list.append(crop_bbox)
data_list_.append(item_)
if not isinstance(data, list):
return data_list_[0], crop_bbox_list[0]
return data_list_, crop_bbox_list
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for k in self.keys:
data_, crop_bbox = self._crop(results[k])
results[k] = data_
results[k + '_crop_bbox'] = crop_bbox
results['crop_size'] = self.crop_size
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'keys={self.keys}, crop_size={self.crop_size}, '
f'random_crop={self.random_crop}')
return repr_str
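# Hedged usage sketch (the dummy shapes and key name are assumptions, not
# from the original file): center cropping a single numpy image;
# '<key>_crop_bbox' records [x_offset, y_offset, crop_w, crop_h].
def _example_center_crop():
    crop = Crop(keys=['img'], crop_size=(4, 6), random_crop=False)
    results = crop(dict(img=np.zeros((10, 12, 3), dtype=np.float32)))
    assert results['img'].shape == (4, 6, 3)
    assert results['img_crop_bbox'] == [3, 3, 6, 4]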
@PIPELINES.register_module()
class RandomResizedCrop(object):
"""Crop data to random size and aspect ratio.
    A crop with a random proportion of the original image area
    and a random aspect ratio is made.
The cropped image is finally resized to a given size specified
by 'crop_size'. Modified keys are the attributes specified in "keys".
This code is partially adopted from
torchvision.transforms.RandomResizedCrop:
[https://pytorch.org/vision/stable/_modules/torchvision/transforms/\
transforms.html#RandomResizedCrop].
Args:
keys (list[str]): The images to be resized and random-cropped.
crop_size (int | tuple[int]): Target spatial size (h, w).
scale (tuple[float], optional): Range of the proportion of the original
image to be cropped. Default: (0.08, 1.0).
ratio (tuple[float], optional): Range of aspect ratio of the crop.
Default: (3. / 4., 4. / 3.).
interpolation (str, optional): Algorithm used for interpolation.
It can be only either one of the following:
"nearest" | "bilinear" | "bicubic" | "area" | "lanczos".
Default: "bilinear".
"""
def __init__(self,
keys,
crop_size,
scale=(0.08, 1.0),
ratio=(3. / 4., 4. / 3.),
interpolation='bilinear'):
assert keys, 'Keys should not be empty.'
if isinstance(crop_size, int):
crop_size = (crop_size, crop_size)
elif not mmcv.is_tuple_of(crop_size, int):
raise TypeError('"crop_size" must be an integer '
'or a tuple of integers, but got '
f'{type(crop_size)}')
if not mmcv.is_tuple_of(scale, float):
raise TypeError('"scale" must be a tuple of float, '
f'but got {type(scale)}')
if not mmcv.is_tuple_of(ratio, float):
raise TypeError('"ratio" must be a tuple of float, '
f'but got {type(ratio)}')
self.keys = keys
self.crop_size = crop_size
self.scale = scale
self.ratio = ratio
self.interpolation = interpolation
def get_params(self, data):
"""Get parameters for a random sized crop.
Args:
data (np.ndarray): Image of type numpy array to be cropped.
Returns:
A tuple containing the coordinates of the top left corner
and the chosen crop size.
"""
data_h, data_w = data.shape[:2]
area = data_h * data_w
for _ in range(10):
target_area = random.uniform(*self.scale) * area
log_ratio = (math.log(self.ratio[0]), math.log(self.ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
crop_w = int(round(math.sqrt(target_area * aspect_ratio)))
crop_h = int(round(math.sqrt(target_area / aspect_ratio)))
if 0 < crop_w <= data_w and 0 < crop_h <= data_h:
top = random.randint(0, data_h - crop_h)
left = random.randint(0, data_w - crop_w)
return top, left, crop_h, crop_w
# Fall back to center crop
in_ratio = float(data_w) / float(data_h)
if (in_ratio < min(self.ratio)):
crop_w = data_w
crop_h = int(round(crop_w / min(self.ratio)))
elif (in_ratio > max(self.ratio)):
crop_h = data_h
crop_w = int(round(crop_h * max(self.ratio)))
else: # whole image
crop_w = data_w
crop_h = data_h
top = (data_h - crop_h) // 2
left = (data_w - crop_w) // 2
return top, left, crop_h, crop_w
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for k in self.keys:
top, left, crop_h, crop_w = self.get_params(results[k])
crop_bbox = [top, left, crop_w, crop_h]
results[k] = results[k][top:top + crop_h, left:left + crop_w, ...]
results[k] = mmcv.imresize(
results[k],
self.crop_size,
return_scale=False,
interpolation=self.interpolation)
results[k + '_crop_bbox'] = crop_bbox
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, crop_size={self.crop_size}, '
f'scale={self.scale}, ratio={self.ratio}, '
f'interpolation={self.interpolation})')
return repr_str
@PIPELINES.register_module()
class FixedCrop:
"""Crop paired data (at a specific position) to specific size for training.
Args:
keys (Sequence[str]): The images to be cropped.
crop_size (Tuple[int]): Target spatial size (h, w).
crop_pos (Tuple[int]): Specific position (x, y). If set to None,
random initialize the position to crop paired data batch.
"""
def __init__(self, keys, crop_size, crop_pos=None):
if not mmcv.is_tuple_of(crop_size, int):
raise TypeError(
'Elements of crop_size must be int and crop_size must be'
f' tuple, but got {type(crop_size[0])} in {type(crop_size)}')
if not mmcv.is_tuple_of(crop_pos, int) and (crop_pos is not None):
raise TypeError(
'Elements of crop_pos must be int and crop_pos must be'
f' tuple or None, but got {type(crop_pos[0])} in '
f'{type(crop_pos)}')
self.keys = keys
self.crop_size = crop_size
self.crop_pos = crop_pos
def _crop(self, data, x_offset, y_offset, crop_w, crop_h):
crop_bbox = [x_offset, y_offset, crop_w, crop_h]
data_ = data[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w,
...]
return data_, crop_bbox
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
if isinstance(results[self.keys[0]], list):
data_h, data_w = results[self.keys[0]][0].shape[:2]
else:
data_h, data_w = results[self.keys[0]].shape[:2]
crop_h, crop_w = self.crop_size
crop_h = min(data_h, crop_h)
crop_w = min(data_w, crop_w)
if self.crop_pos is None:
x_offset = np.random.randint(0, data_w - crop_w + 1)
y_offset = np.random.randint(0, data_h - crop_h + 1)
else:
x_offset, y_offset = self.crop_pos
crop_w = min(data_w - x_offset, crop_w)
crop_h = min(data_h - y_offset, crop_h)
for k in self.keys:
images = results[k]
is_list = isinstance(images, list)
if not is_list:
images = [images]
cropped_images = []
crop_bbox = None
for image in images:
# In fixed crop for paired images, sizes should be the same
if (image.shape[0] != data_h or image.shape[1] != data_w):
raise ValueError(
'The sizes of paired images should be the same. '
f'Expected ({data_h}, {data_w}), '
f'but got ({image.shape[0]}, '
f'{image.shape[1]}).')
data_, crop_bbox = self._crop(image, x_offset, y_offset,
crop_w, crop_h)
cropped_images.append(data_)
results[k + '_crop_bbox'] = crop_bbox
if not is_list:
cropped_images = cropped_images[0]
results[k] = cropped_images
results['crop_size'] = self.crop_size
results['crop_pos'] = self.crop_pos
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'keys={self.keys}, crop_size={self.crop_size}, '
f'crop_pos={self.crop_pos}')
return repr_str
@PIPELINES.register_module()
class PairedRandomCrop:
"""Paried random crop.
It crops a pair of lq and gt images with corresponding locations.
It also supports accepting lq list and gt list.
Required keys are "scale", "lq", and "gt",
added or modified keys are "lq" and "gt".
Args:
gt_patch_size (int): cropped gt patch size.
"""
def __init__(self, gt_patch_size):
self.gt_patch_size = gt_patch_size
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
scale = results['scale']
lq_patch_size = self.gt_patch_size // scale
lq_is_list = isinstance(results['lq'], list)
if not lq_is_list:
results['lq'] = [results['lq']]
gt_is_list = isinstance(results['gt'], list)
if not gt_is_list:
results['gt'] = [results['gt']]
h_lq, w_lq, _ = results['lq'][0].shape
h_gt, w_gt, _ = results['gt'][0].shape
if h_gt != h_lq * scale or w_gt != w_lq * scale:
raise ValueError(
                f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x '
                f'multiplication of LQ ({h_lq}, {w_lq}).')
if h_lq < lq_patch_size or w_lq < lq_patch_size:
raise ValueError(
                f'LQ ({h_lq}, {w_lq}) is smaller than patch size '
f'({lq_patch_size}, {lq_patch_size}). Please check '
f'{results["lq_path"][0]} and {results["gt_path"][0]}.')
# randomly choose top and left coordinates for lq patch
top = np.random.randint(h_lq - lq_patch_size + 1)
left = np.random.randint(w_lq - lq_patch_size + 1)
# crop lq patch
results['lq'] = [
v[top:top + lq_patch_size, left:left + lq_patch_size, ...]
for v in results['lq']
]
# crop corresponding gt patch
top_gt, left_gt = int(top * scale), int(left * scale)
results['gt'] = [
v[top_gt:top_gt + self.gt_patch_size,
left_gt:left_gt + self.gt_patch_size, ...] for v in results['gt']
]
if not lq_is_list:
results['lq'] = results['lq'][0]
if not gt_is_list:
results['gt'] = results['gt'][0]
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(gt_patch_size={self.gt_patch_size})'
return repr_str
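# Hedged usage sketch (the dummy inputs are assumptions): lq and gt are
# cropped at corresponding locations, so gt patches are `scale` times larger
# than lq patches.
def _example_paired_random_crop():
    crop = PairedRandomCrop(gt_patch_size=128)
    results = dict(
        scale=4,
        lq=np.zeros((64, 64, 3), dtype=np.float32),
        gt=np.zeros((256, 256, 3), dtype=np.float32))
    results = crop(results)
    assert results['lq'].shape == (32, 32, 3)
    assert results['gt'].shape == (128, 128, 3)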
@PIPELINES.register_module()
class CropAroundCenter:
"""Randomly crop the images around unknown area in the center 1/4 images.
This cropping strategy is adopted in GCA matting. The `unknown area` is the
same as `semi-transparent area`.
https://arxiv.org/pdf/2001.04069.pdf
It retains the center 1/4 images and resizes the images to 'crop_size'.
Required keys are "fg", "bg", "trimap" and "alpha", added or modified keys
are "crop_bbox", "fg", "bg", "trimap" and "alpha".
Args:
crop_size (int | tuple): Desired output size. If int, square crop is
applied.
"""
def __init__(self, crop_size):
if mmcv.is_tuple_of(crop_size, int):
assert len(crop_size) == 2, 'length of crop_size must be 2.'
elif not isinstance(crop_size, int):
raise TypeError('crop_size must be int or a tuple of int, but got '
f'{type(crop_size)}')
self.crop_size = _pair(crop_size)
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
fg = results['fg']
alpha = results['alpha']
trimap = results['trimap']
bg = results['bg']
h, w = fg.shape[:2]
assert bg.shape == fg.shape, (f'shape of bg {bg.shape} should be the '
f'same as fg {fg.shape}.')
crop_h, crop_w = self.crop_size
# Make sure h >= crop_h, w >= crop_w. If not, rescale imgs
rescale_ratio = max(crop_h / h, crop_w / w)
if rescale_ratio > 1:
new_h = max(int(h * rescale_ratio), crop_h)
new_w = max(int(w * rescale_ratio), crop_w)
fg = mmcv.imresize(fg, (new_w, new_h), interpolation='nearest')
alpha = mmcv.imresize(
alpha, (new_w, new_h), interpolation='nearest')
trimap = mmcv.imresize(
trimap, (new_w, new_h), interpolation='nearest')
bg = mmcv.imresize(bg, (new_w, new_h), interpolation='bicubic')
h, w = new_h, new_w
# resize to 1/4 to ignore small unknown patches
small_trimap = mmcv.imresize(
trimap, (w // 4, h // 4), interpolation='nearest')
# find unknown area in center 1/4 region
margin_h, margin_w = crop_h // 2, crop_w // 2
sample_area = small_trimap[margin_h // 4:(h - margin_h) // 4,
margin_w // 4:(w - margin_w) // 4]
unknown_xs, unknown_ys = np.where(sample_area == 128)
unknown_num = len(unknown_xs)
if unknown_num < 10:
# too few unknown area in the center, crop from the whole image
top = np.random.randint(0, h - crop_h + 1)
left = np.random.randint(0, w - crop_w + 1)
else:
idx = np.random.randint(unknown_num)
top = unknown_xs[idx] * 4
left = unknown_ys[idx] * 4
bottom = top + crop_h
right = left + crop_w
results['fg'] = fg[top:bottom, left:right]
results['alpha'] = alpha[top:bottom, left:right]
results['trimap'] = trimap[top:bottom, left:right]
results['bg'] = bg[top:bottom, left:right]
results['crop_bbox'] = (left, top, right, bottom)
return results
def __repr__(self):
return self.__class__.__name__ + f'(crop_size={self.crop_size})'
@PIPELINES.register_module()
class CropAroundUnknown:
"""Crop around unknown area with a randomly selected scale.
Randomly select the w and h from a list of (w, h).
Required keys are the keys in argument `keys`, added or
modified keys are "crop_bbox" and the keys in argument `keys`.
This class assumes value of "alpha" ranges from 0 to 255.
Args:
keys (Sequence[str]): The images to be cropped. It must contain
'alpha'. If unknown_source is set to 'trimap', then it must also
contain 'trimap'.
crop_sizes (list[int | tuple[int]]): List of (w, h) to be selected.
unknown_source (str, optional): Unknown area to select from. It must be
            'alpha' or 'trimap'. Default to 'alpha'.
interpolations (str | list[str], optional): Interpolation method of
mmcv.imresize. The interpolation operation will be applied when
image size is smaller than the crop_size. If given as a list of
str, it should have the same length as `keys`. Or if given as a
str all the keys will be resized with the same method.
Default to 'bilinear'.
"""
def __init__(self,
keys,
crop_sizes,
unknown_source='alpha',
interpolations='bilinear'):
if 'alpha' not in keys:
raise ValueError(f'"alpha" must be in keys, but got {keys}')
self.keys = keys
if not isinstance(crop_sizes, list):
raise TypeError(
f'Crop sizes must be list, but got {type(crop_sizes)}.')
self.crop_sizes = [_pair(crop_size) for crop_size in crop_sizes]
if not mmcv.is_tuple_of(self.crop_sizes[0], int):
raise TypeError('Elements of crop_sizes must be int or tuple of '
f'int, but got {type(self.crop_sizes[0][0])}.')
if unknown_source not in ['alpha', 'trimap']:
raise ValueError('unknown_source must be "alpha" or "trimap", '
f'but got {unknown_source}')
if unknown_source not in keys:
# it could only be trimap, since alpha is checked before
raise ValueError(
'if unknown_source is "trimap", it must also be set in keys')
self.unknown_source = unknown_source
if isinstance(interpolations, str):
self.interpolations = [interpolations] * len(self.keys)
elif mmcv.is_list_of(interpolations,
str) and len(interpolations) == len(self.keys):
self.interpolations = interpolations
else:
raise TypeError(
'interpolations must be a str or list of str with '
f'the same length as keys, but got {interpolations}')
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
h, w = results[self.keys[0]].shape[:2]
rand_ind = np.random.randint(len(self.crop_sizes))
crop_h, crop_w = self.crop_sizes[rand_ind]
# Make sure h >= crop_h, w >= crop_w. If not, rescale imgs
rescale_ratio = max(crop_h / h, crop_w / w)
if rescale_ratio > 1:
h = max(int(h * rescale_ratio), crop_h)
w = max(int(w * rescale_ratio), crop_w)
for key, interpolation in zip(self.keys, self.interpolations):
results[key] = mmcv.imresize(
results[key], (w, h), interpolation=interpolation)
# Select the cropping top-left point which is an unknown pixel
if self.unknown_source == 'alpha':
unknown = (results['alpha'] > 0) & (results['alpha'] < 255)
else:
unknown = results['trimap'] == 128
top, left = random_choose_unknown(unknown.squeeze(), (crop_h, crop_w))
bottom = top + crop_h
right = left + crop_w
for key in self.keys:
results[key] = results[key][top:bottom, left:right]
results['crop_bbox'] = (left, top, right, bottom)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, crop_sizes={self.crop_sizes}, '
f"unknown_source='{self.unknown_source}', "
f'interpolations={self.interpolations})')
return repr_str
@PIPELINES.register_module()
class CropAroundFg:
"""Crop around the whole foreground in the segmentation mask.
Required keys are "seg" and the keys in argument `keys`.
Meanwhile, "seg" must be in argument `keys`. Added or modified keys are
"crop_bbox" and the keys in argument `keys`.
Args:
keys (Sequence[str]): The images to be cropped. It must contain
'seg'.
bd_ratio_range (tuple, optional): The range of the boundary (bd) ratio
to select from. The boundary ratio is the ratio of the boundary to
the minimal bbox that contains the whole foreground given by
segmentation. Default to (0.1, 0.4).
test_mode (bool): Whether use test mode. In test mode, the tight crop
            area of the foreground will be extended to a square.
Default to False.
"""
def __init__(self, keys, bd_ratio_range=(0.1, 0.4), test_mode=False):
if 'seg' not in keys:
raise ValueError(f'"seg" must be in keys, but got {keys}')
if (not mmcv.is_tuple_of(bd_ratio_range, float)
or len(bd_ratio_range) != 2):
            raise TypeError('bd_ratio_range must be a tuple of 2 float, '
                            f'but got {bd_ratio_range}')
self.keys = keys
self.bd_ratio_range = bd_ratio_range
self.test_mode = test_mode
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
seg = results['seg']
height, width = seg.shape[:2]
# get foreground bbox
fg_coor = np.array(np.where(seg))
top, left = np.amin(fg_coor, axis=1)
bottom, right = np.amax(fg_coor, axis=1)
# enlarge bbox
long_side = np.maximum(bottom - top, right - left)
if self.test_mode:
bottom = top + long_side
right = left + long_side
boundary_ratio = np.random.uniform(*self.bd_ratio_range)
boundary = int(np.round(boundary_ratio * long_side))
# NOTE: Different from the original repo, we keep track of the four
# corners of the bbox (left, top, right, bottom) while the original
# repo use (top, left, height, width) to represent bbox. This may
        # introduce a difference of 1 pixel.
top = max(top - boundary, 0)
left = max(left - boundary, 0)
bottom = min(bottom + boundary, height)
right = min(right + boundary, width)
for key in self.keys:
results[key] = results[key][top:bottom, left:right]
results['crop_bbox'] = (left, top, right, bottom)
return results
@PIPELINES.register_module()
class ModCrop:
"""Mod crop gt images, used during testing.
Required keys are "scale" and "gt",
added or modified keys are "gt".
"""
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
img = results['gt'].copy()
scale = results['scale']
if img.ndim in [2, 3]:
h, w = img.shape[0], img.shape[1]
h_remainder, w_remainder = h % scale, w % scale
img = img[:h - h_remainder, :w - w_remainder, ...]
else:
raise ValueError(f'Wrong img ndim: {img.ndim}.')
results['gt'] = img
return results
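# Hedged worked example: with scale=4, a (7, 10) gt is cropped to the largest
# size divisible by 4 in each dimension, i.e. (4, 8).
def _example_mod_crop():
    results = ModCrop()(dict(gt=np.zeros((7, 10, 3)), scale=4))
    assert results['gt'].shape == (4, 8, 3)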
@PIPELINES.register_module()
class CropLike:
"""Crop/pad the image in the target_key according to the size of image
    in the reference_key.
Args:
target_key (str): The key needs to be cropped.
reference_key (str | None): The reference key, need its size.
Default: None.
"""
def __init__(self, target_key, reference_key=None):
assert reference_key and target_key
self.target_key = target_key
self.reference_key = reference_key
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Require self.target_key and self.reference_key.
Returns:
dict: A dict containing the processed data and information.
Modify self.target_key.
"""
size = results[self.reference_key].shape
old_image = results[self.target_key]
old_size = old_image.shape
h, w = old_size[:2]
new_size = size[:2] + old_size[2:]
h_cover, w_cover = min(h, size[0]), min(w, size[1])
format_image = np.zeros(new_size, dtype=old_image.dtype)
format_image[:h_cover, :w_cover] = old_image[:h_cover, :w_cover]
results[self.target_key] = format_image
return results
def __repr__(self):
return (self.__class__.__name__ + f' target_key={self.target_key}, ' +
f'reference_key={self.reference_key}')
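# Hedged worked example (the key names 'img'/'ref' are assumptions): the
# target is copied into a zero canvas with the reference's spatial size,
# cropping the overlap and zero-padding the remainder.
def _example_crop_like():
    results = dict(
        img=np.ones((5, 8, 3), dtype=np.float32),
        ref=np.zeros((4, 10, 3), dtype=np.float32))
    results = CropLike(target_key='img', reference_key='ref')(results)
    assert results['img'].shape == (4, 10, 3)
    assert results['img'][:, 8:].sum() == 0  # padded columns stay zero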
| 28,291 | 36.722667 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/augmentation.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import math
import numbers
import os
import os.path as osp
import random
import cv2
import mmcv
import numpy as np
import torchvision.transforms as transforms
from PIL import Image
from ..registry import PIPELINES
@PIPELINES.register_module()
class Resize:
"""Resize data to a specific size for training or resize the images to fit
the network input regulation for testing.
When used for resizing images to fit network input regulation, the case is
that a network may have several downsample and then upsample operation,
then the input height and width should be divisible by the downsample
factor of the network.
For example, the network would downsample the input for 5 times with
stride 2, then the downsample factor is 2^5 = 32 and the height
and width should be divisible by 32.
Required keys are the keys in attribute "keys", added or modified keys are
"keep_ratio", "scale_factor", "interpolation" and the
keys in attribute "keys".
All keys in "keys" should have the same shape. "test_trans" is used to
record the test transformation to align the input's shape.
Args:
keys (list[str]): The images to be resized.
scale (float | tuple[int]): If scale is tuple[int], target spatial
size (h, w). Otherwise, target spatial size is scaled by input
size.
Note that when it is used, `size_factor` and `max_size` are
useless. Default: None
keep_ratio (bool): If set to True, images will be resized without
changing the aspect ratio. Otherwise, it will resize images to a
given size. Default: False.
            Note that it is used together with `scale`.
size_factor (int): Let the output shape be a multiple of size_factor.
Default:None.
Note that when it is used, `scale` should be set to None and
`keep_ratio` should be set to False.
max_size (int): The maximum size of the longest side of the output.
Default:None.
            Note that it is used together with `size_factor`.
interpolation (str): Algorithm used for interpolation:
"nearest" | "bilinear" | "bicubic" | "area" | "lanczos".
Default: "bilinear".
backend (str | None): The image resize backend type. Options are `cv2`,
`pillow`, `None`. If backend is None, the global imread_backend
specified by ``mmcv.use_backend()`` will be used.
Default: None.
output_keys (list[str] | None): The resized images. Default: None
Note that if it is not `None`, its length should be equal to keys.
"""
def __init__(self,
keys,
scale=None,
keep_ratio=False,
size_factor=None,
max_size=None,
interpolation='bilinear',
backend=None,
output_keys=None):
assert keys, 'Keys should not be empty.'
if output_keys:
assert len(output_keys) == len(keys)
else:
output_keys = keys
if size_factor:
            assert scale is None, ('When size_factor is used, scale should '
                                   f'be None. But received {scale}.')
assert keep_ratio is False, ('When size_factor is used, '
'keep_ratio should be False.')
if max_size:
assert size_factor is not None, (
'When max_size is used, '
f'size_factor should also be set. But received {size_factor}.')
if isinstance(scale, float):
if scale <= 0:
raise ValueError(f'Invalid scale {scale}, must be positive.')
elif mmcv.is_tuple_of(scale, int):
max_long_edge = max(scale)
max_short_edge = min(scale)
if max_short_edge == -1:
# assign np.inf to long edge for rescaling short edge later.
scale = (np.inf, max_long_edge)
elif scale is not None:
raise TypeError(
f'Scale must be None, float or tuple of int, but got '
f'{type(scale)}.')
self.keys = keys
self.output_keys = output_keys
self.scale = scale
self.size_factor = size_factor
self.max_size = max_size
self.keep_ratio = keep_ratio
self.interpolation = interpolation
self.backend = backend
def _resize(self, img):
if self.keep_ratio:
img, self.scale_factor = mmcv.imrescale(
img,
self.scale,
return_scale=True,
interpolation=self.interpolation,
backend=self.backend)
else:
img, w_scale, h_scale = mmcv.imresize(
img,
self.scale,
return_scale=True,
interpolation=self.interpolation,
backend=self.backend)
self.scale_factor = np.array((w_scale, h_scale), dtype=np.float32)
return img
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
if self.size_factor:
h, w = results[self.keys[0]].shape[:2]
new_h = h - (h % self.size_factor)
new_w = w - (w % self.size_factor)
if self.max_size:
new_h = min(self.max_size - (self.max_size % self.size_factor),
new_h)
new_w = min(self.max_size - (self.max_size % self.size_factor),
new_w)
self.scale = (new_w, new_h)
for key, out_key in zip(self.keys, self.output_keys):
results[out_key] = self._resize(results[key])
if len(results[out_key].shape) == 2:
results[out_key] = np.expand_dims(results[out_key], axis=2)
results['scale_factor'] = self.scale_factor
results['keep_ratio'] = self.keep_ratio
results['interpolation'] = self.interpolation
results['backend'] = self.backend
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f'(keys={self.keys}, output_keys={self.output_keys}, '
f'scale={self.scale}, '
f'keep_ratio={self.keep_ratio}, size_factor={self.size_factor}, '
f'max_size={self.max_size}, interpolation={self.interpolation})')
return repr_str
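# Hedged usage sketch (the dummy input is an assumption): with size_factor=32
# the output height and width are rounded down to multiples of 32,
# e.g. (100, 150) -> (96, 128).
def _example_resize_size_factor():
    resize = Resize(keys=['lq'], scale=None, keep_ratio=False, size_factor=32)
    results = resize(dict(lq=np.zeros((100, 150, 3), dtype=np.float32)))
    assert results['lq'].shape == (96, 128, 3)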
@PIPELINES.register_module()
class RandomRotation:
"""Rotate the image by a randomly-chosen angle, measured in degree.
Args:
keys (list[str]): The images to be rotated.
degrees (tuple[float] | tuple[int] | float | int): If it is a tuple,
it represents a range (min, max). If it is a float or int,
the range is constructed as (-degrees, degrees).
"""
def __init__(self, keys, degrees):
if isinstance(degrees, (int, float)):
if degrees < 0.0:
raise ValueError('Degrees must be positive if it is a number.')
else:
degrees = (-degrees, degrees)
elif not mmcv.is_tuple_of(degrees, (int, float)):
raise TypeError(f'Degrees must be float | int or tuple of float | '
'int, but got '
f'{type(degrees)}.')
self.keys = keys
self.degrees = degrees
def __call__(self, results):
angle = random.uniform(self.degrees[0], self.degrees[1])
for k in self.keys:
results[k] = mmcv.imrotate(results[k], angle)
if results[k].ndim == 2:
results[k] = np.expand_dims(results[k], axis=2)
results['degrees'] = self.degrees
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, degrees={self.degrees})')
return repr_str
@PIPELINES.register_module()
class Flip:
"""Flip the input data with a probability.
Reverse the order of elements in the given data with a specific direction.
The shape of the data is preserved, but the elements are reordered.
Required keys are the keys in attributes "keys", added or modified keys are
"flip", "flip_direction" and the keys in attributes "keys".
It also supports flipping a list of images with the same flip.
Args:
keys (list[str]): The images to be flipped.
        flip_ratio (float): The probability to flip the images.
direction (str): Flip images horizontally or vertically. Options are
"horizontal" | "vertical". Default: "horizontal".
"""
_directions = ['horizontal', 'vertical']
def __init__(self, keys, flip_ratio=0.5, direction='horizontal'):
if direction not in self._directions:
            raise ValueError(f'Direction {direction} is not supported. '
                             f'Currently supported ones are '
                             f'{self._directions}')
self.keys = keys
self.flip_ratio = flip_ratio
self.direction = direction
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
flip = np.random.random() < self.flip_ratio
if flip:
for key in self.keys:
if isinstance(results[key], list):
for v in results[key]:
mmcv.imflip_(v, self.direction)
else:
mmcv.imflip_(results[key], self.direction)
results['flip'] = flip
results['flip_direction'] = self.direction
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, flip_ratio={self.flip_ratio}, '
f'direction={self.direction})')
return repr_str
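# Hedged usage sketch (dummy input is an assumption): with flip_ratio=1.0
# every listed key is flipped in place and the direction is recorded.
def _example_flip():
    aug = Flip(keys=['lq'], flip_ratio=1.0, direction='horizontal')
    results = aug(dict(lq=np.arange(6, dtype=np.float32).reshape(1, 6, 1)))
    assert results['lq'][0, :, 0].tolist() == [5, 4, 3, 2, 1, 0]
    assert results['flip'] and results['flip_direction'] == 'horizontal'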
@PIPELINES.register_module()
class Pad:
"""Pad the images to align with network downsample factor for testing.
See `Reshape` for more explanation. `numpy.pad` is used for the pad
operation.
Required keys are the keys in attribute "keys", added or
modified keys are "test_trans" and the keys in attribute
"keys". All keys in "keys" should have the same shape. "test_trans" is used
to record the test transformation to align the input's shape.
Args:
keys (list[str]): The images to be padded.
ds_factor (int): Downsample factor of the network. The height and
weight will be padded to a multiple of ds_factor. Default: 32.
        kwargs (optional): Any keyword arguments to be passed to `numpy.pad`.
"""
def __init__(self, keys, ds_factor=32, **kwargs):
self.keys = keys
self.ds_factor = ds_factor
self.kwargs = kwargs
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
h, w = results[self.keys[0]].shape[:2]
new_h = self.ds_factor * ((h - 1) // self.ds_factor + 1)
new_w = self.ds_factor * ((w - 1) // self.ds_factor + 1)
pad_h = new_h - h
pad_w = new_w - w
if new_h != h or new_w != w:
pad_width = ((0, pad_h), (0, pad_w), (0, 0))
for key in self.keys:
results[key] = np.pad(results[key],
pad_width[:results[key].ndim],
**self.kwargs)
results['pad'] = (pad_h, pad_w)
return results
def __repr__(self):
repr_str = self.__class__.__name__
kwargs_str = ', '.join(
[f'{key}={val}' for key, val in self.kwargs.items()])
repr_str += (f'(keys={self.keys}, ds_factor={self.ds_factor}, '
f'{kwargs_str})')
return repr_str
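# Hedged worked example: ds_factor=32 pads (100, 150) up to the next
# multiples of 32, i.e. (128, 160), on the bottom/right edges only.
def _example_pad():
    results = Pad(keys=['lq'], ds_factor=32)(
        dict(lq=np.zeros((100, 150, 3), dtype=np.float32)))
    assert results['lq'].shape == (128, 160, 3)
    assert results['pad'] == (28, 10)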
@PIPELINES.register_module()
class RandomAffine:
"""Apply random affine to input images.
This class is adopted from
https://github.com/pytorch/vision/blob/v0.5.0/torchvision/transforms/
transforms.py#L1015
It should be noted that in
https://github.com/Yaoyi-Li/GCA-Matting/blob/master/dataloader/
data_generator.py#L70
random flip is added. See explanation of `flip_ratio` below.
Required keys are the keys in attribute "keys", modified keys
are keys in attribute "keys".
Args:
keys (Sequence[str]): The images to be affined.
degrees (float | tuple[float]): Range of degrees to select from. If it
is a float instead of a tuple like (min, max), the range of degrees
will be (-degrees, +degrees). Set to 0 to deactivate rotations.
translate (tuple, optional): Tuple of maximum absolute fraction for
horizontal and vertical translations. For example translate=(a, b),
then horizontal shift is randomly sampled in the range
-img_width * a < dx < img_width * a and vertical shift is randomly
sampled in the range -img_height * b < dy < img_height * b.
Default: None.
scale (tuple, optional): Scaling factor interval, e.g (a, b), then
scale is randomly sampled from the range a <= scale <= b.
Default: None.
shear (float | tuple[float], optional): Range of shear degrees to
select from. If shear is a float, a shear parallel to the x axis
and a shear parallel to the y axis in the range (-shear, +shear)
will be applied. Else if shear is a tuple of 2 values, a x-axis
shear and a y-axis shear in (shear[0], shear[1]) will be applied.
Default: None.
flip_ratio (float, optional): Probability of the image being flipped.
The flips in horizontal direction and vertical direction are
independent. The image may be flipped in both directions.
Default: None.
"""
def __init__(self,
keys,
degrees,
translate=None,
scale=None,
shear=None,
flip_ratio=None):
self.keys = keys
if isinstance(degrees, numbers.Number):
assert degrees >= 0, ('If degrees is a single number, '
'it must be positive.')
self.degrees = (-degrees, degrees)
else:
assert isinstance(degrees, tuple) and len(degrees) == 2, \
'degrees should be a tuple and it must be of length 2.'
self.degrees = degrees
if translate is not None:
assert isinstance(translate, tuple) and len(translate) == 2, \
'translate should be a tuple and it must be of length 2.'
for t in translate:
assert 0.0 <= t <= 1.0, ('translation values should be '
'between 0 and 1.')
self.translate = translate
if scale is not None:
assert isinstance(scale, tuple) and len(scale) == 2, \
'scale should be a tuple and it must be of length 2.'
for s in scale:
assert s > 0, 'scale values should be positive.'
self.scale = scale
if shear is not None:
if isinstance(shear, numbers.Number):
assert shear >= 0, ('If shear is a single number, '
'it must be positive.')
self.shear = (-shear, shear)
else:
assert isinstance(shear, tuple) and len(shear) == 2, \
'shear should be a tuple and it must be of length 2.'
# X-Axis and Y-Axis shear with (min, max)
self.shear = shear
else:
self.shear = shear
if flip_ratio is not None:
assert isinstance(flip_ratio,
float), 'flip_ratio should be a float.'
self.flip_ratio = flip_ratio
else:
self.flip_ratio = 0
@staticmethod
def _get_params(degrees, translate, scale_ranges, shears, flip_ratio,
img_size):
"""Get parameters for affine transformation.
Returns:
paras (tuple): Params to be passed to the affine transformation.
"""
angle = np.random.uniform(degrees[0], degrees[1])
if translate is not None:
max_dx = translate[0] * img_size[0]
max_dy = translate[1] * img_size[1]
translations = (np.round(np.random.uniform(-max_dx, max_dx)),
np.round(np.random.uniform(-max_dy, max_dy)))
else:
translations = (0, 0)
if scale_ranges is not None:
scale = (np.random.uniform(scale_ranges[0], scale_ranges[1]),
np.random.uniform(scale_ranges[0], scale_ranges[1]))
else:
scale = (1.0, 1.0)
if shears is not None:
shear = np.random.uniform(shears[0], shears[1])
else:
shear = 0.0
        # Because `flip` is used as a multiplier in
        # `_get_inverse_affine_matrix`, -1 stands for flip and 1 stands for
        # no flip. Thus `flip`
# should be an 'inverse' flag as the result of the comparison.
# See https://github.com/open-mmlab/mmediting/pull/799 for more detail
flip = (np.random.rand(2) > flip_ratio).astype(np.int32) * 2 - 1
return angle, translations, scale, shear, flip
@staticmethod
def _get_inverse_affine_matrix(center, angle, translate, scale, shear,
flip):
"""Helper method to compute inverse matrix for affine transformation.
As it is explained in PIL.Image.rotate, we need compute INVERSE of
affine transformation matrix: M = T * C * RSS * C^-1 where
T is translation matrix:
[1, 0, tx | 0, 1, ty | 0, 0, 1];
C is translation matrix to keep center:
[1, 0, cx | 0, 1, cy | 0, 0, 1];
RSS is rotation with scale and shear matrix.
It is different from the original function in torchvision.
1. The order are changed to flip -> scale -> rotation -> shear.
2. x and y have different scale factors.
RSS(shear, a, scale, f) =
[ cos(a + shear)*scale_x*f -sin(a + shear)*scale_y 0]
[ sin(a)*scale_x*f cos(a)*scale_y 0]
[ 0 0 1]
Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1.
"""
angle = math.radians(angle)
shear = math.radians(shear)
scale_x = 1.0 / scale[0] * flip[0]
scale_y = 1.0 / scale[1] * flip[1]
# Inverted rotation matrix with scale and shear
d = math.cos(angle + shear) * math.cos(angle) + math.sin(
angle + shear) * math.sin(angle)
matrix = [
math.cos(angle) * scale_x,
math.sin(angle + shear) * scale_x, 0, -math.sin(angle) * scale_y,
math.cos(angle + shear) * scale_y, 0
]
matrix = [m / d for m in matrix]
# Apply inverse of translation and of center translation:
# RSS^-1 * C^-1 * T^-1
matrix[2] += matrix[0] * (-center[0] - translate[0]) + matrix[1] * (
-center[1] - translate[1])
matrix[5] += matrix[3] * (-center[0] - translate[0]) + matrix[4] * (
-center[1] - translate[1])
# Apply center translation: C * RSS^-1 * C^-1 * T^-1
matrix[2] += center[0]
matrix[5] += center[1]
return matrix
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
h, w = results[self.keys[0]].shape[:2]
# if image is too small, set degree to 0 to reduce introduced dark area
if np.maximum(h, w) < 1024:
params = self._get_params((0, 0), self.translate, self.scale,
self.shear, self.flip_ratio, (h, w))
else:
params = self._get_params(self.degrees, self.translate, self.scale,
self.shear, self.flip_ratio, (h, w))
center = (w * 0.5 - 0.5, h * 0.5 - 0.5)
M = self._get_inverse_affine_matrix(center, *params)
M = np.array(M).reshape((2, 3))
for key in self.keys:
results[key] = cv2.warpAffine(
results[key],
M, (w, h),
flags=cv2.INTER_NEAREST + cv2.WARP_INVERSE_MAP)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, degrees={self.degrees}, '
f'translate={self.translate}, scale={self.scale}, '
f'shear={self.shear}, flip_ratio={self.flip_ratio})')
return repr_str
@PIPELINES.register_module()
class RandomJitter:
"""Randomly jitter the foreground in hsv space.
The jitter range of hue is adjustable while the jitter ranges of saturation
and value are adaptive to the images. Side effect: the "fg" image will be
converted to `np.float32`.
Required keys are "fg" and "alpha", modified key is "fg".
Args:
hue_range (float | tuple[float]): Range of hue jittering. If it is a
float instead of a tuple like (min, max), the range of hue
jittering will be (-hue_range, +hue_range). Default: 40.
"""
def __init__(self, hue_range=40):
if isinstance(hue_range, numbers.Number):
assert hue_range >= 0, ('If hue_range is a single number, '
'it must be positive.')
self.hue_range = (-hue_range, hue_range)
else:
assert isinstance(hue_range, tuple) and len(hue_range) == 2, \
'hue_range should be a tuple and it must be of length 2.'
self.hue_range = hue_range
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
fg, alpha = results['fg'], results['alpha']
# convert to HSV space;
# convert to float32 image to keep precision during space conversion.
fg = mmcv.bgr2hsv(fg.astype(np.float32) / 255)
# Hue noise
hue_jitter = np.random.randint(self.hue_range[0], self.hue_range[1])
fg[:, :, 0] = np.remainder(fg[:, :, 0] + hue_jitter, 360)
# Saturation noise
sat_mean = fg[:, :, 1][alpha > 0].mean()
# jitter saturation within range (1.1 - sat_mean) * [-0.1, 0.1]
sat_jitter = (1.1 - sat_mean) * (np.random.rand() * 0.2 - 0.1)
sat = fg[:, :, 1]
sat = np.abs(sat + sat_jitter)
sat[sat > 1] = 2 - sat[sat > 1]
fg[:, :, 1] = sat
# Value noise
val_mean = fg[:, :, 2][alpha > 0].mean()
# jitter value within range (1.1 - val_mean) * [-0.1, 0.1]
val_jitter = (1.1 - val_mean) * (np.random.rand() * 0.2 - 0.1)
val = fg[:, :, 2]
val = np.abs(val + val_jitter)
val[val > 1] = 2 - val[val > 1]
fg[:, :, 2] = val
# convert back to BGR space
fg = mmcv.hsv2bgr(fg)
results['fg'] = fg * 255
return results
def __repr__(self):
        return self.__class__.__name__ + f'(hue_range={self.hue_range})'
@PIPELINES.register_module()
class ColorJitter:
"""An interface for torch color jitter so that it can be invoked in
    the mmediting pipeline.
Randomly change the brightness, contrast and saturation of an image.
Modified keys are the attributes specified in "keys".
Args:
        keys (list[str]): The images to be color jittered.
to_rgb (bool): Whether to convert channels from BGR to RGB.
Default: False.
"""
def __init__(self, keys, to_rgb=False, **kwargs):
assert keys, 'Keys should not be empty.'
self.keys = keys
self.to_rgb = to_rgb
self.transform = transforms.ColorJitter(**kwargs)
def __call__(self, results):
for k in self.keys:
if self.to_rgb:
results[k] = results[k][..., ::-1]
results[k] = Image.fromarray(results[k])
results[k] = self.transform(results[k])
results[k] = np.asarray(results[k])
            if self.to_rgb:
                # convert back to BGR only if the input was converted to RGB
                results[k] = results[k][..., ::-1]
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, to_rgb={self.to_rgb})')
return repr_str
class BinarizeImage:
"""Binarize image.
Args:
keys (Sequence[str]): The images to be binarized.
binary_thr (float): Threshold for binarization.
to_int (bool): If True, return image as int32, otherwise
return image as float32.
"""
def __init__(self, keys, binary_thr, to_int=False):
self.keys = keys
self.binary_thr = binary_thr
self.to_int = to_int
def _binarize(self, img):
type_ = np.float32 if not self.to_int else np.int32
img = (img[..., :] > self.binary_thr).astype(type_)
return img
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for k in self.keys:
results[k] = self._binarize(results[k])
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, binary_thr={self.binary_thr}, '
f'to_int={self.to_int})')
return repr_str
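# Hedged worked example (dummy key/values are assumptions): pixels above the
# threshold become 1.0, the rest 0.0.
def _example_binarize():
    img = np.array([[0.2, 0.8]], dtype=np.float32)
    results = BinarizeImage(keys=['mask'], binary_thr=0.5)(dict(mask=img))
    assert results['mask'].tolist() == [[0.0, 1.0]]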
@PIPELINES.register_module()
class RandomMaskDilation:
"""Randomly dilate binary masks.
Args:
        keys (Sequence[str]): The masks to be dilated.
get_binary (bool): If True, according to binary_thr, reset final
output as binary mask. Otherwise, return masks directly.
binary_thr (float): Threshold for obtaining binary mask.
kernel_min (int): Min size of dilation kernel.
kernel_max (int): Max size of dilation kernel.
"""
def __init__(self, keys, binary_thr=0., kernel_min=9, kernel_max=49):
self.keys = keys
self.kernel_min = kernel_min
self.kernel_max = kernel_max
self.binary_thr = binary_thr
def _random_dilate(self, img):
kernel_size = np.random.randint(self.kernel_min, self.kernel_max + 1)
kernel = np.ones((kernel_size, kernel_size), dtype=np.uint8)
dilate_kernel_size = kernel_size
img_ = cv2.dilate(img, kernel, iterations=1)
img_ = (img_ > self.binary_thr).astype(np.float32)
return img_, dilate_kernel_size
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for k in self.keys:
results[k], d_kernel = self._random_dilate(results[k])
if len(results[k].shape) == 2:
results[k] = np.expand_dims(results[k], axis=2)
results[k + '_dilate_kernel_size'] = d_kernel
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, kernel_min={self.kernel_min}, '
f'kernel_max={self.kernel_max})')
return repr_str
@PIPELINES.register_module()
class RandomTransposeHW:
"""Randomly transpose images in H and W dimensions with a probability.
    (TransposeHW = horizontal flip + anti-clockwise rotation by 90 degrees)
When used with horizontal/vertical flips, it serves as a way of rotation
augmentation.
It also supports randomly transposing a list of images.
Required keys are the keys in attributes "keys", added or modified keys are
"transpose" and the keys in attributes "keys".
Args:
keys (list[str]): The images to be transposed.
        transpose_ratio (float): The probability to transpose the images.
"""
def __init__(self, keys, transpose_ratio=0.5):
self.keys = keys
self.transpose_ratio = transpose_ratio
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
transpose = np.random.random() < self.transpose_ratio
if transpose:
for key in self.keys:
if isinstance(results[key], list):
results[key] = [v.transpose(1, 0, 2) for v in results[key]]
else:
results[key] = results[key].transpose(1, 0, 2)
results['transpose'] = transpose
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f'(keys={self.keys}, transpose_ratio={self.transpose_ratio})')
return repr_str
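# Hedged usage sketch (dummy input is an assumption): with transpose_ratio=1.0
# the H and W axes are always swapped; combined with Flip this yields
# 90-degree rotation augmentation.
def _example_transpose_hw():
    aug = RandomTransposeHW(keys=['lq'], transpose_ratio=1.0)
    results = aug(dict(lq=np.zeros((4, 6, 3), dtype=np.float32)))
    assert results['lq'].shape == (6, 4, 3) and results['transpose']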
@PIPELINES.register_module()
class GenerateFrameIndiceswithPadding:
"""Generate frame index with padding for REDS dataset and Vid4 dataset
during testing.
Required keys: lq_path, gt_path, key, num_input_frames, max_frame_num
Added or modified keys: lq_path, gt_path
Args:
padding (str): padding mode, one of
'replicate' | 'reflection' | 'reflection_circle' | 'circle'.
Examples: current_idx = 0, num_input_frames = 5
The generated frame indices under different padding mode:
replicate: [0, 0, 0, 1, 2]
reflection: [2, 1, 0, 1, 2]
reflection_circle: [4, 3, 0, 1, 2]
circle: [3, 4, 0, 1, 2]
filename_tmpl (str): Template for file name. Default: '{:08d}'.
"""
def __init__(self, padding, filename_tmpl='{:08d}'):
if padding not in ('replicate', 'reflection', 'reflection_circle',
'circle'):
            raise ValueError(f'Wrong padding mode {padding}. '
'Should be "replicate", "reflection", '
'"reflection_circle", "circle"')
self.padding = padding
self.filename_tmpl = filename_tmpl
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
clip_name, frame_name = results['key'].split(os.sep)
current_idx = int(frame_name)
max_frame_num = results['max_frame_num'] - 1 # start from 0
num_input_frames = results['num_input_frames']
num_pad = num_input_frames // 2
frame_list = []
for i in range(current_idx - num_pad, current_idx + num_pad + 1):
if i < 0:
if self.padding == 'replicate':
pad_idx = 0
elif self.padding == 'reflection':
pad_idx = -i
elif self.padding == 'reflection_circle':
pad_idx = current_idx + num_pad - i
else:
pad_idx = num_input_frames + i
elif i > max_frame_num:
if self.padding == 'replicate':
pad_idx = max_frame_num
elif self.padding == 'reflection':
pad_idx = max_frame_num * 2 - i
elif self.padding == 'reflection_circle':
pad_idx = (current_idx - num_pad) - (i - max_frame_num)
else:
pad_idx = i - num_input_frames
else:
pad_idx = i
frame_list.append(pad_idx)
lq_path_root = results['lq_path']
gt_path_root = results['gt_path']
lq_paths = [
osp.join(lq_path_root, clip_name,
f'{self.filename_tmpl.format(idx)}.png')
for idx in frame_list
]
gt_paths = [osp.join(gt_path_root, clip_name, f'{frame_name}.png')]
results['lq_path'] = lq_paths
results['gt_path'] = gt_paths
return results
def __repr__(self):
repr_str = self.__class__.__name__ + f"(padding='{self.padding}')"
return repr_str
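# Hedged worked example (the root paths are dummy assumptions): reproducing
# the docstring table for current_idx=0, num_input_frames=5 under
# 'reflection' padding.
def _example_padding_indices():
    gen = GenerateFrameIndiceswithPadding(padding='reflection')
    results = gen(
        dict(
            key=osp.join('000', '00000000'),
            max_frame_num=100,
            num_input_frames=5,
            lq_path='lq_root',
            gt_path='gt_root'))
    names = [osp.basename(p) for p in results['lq_path']]
    assert names == [f'{i:08d}.png' for i in [2, 1, 0, 1, 2]]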
@PIPELINES.register_module()
class GenerateFrameIndices:
"""Generate frame index for REDS datasets. It also performs
    temporal augmentation with random interval.
Required keys: lq_path, gt_path, key, num_input_frames
Added or modified keys: lq_path, gt_path, interval, reverse
Args:
interval_list (list[int]): Interval list for temporal augmentation.
It will randomly pick an interval from interval_list and sample
frame index with the interval.
frames_per_clip(int): Number of frames per clips. Default: 99 for
REDS dataset.
"""
def __init__(self, interval_list, frames_per_clip=99):
self.interval_list = interval_list
self.frames_per_clip = frames_per_clip
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
clip_name, frame_name = results['key'].split(
os.sep) # key example: 000/00000000
center_frame_idx = int(frame_name)
num_half_frames = results['num_input_frames'] // 2
max_frame_num = results.get('max_frame_num', self.frames_per_clip + 1)
frames_per_clip = min(self.frames_per_clip, max_frame_num - 1)
interval = np.random.choice(self.interval_list)
# ensure not exceeding the borders
start_frame_idx = center_frame_idx - num_half_frames * interval
end_frame_idx = center_frame_idx + num_half_frames * interval
while (start_frame_idx < 0) or (end_frame_idx > frames_per_clip):
center_frame_idx = np.random.randint(0, frames_per_clip + 1)
start_frame_idx = center_frame_idx - num_half_frames * interval
end_frame_idx = center_frame_idx + num_half_frames * interval
frame_name = f'{center_frame_idx:08d}'
neighbor_list = list(
range(center_frame_idx - num_half_frames * interval,
center_frame_idx + num_half_frames * interval + 1, interval))
lq_path_root = results['lq_path']
gt_path_root = results['gt_path']
lq_path = [
osp.join(lq_path_root, clip_name, f'{v:08d}.png')
for v in neighbor_list
]
gt_path = [osp.join(gt_path_root, clip_name, f'{frame_name}.png')]
results['lq_path'] = lq_path
results['gt_path'] = gt_path
results['interval'] = interval
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(interval_list={self.interval_list}, '
f'frames_per_clip={self.frames_per_clip})')
return repr_str
@PIPELINES.register_module()
class TemporalReverse:
"""Reverse frame lists for temporal augmentation.
Required keys are the keys in attributes "lq" and "gt",
added or modified keys are "lq", "gt" and "reverse".
Args:
keys (list[str]): The frame lists to be reversed.
        reverse_ratio (float): The probability to reverse the frame lists.
Default: 0.5.
"""
def __init__(self, keys, reverse_ratio=0.5):
self.keys = keys
self.reverse_ratio = reverse_ratio
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
reverse = np.random.random() < self.reverse_ratio
if reverse:
for key in self.keys:
results[key].reverse()
results['reverse'] = reverse
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(keys={self.keys}, reverse_ratio={self.reverse_ratio})'
return repr_str
@PIPELINES.register_module()
class GenerateSegmentIndices:
"""Generate frame indices for a segment. It also performs temporal
    augmentation with random interval.
Required keys: lq_path, gt_path, key, num_input_frames, sequence_length
Added or modified keys: lq_path, gt_path, interval, reverse
Args:
interval_list (list[int]): Interval list for temporal augmentation.
It will randomly pick an interval from interval_list and sample
frame index with the interval.
start_idx (int): The index corresponds to the first frame in the
sequence. Default: 0.
filename_tmpl (str): Template for file name. Default: '{:08d}.png'.
"""
def __init__(self, interval_list, start_idx=0, filename_tmpl='{:08d}.png'):
self.interval_list = interval_list
self.filename_tmpl = filename_tmpl
self.start_idx = start_idx
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
# key example: '000', 'calendar' (sequence name)
clip_name = results['key']
interval = np.random.choice(self.interval_list)
self.sequence_length = results['sequence_length']
num_input_frames = results.get('num_input_frames',
self.sequence_length)
# randomly select a frame as start
if self.sequence_length - num_input_frames * interval < 0:
raise ValueError('The input sequence is not long enough to '
'support the current choice of [interval] or '
'[num_input_frames].')
start_frame_idx = np.random.randint(
0, self.sequence_length - num_input_frames * interval + 1)
end_frame_idx = start_frame_idx + num_input_frames * interval
neighbor_list = list(range(start_frame_idx, end_frame_idx, interval))
neighbor_list = [v + self.start_idx for v in neighbor_list]
# add the corresponding file paths
lq_path_root = results['lq_path']
gt_path_root = results['gt_path']
lq_path = [
osp.join(lq_path_root, clip_name, self.filename_tmpl.format(v))
for v in neighbor_list
]
gt_path = [
osp.join(gt_path_root, clip_name, self.filename_tmpl.format(v))
for v in neighbor_list
]
results['lq_path'] = lq_path
results['gt_path'] = gt_path
results['interval'] = interval
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(interval_list={self.interval_list})')
return repr_str
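# A minimal usage sketch with hypothetical paths and a fixed interval of 1,
# assuming a 100-frame sequence named '000' and 15 input frames.
def _example_generate_segment_indices():
    pipeline = GenerateSegmentIndices(interval_list=[1])
    results = dict(lq_path='data/lq', gt_path='data/gt', key='000',
                   sequence_length=100, num_input_frames=15)
    results = pipeline(results)
    return results['lq_path']  # 15 consecutive frame paths under data/lq/000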
@PIPELINES.register_module()
class MirrorSequence:
"""Extend short sequences (e.g. Vimeo-90K) by mirroring the sequences
Given a sequence with N frames (x1, ..., xN), extend the sequence to
(x1, ..., xN, xN, ..., x1).
Args:
keys (list[str]): The frame lists to be extended.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for key in self.keys:
if isinstance(results[key], list):
results[key] = results[key] + results[key][::-1]
else:
                raise TypeError('The input must be of class list[np.ndarray]. '
                                f'Got {type(results[key])}.')
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys})')
return repr_str
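# A minimal usage sketch; plain ints stand in for frame arrays here.
def _example_mirror_sequence():
    pipeline = MirrorSequence(keys=['lq'])
    results = pipeline(dict(lq=[0, 1, 2]))
    assert results['lq'] == [0, 1, 2, 2, 1, 0]
    return results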
@PIPELINES.register_module()
class CopyValues:
"""Copy the value of a source key to a destination key.
It does the following: results[dst_key] = results[src_key] for
(src_key, dst_key) in zip(src_keys, dst_keys).
Added keys are the keys in the attribute "dst_keys".
Args:
src_keys (list[str]): The source keys.
dst_keys (list[str]): The destination keys.
"""
def __init__(self, src_keys, dst_keys):
if not isinstance(src_keys, list) or not isinstance(dst_keys, list):
raise AssertionError('"src_keys" and "dst_keys" must be lists.')
if len(src_keys) != len(dst_keys):
raise ValueError('"src_keys" and "dst_keys" should have the same'
'number of elements.')
self.src_keys = src_keys
self.dst_keys = dst_keys
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict with a key added/modified.
"""
for (src_key, dst_key) in zip(self.src_keys, self.dst_keys):
results[dst_key] = copy.deepcopy(results[src_key])
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(src_keys={self.src_keys})')
repr_str += (f'(dst_keys={self.dst_keys})')
return repr_str
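# A minimal usage sketch: duplicating 'gt' under a hypothetical new key.
def _example_copy_values():
    pipeline = CopyValues(src_keys=['gt'], dst_keys=['gt_copy'])
    results = pipeline(dict(gt=np.zeros((4, 4, 3), dtype=np.float32)))
    return results['gt_copy']  # an independent deep copy of results['gt']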
@PIPELINES.register_module()
class Quantize:
"""Quantize and clip the image to [0, 1].
    It is assumed that the input has range [0, 1].
Modified keys are the attributes specified in "keys".
Args:
keys (list[str]): The keys whose values are clipped.
"""
def __init__(self, keys):
self.keys = keys
def _quantize_clip(self, input_):
is_single_image = False
if isinstance(input_, np.ndarray):
is_single_image = True
input_ = [input_]
# quantize and clip
input_ = [np.clip((v * 255.0).round(), 0, 255) / 255. for v in input_]
if is_single_image:
input_ = input_[0]
return input_
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict with the values of the specified keys are rounded
and clipped.
"""
for key in self.keys:
results[key] = self._quantize_clip(results[key])
return results
def __repr__(self):
return self.__class__.__name__
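# A minimal usage sketch showing the rounding to the 255-level grid:
# 0.503 -> round(128.265)/255 ~= 0.502, 1.2 -> 1.0, -0.1 -> 0.0.
def _example_quantize():
    pipeline = Quantize(keys=['lq'])
    return pipeline(dict(lq=np.array([0.503, 1.2, -0.1], dtype=np.float32)))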
@PIPELINES.register_module()
class UnsharpMasking:
"""Apply unsharp masking to an image or a sequence of images.
Args:
kernel_size (int): The kernel_size of the Gaussian kernel.
sigma (float): The standard deviation of the Gaussian.
weight (float): The weight of the "details" in the final output.
threshold (float): Pixel differences larger than this value are
regarded as "details".
keys (list[str]): The keys whose values are processed.
Added keys are "xxx_unsharp", where "xxx" are the attributes specified
in "keys".
"""
def __init__(self, kernel_size, sigma, weight, threshold, keys):
if kernel_size % 2 == 0:
raise ValueError('kernel_size must be an odd number, but '
f'got {kernel_size}.')
self.kernel_size = kernel_size
self.sigma = sigma
self.weight = weight
self.threshold = threshold
self.keys = keys
kernel = cv2.getGaussianKernel(kernel_size, sigma)
self.kernel = np.matmul(kernel, kernel.transpose())
def _unsharp_masking(self, imgs):
is_single_image = False
if isinstance(imgs, np.ndarray):
is_single_image = True
imgs = [imgs]
outputs = []
for img in imgs:
residue = img - cv2.filter2D(img, -1, self.kernel)
mask = np.float32(np.abs(residue) * 255 > self.threshold)
soft_mask = cv2.filter2D(mask, -1, self.kernel)
sharpened = np.clip(img + self.weight * residue, 0, 1)
outputs.append(soft_mask * sharpened + (1 - soft_mask) * img)
if is_single_image:
outputs = outputs[0]
return outputs
def __call__(self, results):
for key in self.keys:
results[f'{key}_unsharp'] = self._unsharp_masking(results[key])
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, kernel_size={self.kernel_size}, '
f'sigma={self.sigma}, weight={self.weight}, '
f'threshold={self.threshold})')
return repr_str
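# A minimal usage sketch with hypothetical hyper-parameters; sigma=0 lets
# OpenCV derive the standard deviation from the kernel size.
def _example_unsharp_masking():
    pipeline = UnsharpMasking(
        kernel_size=5, sigma=0, weight=0.5, threshold=10, keys=['gt'])
    img = np.random.rand(32, 32, 3).astype(np.float32)  # values in [0, 1]
    results = pipeline(dict(gt=img))
    return results['gt_unsharp']  # sharpened result; 'gt' itself is unchanged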
| 46,436 | 35.081585 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import logging
import numpy as np
import torch
from mmcv.utils import print_log
_integer_types = (
np.byte,
np.ubyte, # 8 bits
np.short,
np.ushort, # 16 bits
np.intc,
np.uintc, # 16 or 32 or 64 bits
np.int_,
np.uint, # 32 or 64 bits
np.longlong,
np.ulonglong) # 64 bits
_integer_ranges = {
t: (np.iinfo(t).min, np.iinfo(t).max)
for t in _integer_types
}
dtype_range = {
np.bool_: (False, True),
np.bool8: (False, True),
np.float16: (-1, 1),
np.float32: (-1, 1),
np.float64: (-1, 1)
}
dtype_range.update(_integer_ranges)
def dtype_limits(image, clip_negative=False):
"""Return intensity limits, i.e. (min, max) tuple, of the image's dtype.
This function is adopted from skimage:
https://github.com/scikit-image/scikit-image/blob/
7e4840bd9439d1dfb6beaf549998452c99f97fdd/skimage/util/dtype.py#L35
Args:
image (ndarray): Input image.
clip_negative (bool, optional): If True, clip the negative range
(i.e. return 0 for min intensity) even if the image dtype allows
negative values.
    Returns:
tuple: Lower and upper intensity limits.
"""
imin, imax = dtype_range[image.dtype.type]
if clip_negative:
imin = 0
return imin, imax
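# A minimal usage sketch: integer images report their full dtype range,
# while float images are assumed to lie in [-1, 1] (or [0, 1] when clipped).
def _example_dtype_limits():
    img = np.zeros((2, 2), dtype=np.uint8)
    assert dtype_limits(img) == (0, 255)
    assert dtype_limits(img.astype(np.float32), clip_negative=True) == (0, 1)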
def adjust_gamma(image, gamma=1, gain=1):
"""Performs Gamma Correction on the input image.
This function is adopted from skimage:
https://github.com/scikit-image/scikit-image/blob/
7e4840bd9439d1dfb6beaf549998452c99f97fdd/skimage/exposure/
exposure.py#L439-L494
Also known as Power Law Transform.
This function transforms the input image pixelwise according to the
equation ``O = I**gamma`` after scaling each pixel to the range 0 to 1.
Args:
image (ndarray): Input image.
        gamma (float, optional): Non-negative real number. Defaults to 1.
gain (float, optional): The constant multiplier. Defaults to 1.
Returns:
ndarray: Gamma corrected output image.
"""
if np.any(image < 0):
raise ValueError('Image Correction methods work correctly only on '
'images with non-negative values. Use '
'skimage.exposure.rescale_intensity.')
dtype = image.dtype.type
if gamma < 0:
raise ValueError('Gamma should be a non-negative real number.')
scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])
out = ((image / scale)**gamma) * scale * gain
return out.astype(dtype)
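# A minimal usage sketch: gamma > 1 darkens mid-tones, gamma < 1 brightens.
def _example_adjust_gamma():
    img = np.arange(256, dtype=np.uint8).reshape(16, 16)
    darker = adjust_gamma(img, gamma=2.0)
    brighter = adjust_gamma(img, gamma=0.5)
    return darker, brighter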
def random_choose_unknown(unknown, crop_size):
"""Randomly choose an unknown start (top-left) point for a given crop_size.
Args:
unknown (np.ndarray): The binary unknown mask.
crop_size (tuple[int]): The given crop size.
Returns:
tuple[int]: The top-left point of the chosen bbox.
"""
h, w = unknown.shape
crop_h, crop_w = crop_size
delta_h = center_h = crop_h // 2
delta_w = center_w = crop_w // 2
# mask out the validate area for selecting the cropping center
mask = np.zeros_like(unknown)
mask[delta_h:h - delta_h, delta_w:w - delta_w] = 1
if np.any(unknown & mask):
center_h_list, center_w_list = np.where(unknown & mask)
elif np.any(unknown):
center_h_list, center_w_list = np.where(unknown)
else:
print_log('No unknown pixels found!', level=logging.WARNING)
center_h_list = [center_h]
center_w_list = [center_w]
num_unknowns = len(center_h_list)
rand_ind = np.random.randint(num_unknowns)
center_h = center_h_list[rand_ind]
center_w = center_w_list[rand_ind]
# make sure the top-left point is valid
top = np.clip(center_h - delta_h, 0, h - crop_h)
left = np.clip(center_w - delta_w, 0, w - crop_w)
return top, left
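# A minimal usage sketch with a hypothetical 20x20 unknown region; the
# returned top-left point always keeps the crop inside the image.
def _example_random_choose_unknown():
    unknown = np.zeros((64, 64), dtype=np.uint8)
    unknown[20:40, 20:40] = 1
    top, left = random_choose_unknown(unknown, crop_size=(32, 32))
    return top, left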
def make_coord(shape, ranges=None, flatten=True):
""" Make coordinates at grid centers.
Args:
shape (tuple): shape of image.
ranges (tuple): range of coordinate value. Default: None.
flatten (bool): flatten to (n, 2) or Not. Default: True.
    Returns:
coord (Tensor): coordinates.
"""
coord_seqs = []
for i, n in enumerate(shape):
if ranges is None:
v0, v1 = -1, 1
else:
v0, v1 = ranges[i]
r = (v1 - v0) / (2 * n)
seq = v0 + r + (2 * r) * torch.arange(n).float()
coord_seqs.append(seq)
coord = torch.stack(torch.meshgrid(*coord_seqs), dim=-1)
if flatten:
coord = coord.view(-1, coord.shape[-1])
return coord
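# A minimal usage sketch: a 2x2 grid yields the four cell centers in [-1, 1].
def _example_make_coord():
    coord = make_coord((2, 2))
    # tensor([[-0.5, -0.5], [-0.5, 0.5], [0.5, -0.5], [0.5, 0.5]])
    return coord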
| 4,623 | 28.832258 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/random_down_sampling.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import numpy as np
import torch
from mmcv import imresize
from ..registry import PIPELINES
@PIPELINES.register_module()
class RandomDownSampling:
"""Generate LQ image from GT (and crop), which will randomly pick a scale.
Args:
scale_min (float): The minimum of upsampling scale, inclusive.
Default: 1.0.
scale_max (float): The maximum of upsampling scale, exclusive.
Default: 4.0.
patch_size (int): The cropped lr patch size.
Default: None, means no crop.
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
backend, "nearest", "bilinear", "bicubic", "box", "lanczos",
"hamming" for 'pillow' backend.
Default: "bicubic".
backend (str | None): The image resize backend type. Options are `cv2`,
`pillow`, `None`. If backend is None, the global imread_backend
specified by ``mmcv.use_backend()`` will be used.
Default: "pillow".
Scale will be picked in the range of [scale_min, scale_max).
"""
def __init__(self,
scale_min=1.0,
scale_max=4.0,
patch_size=None,
interpolation='bicubic',
backend='pillow'):
assert scale_max >= scale_min
self.scale_min = scale_min
self.scale_max = scale_max
self.patch_size = patch_size
self.interpolation = interpolation
self.backend = backend
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation. 'gt' is required.
Returns:
dict: A dict containing the processed data and information.
modified 'gt', supplement 'lq' and 'scale' to keys.
"""
img = results['gt']
scale = np.random.uniform(self.scale_min, self.scale_max)
if self.patch_size is None:
h_lr = math.floor(img.shape[-3] / scale + 1e-9)
w_lr = math.floor(img.shape[-2] / scale + 1e-9)
img = img[:round(h_lr * scale), :round(w_lr * scale), :]
img_down = resize_fn(img, (w_lr, h_lr), self.interpolation,
self.backend)
crop_lr, crop_hr = img_down, img
else:
w_lr = self.patch_size
w_hr = round(w_lr * scale)
x0 = np.random.randint(0, img.shape[-3] - w_hr)
y0 = np.random.randint(0, img.shape[-2] - w_hr)
crop_hr = img[x0:x0 + w_hr, y0:y0 + w_hr, :]
crop_lr = resize_fn(crop_hr, w_lr, self.interpolation,
self.backend)
results['gt'] = crop_hr
results['lq'] = crop_lr
results['scale'] = scale
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f' scale_min={self.scale_min}, '
f'scale_max={self.scale_max}, '
f'patch_size={self.patch_size}, '
f'interpolation={self.interpolation}, '
f'backend={self.backend}')
return repr_str
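# A minimal usage sketch, assuming a hypothetical 256x256 uint8 image and
# the cv2 resize backend.
def _example_random_down_sampling():
    pipeline = RandomDownSampling(scale_min=2.0, scale_max=4.0,
                                  patch_size=32, backend='cv2')
    img = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
    results = pipeline(dict(gt=img))
    return results['lq'].shape, results['gt'].shape, results['scale']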
def resize_fn(img, size, interpolation='bicubic', backend='pillow'):
"""Resize the given image to a given size.
Args:
img (ndarray | torch.Tensor): The input image.
size (int | tuple[int]): Target size w or (w, h).
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
backend, "nearest", "bilinear", "bicubic", "box", "lanczos",
"hamming" for 'pillow' backend.
Default: "bicubic".
backend (str | None): The image resize backend type. Options are `cv2`,
`pillow`, `None`. If backend is None, the global imread_backend
specified by ``mmcv.use_backend()`` will be used.
Default: "pillow".
Returns:
ndarray | torch.Tensor: `resized_img`, whose type is same as `img`.
"""
if isinstance(size, int):
size = (size, size)
if isinstance(img, np.ndarray):
return imresize(
img, size, interpolation=interpolation, backend=backend)
elif isinstance(img, torch.Tensor):
image = imresize(
img.numpy(), size, interpolation=interpolation, backend=backend)
return torch.from_numpy(image)
else:
        raise TypeError('img should be np.ndarray or torch.Tensor, '
                        f'but got {type(img)}')
| 4,735 | 36.587302 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/formating.py | # Copyright (c) OpenMMLab. All rights reserved.
from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from torch.nn import functional as F
from ..registry import PIPELINES
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
"""
if isinstance(data, torch.Tensor):
return data
if isinstance(data, np.ndarray):
return torch.from_numpy(data)
if isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
if isinstance(data, int):
return torch.LongTensor([data])
if isinstance(data, float):
return torch.FloatTensor([data])
raise TypeError(f'type {type(data)} cannot be converted to tensor.')
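# A minimal sketch of the supported conversions:
def _example_to_tensor():
    assert to_tensor(np.ones((2, 2))).shape == (2, 2)
    assert to_tensor(3).dtype == torch.int64      # int -> LongTensor
    assert to_tensor(0.5).dtype == torch.float32  # float -> FloatTensor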
@PIPELINES.register_module()
class ToTensor:
"""Convert some values in results dict to `torch.Tensor` type
in data loader pipeline.
Args:
keys (Sequence[str]): Required keys to be converted.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class ImageToTensor:
"""Convert image type to `torch.Tensor` type.
Args:
keys (Sequence[str]): Required keys to be converted.
to_float32 (bool): Whether convert numpy image array to np.float32
before converted to tensor. Default: True.
"""
def __init__(self, keys, to_float32=True):
self.keys = keys
self.to_float32 = to_float32
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for key in self.keys:
# deal with gray scale img: expand a color channel
if len(results[key].shape) == 2:
results[key] = results[key][..., None]
if self.to_float32 and not isinstance(results[key], np.float32):
results[key] = results[key].astype(np.float32)
results[key] = to_tensor(results[key].transpose(2, 0, 1))
return results
def __repr__(self):
return self.__class__.__name__ + (
f'(keys={self.keys}, to_float32={self.to_float32})')
@PIPELINES.register_module()
class FramesToTensor(ImageToTensor):
"""Convert frames type to `torch.Tensor` type.
It accepts a list of frames, converts each to `torch.Tensor` type and then
concatenates in a new dimension (dim=0).
Args:
keys (Sequence[str]): Required keys to be converted.
to_float32 (bool): Whether convert numpy image array to np.float32
before converted to tensor. Default: True.
"""
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
for key in self.keys:
if not isinstance(results[key], list):
raise TypeError(f'results["{key}"] should be a list, '
f'but got {type(results[key])}')
for idx, v in enumerate(results[key]):
# deal with gray scale img: expand a color channel
if len(v.shape) == 2:
v = v[..., None]
if self.to_float32 and not isinstance(v, np.float32):
v = v.astype(np.float32)
results[key][idx] = to_tensor(v.transpose(2, 0, 1))
results[key] = torch.stack(results[key], dim=0)
if results[key].size(0) == 1:
results[key].squeeze_()
return results
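# A minimal usage sketch: five hypothetical 8x8 RGB frames are stacked into
# a single (t, c, h, w) tensor.
def _example_frames_to_tensor():
    pipeline = FramesToTensor(keys=['lq'])
    frames = [np.random.rand(8, 8, 3).astype(np.float32) for _ in range(5)]
    results = pipeline(dict(lq=frames))
    return results['lq'].shape  # torch.Size([5, 3, 8, 8])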
@PIPELINES.register_module()
class GetMaskedImage:
"""Get masked image.
Args:
img_name (str): Key for clean image.
mask_name (str): Key for mask image. The mask shape should be
(h, w, 1) while '1' indicate holes and '0' indicate valid
regions.
"""
def __init__(self, img_name='gt_img', mask_name='mask'):
self.img_name = img_name
self.mask_name = mask_name
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
clean_img = results[self.img_name]
mask = results[self.mask_name]
masked_img = clean_img * (1. - mask)
results['masked_img'] = masked_img
return results
def __repr__(self):
return self.__class__.__name__ + (
f"(img_name='{self.img_name}', mask_name='{self.mask_name}')")
@PIPELINES.register_module()
class FormatTrimap:
"""Convert trimap (tensor) to one-hot representation.
It transforms the trimap label from (0, 128, 255) to (0, 1, 2). If
    ``to_onehot`` is set to True, the trimap will be converted to a one-hot
    tensor of shape (3, H, W). Required key is "trimap", added or modified keys
    are "trimap" and "to_onehot".
Args:
to_onehot (bool): whether convert trimap to one-hot tensor. Default:
``False``.
"""
def __init__(self, to_onehot=False):
self.to_onehot = to_onehot
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
trimap = results['trimap'].squeeze()
trimap[trimap == 128] = 1
trimap[trimap == 255] = 2
if self.to_onehot:
trimap = F.one_hot(trimap.to(torch.long), num_classes=3)
trimap = trimap.permute(2, 0, 1)
else:
trimap = trimap[None, ...] # expand the channels dimension
results['trimap'] = trimap.float()
results['meta'].data['to_onehot'] = self.to_onehot
return results
def __repr__(self):
return self.__class__.__name__ + f'(to_onehot={self.to_onehot})'
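# A minimal usage sketch; DC is the DataContainer imported above.
def _example_format_trimap():
    trimap = torch.tensor([[0, 128], [255, 0]])
    results = dict(trimap=trimap, meta=DC(dict(), cpu_only=True))
    results = FormatTrimap(to_onehot=True)(results)
    return results['trimap'].shape  # torch.Size([3, 2, 2])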
@PIPELINES.register_module()
class Collect:
"""Collect data from the loader relevant to the specific task.
This is usually the last stage of the data loader pipeline. Typically keys
is set to some subset of "img", "gt_labels".
The "img_meta" item is always populated. The contents of the "meta"
dictionary depends on "meta_keys".
Args:
keys (Sequence[str]): Required keys to be collected.
meta_keys (Sequence[str]): Required keys to be collected to "meta".
Default: None.
"""
def __init__(self, keys, meta_keys=None):
self.keys = keys
self.meta_keys = meta_keys
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Returns:
dict: A dict containing the processed data and information.
"""
data = {}
img_meta = {}
        for key in self.meta_keys or ():  # meta_keys may be None (the default)
img_meta[key] = results[key]
data['meta'] = DC(img_meta, cpu_only=True)
for key in self.keys:
data[key] = results[key]
return data
def __repr__(self):
return self.__class__.__name__ + (
f'(keys={self.keys}, meta_keys={self.meta_keys})')
| 8,262 | 30.299242 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/generate_assistant.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from ..registry import PIPELINES
from .utils import make_coord
@PIPELINES.register_module()
class GenerateHeatmap:
"""Generate heatmap from keypoint.
Args:
keypoint (str): Key of keypoint in dict.
ori_size (int | Tuple[int]): Original image size of keypoint.
target_size (int | Tuple[int]): Target size of heatmap.
sigma (float): Sigma parameter of heatmap. Default: 1.0
"""
def __init__(self, keypoint, ori_size, target_size, sigma=1.0):
if isinstance(ori_size, int):
ori_size = (ori_size, ori_size)
else:
ori_size = ori_size[:2]
if isinstance(target_size, int):
target_size = (target_size, target_size)
else:
target_size = target_size[:2]
self.size_ratio = (target_size[0] / ori_size[0],
target_size[1] / ori_size[1])
self.keypoint = keypoint
self.sigma = sigma
self.target_size = target_size
self.ori_size = ori_size
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation. Require keypoint.
Returns:
dict: A dict containing the processed data and information.
Add 'heatmap'.
"""
keypoint_list = [(keypoint[0] * self.size_ratio[0],
keypoint[1] * self.size_ratio[1])
for keypoint in results[self.keypoint]]
heatmap_list = [
self._generate_one_heatmap(keypoint) for keypoint in keypoint_list
]
results['heatmap'] = np.stack(heatmap_list, axis=2)
return results
def _generate_one_heatmap(self, keypoint):
"""Generate One Heatmap.
Args:
landmark (Tuple[float]): Location of a landmark.
results:
heatmap (np.ndarray): A heatmap of landmark.
"""
w, h = self.target_size
x_range = np.arange(start=0, stop=w, dtype=int)
y_range = np.arange(start=0, stop=h, dtype=int)
grid_x, grid_y = np.meshgrid(x_range, y_range)
dist2 = (grid_x - keypoint[0])**2 + (grid_y - keypoint[1])**2
exponent = dist2 / 2.0 / self.sigma / self.sigma
heatmap = np.exp(-exponent)
return heatmap
def __repr__(self):
return (f'{self.__class__.__name__}, '
f'keypoint={self.keypoint}, '
f'ori_size={self.ori_size}, '
f'target_size={self.target_size}, '
f'sigma={self.sigma}')
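# A minimal usage sketch with two hypothetical keypoints on a 256x256 image,
# mapped onto 64x64 heatmaps.
def _example_generate_heatmap():
    pipeline = GenerateHeatmap(
        keypoint='keypoint', ori_size=256, target_size=64, sigma=2.0)
    results = pipeline(dict(keypoint=[(128.0, 128.0), (30.0, 200.0)]))
    return results['heatmap'].shape  # (64, 64, 2), one channel per keypoint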
@PIPELINES.register_module()
class GenerateCoordinateAndCell:
"""Generate coordinate and cell.
Generate coordinate from the desired size of SR image.
Train or val:
1. Generate coordinate from GT.
        2. Reshape GT image from (3, Hg, Wg) to (HgWg, 3),
            where `Hg` and `Wg` represent the height and width of GT.
Test:
Generate coordinate from LQ and scale or target_size.
Then generate cell from coordinate.
Args:
sample_quantity (int): The quantity of samples in coordinates.
To ensure that the GT tensors in a batch have the same dimensions.
Default: None.
scale (float): Scale of upsampling.
Default: None.
target_size (tuple[int]): Size of target image.
Default: None.
The priority of getting 'size of target image' is:
1, results['gt'].shape[-2:]
2, results['lq'].shape[-2:] * scale
3, target_size
"""
def __init__(self, sample_quantity=None, scale=None, target_size=None):
self.sample_quantity = sample_quantity
self.scale = scale
self.target_size = target_size
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation.
Require either in results:
1. 'lq' (tensor), whose shape is similar as (3, H, W).
2. 'gt' (tensor), whose shape is similar as (3, H, W).
3. None, the premise is
self.target_size and len(self.target_size) >= 2.
Returns:
dict: A dict containing the processed data and information.
                Reshape 'gt' from (3, H, W) to (HW, 3) if 'gt'
                is in results.
Add 'coord' and 'cell'.
"""
# generate hr_coord (and hr_rgb)
if 'gt' in results:
crop_hr = results['gt']
self.target_size = crop_hr.shape
hr_rgb = crop_hr.contiguous().view(3, -1).permute(1, 0)
results['gt'] = hr_rgb
elif self.scale is not None and 'lq' in results:
_, h_lr, w_lr = results['lq'].shape
self.target_size = (round(h_lr * self.scale),
round(w_lr * self.scale))
else:
assert self.target_size is not None
assert len(self.target_size) >= 2
hr_coord = make_coord(self.target_size[-2:])
if self.sample_quantity is not None and 'gt' in results:
sample_lst = np.random.choice(
len(hr_coord), self.sample_quantity, replace=False)
hr_coord = hr_coord[sample_lst]
results['gt'] = results['gt'][sample_lst]
# Preparations for cell decoding
cell = torch.ones_like(hr_coord)
cell[:, 0] *= 2 / self.target_size[-2]
cell[:, 1] *= 2 / self.target_size[-1]
results['coord'] = hr_coord
results['cell'] = cell
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'sample_quantity={self.sample_quantity}, '
f'scale={self.scale}, target_size={self.target_size}')
return repr_str
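# A minimal test-time usage sketch: coordinates are derived from the LQ
# shape and an x4 scale, giving a 48x48 target grid.
def _example_generate_coordinate_and_cell():
    pipeline = GenerateCoordinateAndCell(scale=4.0)
    results = pipeline(dict(lq=torch.rand(3, 12, 12)))
    return results['coord'].shape, results['cell'].shape  # both (2304, 2)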
| 6,056 | 34.629412 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/utils/setup_env.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import platform
import warnings
import cv2
import torch.multiprocessing as mp
def setup_multi_processes(cfg):
"""Setup multi-processing environment variables."""
# set multi-process start method as `fork` to speed up the training
if platform.system() != 'Windows':
mp_start_method = cfg.get('mp_start_method', 'fork')
current_method = mp.get_start_method(allow_none=True)
if current_method is not None and current_method != mp_start_method:
warnings.warn(
f'Multi-processing start method `{mp_start_method}` is '
f'different from the previous setting `{current_method}`.'
f'It will be force set to `{mp_start_method}`. You can change '
f'this behavior by changing `mp_start_method` in your config.')
mp.set_start_method(mp_start_method, force=True)
# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = cfg.get('opencv_num_threads', 0)
cv2.setNumThreads(opencv_num_threads)
# setup OMP threads
# This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa
if 'OMP_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1:
omp_num_threads = 1
warnings.warn(
f'Setting OMP_NUM_THREADS environment variable for each process '
f'to be {omp_num_threads} in default, to avoid your system being '
f'overloaded, please further tune the variable for optimal '
f'performance in your application as needed.')
os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)
# setup MKL threads
if 'MKL_NUM_THREADS' not in os.environ and cfg.data.workers_per_gpu > 1:
mkl_num_threads = 1
warnings.warn(
f'Setting MKL_NUM_THREADS environment variable for each process '
f'to be {mkl_num_threads} in default, to avoid your system being '
f'overloaded, please further tune the variable for optimal '
f'performance in your application as needed.')
os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
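# A minimal usage sketch, assuming an mmcv Config with the fields read above.
def _example_setup_multi_processes():
    from mmcv import Config
    cfg = Config(dict(opencv_num_threads=0, data=dict(workers_per_gpu=4)))
    setup_multi_processes(cfg)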
| 2,219 | 45.25 | 112 | py |
SLOPpy | SLOPpy-main/docs/conf.py | # -*- coding: utf-8 -*-
#
# SLOPpy documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 16 17:20:15 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import matplotlib
matplotlib.use('agg')
sys.path.insert(0, os.path.abspath('../'))
import SLOPpy
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['myst_parser',
"sphinx.ext.autosummary",
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
"sphinx.ext.viewcode",
'sphinx.ext.napoleon']
myst_enable_extensions = ["dollarmath", "colon_fence"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
#source_suffix = ['.rst', '.md']
source_suffix = {
".rst": "restructuredtext",
".ipynb": "myst-nb",
}
#source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SLOPpy'
copyright = u'2018-2022, Daniela Sicilia, Luca Malavolta'
author = u'Daniela Sicilia, Luca Malavolta'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(SLOPpy.__version__.split('.')[:-1])
# The full version, including alpha/beta/rc tags.
release = SLOPpy.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_book_theme'
#html_logo = "_static/PyORBIT_logo_transp.png"
html_title = "SLOPpy"
html_copy_source = True
html_show_sourcelink = True
html_sourcelink_suffix = ""
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
"path_to_docs": "docs",
"repository_url": "https://github.com/LucaMalavolta/SLOPpy",
"repository_branch": "main",
"use_repository_button": True,
"use_issues_button": True,
#"home_page_in_toc": True,
"show_navbar_depth": 3,
"logo_only": True,
#"show_related": True
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
#html_sidebars = {
# '**': ['sidebar-logo.html','search-field.html',
# 'globaltoc.html',
# 'relations.html', # needs 'show_related': True theme option to display
## #'searchbox.html',
# ]
#}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'SLOPpy_doc'
| 4,554 | 29.57047 | 80 | py |
EPSANet | EPSANet-master/main.py | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from loss import CrossEntropyLabelSmooth
import models
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--arch', '-a', metavar='ARCH', default='epsanet50',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: epsanet50)')
parser.add_argument('--data', metavar='DIR', default='/path/dataset',
help='path to dataset')
parser.add_argument('-j', '--workers', default=10, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=120, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=100, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', default=False, dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training.')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--action', default='', type=str,
help='other information.')
best_prec1 = 0
best_prec5 = 0
best_epoch = 0
def main():
    global args, best_prec1, best_prec5, best_epoch
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
args.distributed = args.world_size > 1
if args.distributed:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if args.gpu is not None:
model = model.cuda(args.gpu)
elif args.distributed:
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model)
else:
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
print(model)
# get the number of models parameters
print('Number of models parameters: {}'.format(
sum([p.data.nelement() for p in model.parameters()])))
# define loss function (criterion) and optimizer
criterion = CrossEntropyLabelSmooth(num_classes=1000, epsilon=0.1)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
del checkpoint
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
m = time.time()
_, _ = validate(val_loader, model, criterion)
n = time.time()
print((n - m) / 3600)
return
directory = "runs/%s/" % (args.arch + '_' + args.action)
if not os.path.exists(directory):
os.makedirs(directory)
Loss_plot = {}
train_prec1_plot = {}
train_prec5_plot = {}
val_prec1_plot = {}
val_prec5_plot = {}
for epoch in range(args.start_epoch, args.epochs):
start_time = time.time()
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch)
# train for one epoch
# train(train_loader, model, criterion, optimizer, epoch)
loss_temp, train_prec1_temp, train_prec5_temp = train(train_loader, model, criterion, optimizer, epoch)
Loss_plot[epoch] = loss_temp
train_prec1_plot[epoch] = train_prec1_temp
train_prec5_plot[epoch] = train_prec5_temp
# evaluate on validation set
# prec1 = validate(val_loader, model, criterion)
prec1, prec5 = validate(val_loader, model, criterion)
val_prec1_plot[epoch] = prec1
val_prec5_plot[epoch] = prec5
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
}, is_best)
if is_best:
best_epoch = epoch + 1
best_prec5 = prec5
print(' * BestPrec so far@1 {top1:.3f} @5 {top5:.3f} in epoch {best_epoch}'.format(top1=best_prec1,
top5=best_prec5,
best_epoch=best_epoch))
data_save(directory + 'Loss_plot.txt', Loss_plot)
data_save(directory + 'train_prec1.txt', train_prec1_plot)
data_save(directory + 'train_prec5.txt', train_prec5_plot)
data_save(directory + 'val_prec1.txt', val_prec1_plot)
data_save(directory + 'val_prec5.txt', val_prec5_plot)
end_time = time.time()
time_value = (end_time - start_time) / 3600
print("-" * 80)
print(time_value)
print("-" * 80)
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
losses_batch = {}
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
return losses.avg, top1.avg, top5.avg
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg, top5.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
directory = "runs/%s/" % (args.arch + '_' + args.action)
filename = directory + filename
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, directory + 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            # reshape (not view): `correct` is non-contiguous after t()
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
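# A minimal sketch of the metric: one of the two predictions below matches
# its target, so precision@1 is 50%.
def _example_accuracy():
    output = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
    target = torch.tensor([1, 1])
    (top1,) = accuracy(output, target, topk=(1,))
    return top1  # tensor([50.])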
def data_save(root, file):
    if not os.path.exists(root):
        open(root, 'a').close()  # os.mknod is not portable (fails on macOS)
file_temp = open(root, 'r')
lines = file_temp.readlines()
if not lines:
epoch = -1
else:
epoch = lines[-1][:lines[-1].index(' ')]
epoch = int(epoch)
file_temp.close()
file_temp = open(root, 'a')
for line in file:
if line > epoch:
file_temp.write(str(line) + " " + str(file[line]) + '\n')
file_temp.close()
if __name__ == '__main__':
main()
| 15,032 | 34.707838 | 114 | py |
EPSANet | EPSANet-master/loss.py | import torch
import numpy as np
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
class CrossEntropyLabelSmooth(nn.Module):
"""Cross entropy loss with label smoothing regularizer.
Reference:
Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.
Equation: y = (1 - epsilon) * y + epsilon / K.
Args:
num_classes (int): number of classes.
epsilon (float): weight.
"""
def __init__(self, num_classes, epsilon=0.1, use_gpu=True):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.use_gpu = use_gpu
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
"""
Args:
inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)
            targets: ground truth labels with shape (batch_size,)
"""
log_probs = self.logsoftmax(inputs)
targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).cpu(), 1)
if self.use_gpu: targets = targets.cuda()
targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
loss = (- targets * log_probs).mean(0).sum()
return loss | 1,320 | 36.742857 | 91 | py |
EPSANet | EPSANet-master/models/SE_weight_module.py |
import torch.nn as nn
class SEWeightModule(nn.Module):
def __init__(self, channels, reduction=16):
super(SEWeightModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(channels, channels//reduction, kernel_size=1, padding=0)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels//reduction, channels, kernel_size=1, padding=0)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out = self.avg_pool(x)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
weight = self.sigmoid(out)
return weight | 651 | 30.047619 | 85 | py |
EPSANet | EPSANet-master/models/epsanet.py | import torch
import torch.nn as nn
import math
from .SE_weight_module import SEWeightModule
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, groups=1):
"""standard convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class PSAModule(nn.Module):
def __init__(self, inplans, planes, conv_kernels=[3, 5, 7, 9], stride=1, conv_groups=[1, 4, 8, 16]):
super(PSAModule, self).__init__()
self.conv_1 = conv(inplans, planes//4, kernel_size=conv_kernels[0], padding=conv_kernels[0]//2,
stride=stride, groups=conv_groups[0])
self.conv_2 = conv(inplans, planes//4, kernel_size=conv_kernels[1], padding=conv_kernels[1]//2,
stride=stride, groups=conv_groups[1])
self.conv_3 = conv(inplans, planes//4, kernel_size=conv_kernels[2], padding=conv_kernels[2]//2,
stride=stride, groups=conv_groups[2])
self.conv_4 = conv(inplans, planes//4, kernel_size=conv_kernels[3], padding=conv_kernels[3]//2,
stride=stride, groups=conv_groups[3])
self.se = SEWeightModule(planes // 4)
self.split_channel = planes // 4
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
batch_size = x.shape[0]
x1 = self.conv_1(x)
x2 = self.conv_2(x)
x3 = self.conv_3(x)
x4 = self.conv_4(x)
feats = torch.cat((x1, x2, x3, x4), dim=1)
feats = feats.view(batch_size, 4, self.split_channel, feats.shape[2], feats.shape[3])
x1_se = self.se(x1)
x2_se = self.se(x2)
x3_se = self.se(x3)
x4_se = self.se(x4)
x_se = torch.cat((x1_se, x2_se, x3_se, x4_se), dim=1)
attention_vectors = x_se.view(batch_size, 4, self.split_channel, 1, 1)
attention_vectors = self.softmax(attention_vectors)
feats_weight = feats * attention_vectors
for i in range(4):
x_se_weight_fp = feats_weight[:, i, :, :]
if i == 0:
out = x_se_weight_fp
else:
out = torch.cat((x_se_weight_fp, out), 1)
return out
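# A minimal usage sketch: the pyramid split attention block preserves the
# spatial size and channel count of its input.
def _example_psa_module():
    psa = PSAModule(inplans=64, planes=64)
    out = psa(torch.rand(2, 64, 32, 32))
    return out.shape  # torch.Size([2, 64, 32, 32])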
class EPSABlock(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None, conv_kernels=[3, 5, 7, 9],
conv_groups=[1, 4, 8, 16]):
super(EPSABlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = norm_layer(planes)
self.conv2 = PSAModule(planes, planes, stride=stride, conv_kernels=conv_kernels, conv_groups=conv_groups)
self.bn2 = norm_layer(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class EPSANet(nn.Module):
def __init__(self,block, layers, num_classes=1000):
super(EPSANet, self).__init__()
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layers(block, 64, layers[0], stride=1)
self.layer2 = self._make_layers(block, 128, layers[1], stride=2)
self.layer3 = self._make_layers(block, 256, layers[2], stride=2)
self.layer4 = self._make_layers(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layers(self, block, planes, num_blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, num_blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def epsanet50():
model = EPSANet(EPSABlock, [3, 4, 6, 3], num_classes=1000)
return model
def epsanet101():
model = EPSANet(EPSABlock, [3, 4, 23, 3], num_classes=1000)
return model
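# A minimal usage sketch: a forward pass on a random ImageNet-sized batch.
def _example_epsanet():
    model = epsanet50()
    logits = model(torch.rand(1, 3, 224, 224))
    return logits.shape  # torch.Size([1, 1000])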
| 6,122 | 35.664671 | 113 | py |
trieste-develop | trieste-develop/trieste/models/__init__.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
This package contains the primary interfaces for probabilistic models, :class:`ProbabilisticModel`
and its trainable subclass :class:`TrainableProbabilisticModel`. It also contains tooling for
creating :class:`TrainableProbabilisticModel`\ s from config.
"""
from . import gpflow, gpflux, keras, optimizer
from .interfaces import (
FastUpdateModel,
ModelStack,
ProbabilisticModel,
ProbabilisticModelType,
ReparametrizationSampler,
SupportsCovarianceWithTopFidelity,
TrainableModelStack,
TrainableProbabilisticModel,
TrajectoryFunction,
TrajectoryFunctionClass,
TrajectorySampler,
)
| 1,220 | 34.911765 | 98 | py |
trieste-develop | trieste-develop/trieste/models/optimizer.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
This module contains common optimizers based on :class:`~tf.optimizers.Optimizer` that can be used
with models. Specific models can also sub-class these optimizers or implement their own, and should
register their loss functions using a :func:`create_loss_function`.
"""
from __future__ import annotations
import copy
from dataclasses import dataclass, field
from functools import singledispatch
from typing import Any, Callable, Iterable, Optional, Tuple, Union
import scipy
import tensorflow as tf
import tensorflow_probability as tfp
from ..data import Dataset
from ..types import TensorType
from ..utils import jit
TrainingData = Union[Tuple[TensorType, TensorType], Iterable[Tuple[TensorType, TensorType]]]
""" Type alias for a batch, or batches, of training data. """
DatasetTransformer = Callable[[Dataset, Optional[int]], TrainingData]
"""
Type alias for a function that converts a :class:`~trieste.data.Dataset` to batches of training
data.
"""
LossClosure = Callable[[], TensorType]
""" Type alias for a loss closure, typically used in optimization. """
OptimizeResult = Union[scipy.optimize.OptimizeResult, None]
"""
Optimization result. TensorFlow optimizer doesn't return any result. For scipy optimizer that is
also commonly used, it is :class:`~scipy.optimize.OptimizeResult`.
"""
@dataclass
class Optimizer:
"""Optimizer for training models with all the training data at once."""
optimizer: Any
"""
The underlying optimizer to use. For example, one of the subclasses of
:class:`~tensorflow.optimizers.Optimizer` could be used. Note that we use a flexible type `Any`
to allow for various optimizers that specific models might need to use.
"""
minimize_args: dict[str, Any] = field(default_factory=lambda: {})
""" The keyword arguments to pass to the :meth:`minimize` method of the :attr:`optimizer`. """
compile: bool = False
""" If `True`, the optimization process will be compiled with :func:`~tf.function`. """
def create_loss(self, model: tf.Module, dataset: Dataset) -> LossClosure:
"""
Build a loss function for the specified `model` with the `dataset` using a
:func:`create_loss_function`.
:param model: The model to build a loss function for.
:param dataset: The data with which to build the loss function.
:return: The loss function.
"""
x = tf.convert_to_tensor(dataset.query_points)
y = tf.convert_to_tensor(dataset.observations)
data = (x, y)
return create_loss_function(model, data, self.compile)
def optimize(self, model: tf.Module, dataset: Dataset) -> OptimizeResult:
"""
Optimize the specified `model` with the `dataset`.
:param model: The model to optimize.
:param dataset: The data with which to optimize the `model`.
:return: The return value of the optimizer's :meth:`minimize` method.
"""
loss_fn = self.create_loss(model, dataset)
variables = model.trainable_variables
return self.optimizer.minimize(loss_fn, variables, **self.minimize_args)
@dataclass
class BatchOptimizer(Optimizer):
"""Optimizer for training models with mini-batches of training data."""
max_iter: int = 100
""" The number of iterations over which to optimize the model. """
batch_size: int = 100
""" The size of the mini-batches. """
dataset_builder: DatasetTransformer | None = None
""" A mapping from :class:`~trieste.observer.Observer` data to mini-batches. """
def create_loss(self, model: tf.Module, dataset: Dataset) -> LossClosure:
"""
Build a loss function for the specified `model` with the `dataset`.
:param model: The model to build a loss function for.
:param dataset: The data with which to build the loss function.
:return: The loss function.
"""
def creator_fn(data: TrainingData) -> LossClosure:
return create_loss_function(model, data, self.compile)
if self.dataset_builder is None:
return creator_fn(
iter(
tf.data.Dataset.from_tensor_slices(dataset.astuple())
.shuffle(len(dataset))
.batch(self.batch_size)
.prefetch(tf.data.experimental.AUTOTUNE)
.repeat()
)
)
return creator_fn(self.dataset_builder(dataset, self.batch_size))
def optimize(self, model: tf.Module, dataset: Dataset) -> None:
"""
Optimize the specified `model` with the `dataset`.
:param model: The model to optimize.
:param dataset: The data with which to optimize the `model`.
"""
loss_fn = self.create_loss(model, dataset)
variables = model.trainable_variables
@jit(apply=self.compile)
def train_fn() -> None:
self.optimizer.minimize(loss_fn, variables, **self.minimize_args)
for _ in range(self.max_iter):
train_fn()
def __deepcopy__(self, memo: dict[int, object]) -> BatchOptimizer:
# workaround for https://github.com/tensorflow/tensorflow/issues/58973
# (keras optimizers not being deepcopyable in TF 2.11 and 2.12)
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if (
k == "optimizer"
and isinstance(v, tf.keras.optimizers.Optimizer)
and hasattr(v, "_distribution_strategy")
):
# avoid copying distribution strategy: reuse it instead
strategy = v._distribution_strategy
v._distribution_strategy = None
try:
setattr(result, k, copy.deepcopy(v, memo))
finally:
v._distribution_strategy = strategy
result.optimizer._distribution_strategy = strategy
else:
setattr(result, k, copy.deepcopy(v, memo))
return result
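def _example_batch_optimizer_builder() -> None:  # pragma: no cover - illustrative sketch
    """A minimal illustrative sketch (not part of the public API): a custom
    ``dataset_builder`` equivalent to the default mini-batching above. The builder
    and the toy sizes here are hypothetical."""
    def builder(dataset: Dataset, batch_size: Optional[int]) -> TrainingData:
        # map a Dataset to an infinite iterator of shuffled mini-batches
        return iter(
            tf.data.Dataset.from_tensor_slices(dataset.astuple())
            .shuffle(len(dataset))
            .batch(batch_size or 32)
            .repeat()
        )
    BatchOptimizer(tf.optimizers.Adam(), max_iter=50, batch_size=32, dataset_builder=builder)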
@dataclass
class KerasOptimizer:
"""Optimizer wrapper for training models implemented with Keras."""
optimizer: tf.keras.optimizers.Optimizer
""" The underlying optimizer to use for training the model. """
fit_args: dict[str, Any] = field(default_factory=lambda: {})
"""
The keyword arguments to pass to the ``fit`` method of a :class:`~tf.keras.Model` instance.
See https://keras.io/api/models/model_training_apis/#fit-method for a list of possible
arguments in the dictionary.
"""
loss: Optional[
Union[
tf.keras.losses.Loss, Callable[[TensorType, tfp.distributions.Distribution], TensorType]
]
] = None
""" Optional loss function for training the model. """
metrics: Optional[list[tf.keras.metrics.Metric]] = None
""" Optional metrics for monitoring the performance of the network. """
def __deepcopy__(self, memo: dict[int, object]) -> KerasOptimizer:
# workaround for https://github.com/tensorflow/tensorflow/issues/58973
# (keras optimizers not being deepcopyable in TF 2.11 and 2.12)
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k == "optimizer" and hasattr(v, "_distribution_strategy"):
# avoid copying distribution strategy: reuse it instead
strategy = v._distribution_strategy
v._distribution_strategy = None
try:
setattr(result, k, copy.deepcopy(v, memo))
finally:
v._distribution_strategy = strategy
result.optimizer._distribution_strategy = strategy
else:
setattr(result, k, copy.deepcopy(v, memo))
return result
@singledispatch
def create_loss_function(model: Any, dataset: TrainingData, compile: bool = False) -> LossClosure:
"""
Generic function for building a loss function for a specified `model` and `dataset`.
    The implementation depends on the type of the model, which should use this function as a
    decorator together with its register method to make a model-specific loss function available.
:param model: The model to build a loss function for.
:param dataset: The data with which to build the loss function.
:param compile: Whether to compile with :func:`tf.function`.
:return: The loss function.
"""
raise NotImplementedError(f"Unknown model {model} passed for loss function extraction")
| 9,230 | 37.949367 | 100 | py |
trieste-develop | trieste-develop/trieste/models/gpflux/models.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Any, Callable, Optional
import dill
import gpflow
import tensorflow as tf
from gpflow.inducing_variables import InducingPoints
from gpflux.layers import GPLayer, LatentVariableLayer
from gpflux.models import DeepGP
from tensorflow.python.keras.callbacks import Callback
from ... import logging
from ...data import Dataset
from ...types import TensorType
from ..interfaces import (
HasReparamSampler,
HasTrajectorySampler,
ReparametrizationSampler,
TrainableProbabilisticModel,
TrajectorySampler,
)
from ..optimizer import KerasOptimizer
from ..utils import (
write_summary_data_based_metrics,
write_summary_kernel_parameters,
write_summary_likelihood_parameters,
)
from .interface import GPfluxPredictor
from .sampler import (
DeepGaussianProcessDecoupledTrajectorySampler,
DeepGaussianProcessReparamSampler,
)
class DeepGaussianProcess(
GPfluxPredictor, TrainableProbabilisticModel, HasReparamSampler, HasTrajectorySampler
):
"""
A :class:`TrainableProbabilisticModel` wrapper for a GPflux :class:`~gpflux.models.DeepGP` with
    :class:`GPLayer` or :class:`LatentVariableLayer` layers; this class does not support
    e.g. arbitrary Keras layers.
We provide simple architectures that can be used with this class in the `architectures.py` file.
"""
def __init__(
self,
model: DeepGP | Callable[[], DeepGP],
optimizer: KerasOptimizer | None = None,
num_rff_features: int = 1000,
continuous_optimisation: bool = True,
):
"""
:param model: The underlying GPflux deep Gaussian process model. Passing in a named closure
rather than a model can help when copying or serialising.
:param optimizer: The optimizer wrapper with necessary specifications for compiling and
training the model. Defaults to :class:`~trieste.models.optimizer.KerasOptimizer` with
:class:`~tf.optimizers.Adam` optimizer, mean squared error metric and a dictionary of
default arguments for the Keras `fit` method: 400 epochs, batch size of 1000, and
verbose 0. A custom callback that reduces the optimizer learning rate is used as well.
See https://keras.io/api/models/model_training_apis/#fit-method for a list of possible
arguments.
:param num_rff_features: The number of random Fourier features used to approximate the
kernel when calling :meth:`trajectory_sampler`. We use a default of 1000 as it typically
performs well for a wide range of kernels. Note that very smooth kernels (e.g. RBF) can
be well-approximated with fewer features.
:param continuous_optimisation: if True (default), the optimizer will keep track of the
number of epochs across BO iterations and use this number as initial_epoch. This is
essential to allow monitoring of model training across BO iterations.
        :raise ValueError: If ``model`` has unsupported layers, ``num_rff_features`` is not positive,
or if the ``optimizer`` is not of a supported type.
"""
if isinstance(model, DeepGP):
self._model_closure = None
else:
self._model_closure = model
model = model()
for layer in model.f_layers:
if not isinstance(layer, (GPLayer, LatentVariableLayer)):
raise ValueError(
f"`DeepGaussianProcess` can only be built out of `GPLayer` or"
f"`LatentVariableLayer`, received {type(layer)} instead."
)
super().__init__(optimizer)
if num_rff_features <= 0:
raise ValueError(
f"num_rff_features must be greater or equal to zero, got {num_rff_features}."
)
self._num_rff_features = num_rff_features
if not isinstance(self.optimizer.optimizer, tf.optimizers.Optimizer):
raise ValueError(
f"Optimizer for `DeepGaussianProcess` must be an instance of a "
f"`tf.optimizers.Optimizer` or `tf.keras.optimizers.Optimizer`, "
f"received {type(self.optimizer.optimizer)} instead."
)
if not isinstance(
self.optimizer.optimizer.lr, tf.keras.optimizers.schedules.LearningRateSchedule
):
self.original_lr = self.optimizer.optimizer.lr.numpy()
epochs = 400
def scheduler(epoch: int, lr: float) -> float:
if epoch == epochs // 2:
return lr * 0.1
else:
return lr
if not self.optimizer.fit_args:
self.optimizer.fit_args = {
"verbose": 0,
"epochs": epochs,
"batch_size": 1000,
"callbacks": [tf.keras.callbacks.LearningRateScheduler(scheduler)],
}
if self.optimizer.metrics is None:
self.optimizer.metrics = ["mse"]
self._model_gpflux = model
# inputs and targets need to be redone with a float64 dtype to avoid setting the keras
# backend to float64, this is likely to be fixed in GPflux, see issue:
# https://github.com/secondmind-labs/GPflux/issues/76
self._model_gpflux.inputs = tf.keras.Input(
tuple(self._model_gpflux.inputs.shape[:-1]),
name=self._model_gpflux.inputs.name,
dtype=tf.float64,
)
self._model_gpflux.targets = tf.keras.Input(
tuple(self._model_gpflux.targets.shape[:-1]),
name=self._model_gpflux.targets.name,
dtype=tf.float64,
)
self._model_keras = model.as_training_model()
self._model_keras.compile(self.optimizer.optimizer, metrics=self.optimizer.metrics)
self._absolute_epochs = 0
self._continuous_optimisation = continuous_optimisation
def __getstate__(self) -> dict[str, Any]:
state = self.__dict__.copy()
# when using a model closure, store the model parameters, not the model itself
if self._model_closure is not None:
state["_model_gpflux"] = gpflow.utilities.parameter_dict(self._model_gpflux)
state["_model_keras"] = gpflow.utilities.parameter_dict(self._model_keras)
# use to_json and get_weights to save any optimizer fit_arg callback models
callbacks: list[Callback] = self._optimizer.fit_args.get("callbacks", [])
callback: Callback
        saved_models: list[tf.keras.Model] = []
tensorboard_writers: list[dict[str, Any]] = []
try:
for callback in callbacks:
# serialize the callback models before pickling the optimizer
saved_models.append(callback.model)
if callback.model is self._model_keras:
# no need to serialize the main model, just use a special value instead
callback.model = ...
elif callback.model:
callback.model = (callback.model.to_json(), callback.model.get_weights())
# don't pickle tensorboard writers either; they'll be recreated when needed
if isinstance(callback, tf.keras.callbacks.TensorBoard):
tensorboard_writers.append(callback._writers)
callback._writers = {}
state["_optimizer"] = dill.dumps(state["_optimizer"])
except Exception as e:
raise NotImplementedError(
"Failed to copy DeepGaussianProcess optimizer due to unsupported callbacks."
) from e
finally:
# revert original state, even if the pickling failed
for callback, model in zip(self._optimizer.fit_args.get("callbacks", []), saved_models):
callback.model = model
for callback, writers in zip(
(cb for cb in callbacks if isinstance(cb, tf.keras.callbacks.TensorBoard)),
tensorboard_writers,
):
callback._writers = writers
# do the same thing for the history callback
if self._model_keras.history:
history_model = self._model_keras.history.model
try:
if history_model is self._model_keras:
# no need to serialize the main model, just use a special value instead
self._model_keras.history.model = ...
elif history_model:
self._model_keras.history.model = (
history_model.to_json(),
history_model.get_weights(),
)
state["_history"] = dill.dumps(self._model_keras.history)
finally:
self._model_keras.history.model = history_model
return state
def __setstate__(self, state: dict[str, Any]) -> None:
self.__dict__.update(state)
# regenerate the models using the model closure
if self._model_closure is not None:
dgp: DeepGP = state["_model_closure"]()
self._model_gpflux = dgp
# inputs and targets need to be redone with a float64 dtype to avoid setting the keras
# backend to float64, this is likely to be fixed in GPflux, see issue:
# https://github.com/secondmind-labs/GPflux/issues/76
self._model_gpflux.inputs = tf.keras.Input(
tuple(self._model_gpflux.inputs.shape[:-1]),
name=self._model_gpflux.inputs.name,
dtype=tf.float64,
)
self._model_gpflux.targets = tf.keras.Input(
tuple(self._model_gpflux.targets.shape[:-1]),
name=self._model_gpflux.targets.name,
dtype=tf.float64,
)
self._model_keras = dgp.as_training_model()
# unpickle the optimizer, and restore all the callback models
self._optimizer = dill.loads(self._optimizer)
for callback in self._optimizer.fit_args.get("callbacks", []):
if callback.model is ...:
callback.set_model(self._model_keras)
elif callback.model:
model_json, weights = callback.model
model = tf.keras.models.model_from_json(model_json)
model.set_weights(weights)
callback.set_model(model)
# recompile the model
self._model_keras.compile(self._optimizer.optimizer)
# assign the model parameters
if self._model_closure is not None:
gpflow.utilities.multiple_assign(self._model_gpflux, state["_model_gpflux"])
gpflow.utilities.multiple_assign(self._model_keras, state["_model_keras"])
# restore the history (including any model it contains)
if "_history" in state:
self._model_keras.history = dill.loads(state["_history"])
if self._model_keras.history.model is ...:
self._model_keras.history.set_model(self._model_keras)
elif self._model_keras.history.model:
model_json, weights = self._model_keras.history.model
model = tf.keras.models.model_from_json(model_json)
model.set_weights(weights)
self._model_keras.history.set_model(model)
def __repr__(self) -> str:
""""""
return f"DeepGaussianProcess({self.model_gpflux!r}, {self.optimizer.optimizer!r})"
@property
def model_gpflux(self) -> DeepGP:
return self._model_gpflux
@property
def model_keras(self) -> tf.keras.Model:
return self._model_keras
def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
trajectory = self.trajectory_sampler().get_trajectory()
expanded_query_points = tf.expand_dims(query_points, -2) # [N, 1, D]
tiled_query_points = tf.tile(expanded_query_points, [1, num_samples, 1]) # [N, S, D]
return tf.transpose(trajectory(tiled_query_points), [1, 0, 2]) # [S, N, L]
def reparam_sampler(self, num_samples: int) -> ReparametrizationSampler[GPfluxPredictor]:
"""
Return a reparametrization sampler for a :class:`DeepGaussianProcess` model.
:param num_samples: The number of samples to obtain.
:return: The reparametrization sampler.
"""
return DeepGaussianProcessReparamSampler(num_samples, self)
def trajectory_sampler(self) -> TrajectorySampler[GPfluxPredictor]:
"""
Return a trajectory sampler. For :class:`DeepGaussianProcess`, we build
trajectories using the GPflux default sampler.
:return: The trajectory sampler.
"""
return DeepGaussianProcessDecoupledTrajectorySampler(self, self._num_rff_features)
def update(self, dataset: Dataset) -> None:
inputs = dataset.query_points
new_num_data = inputs.shape[0]
self.model_gpflux.num_data = new_num_data
# Update num_data for each layer, as well as make sure dataset shapes are ok
for i, layer in enumerate(self.model_gpflux.f_layers):
if hasattr(layer, "num_data"):
layer.num_data = new_num_data
if isinstance(layer, LatentVariableLayer):
inputs = layer(inputs)
continue
if isinstance(layer.inducing_variable, InducingPoints):
inducing_variable = layer.inducing_variable
else:
inducing_variable = layer.inducing_variable.inducing_variable
if inputs.shape[-1] != inducing_variable.Z.shape[-1]:
raise ValueError(
f"Shape {inputs.shape} of input to layer {layer} is incompatible with shape"
f" {inducing_variable.Z.shape} of that layer. Trailing dimensions must match."
)
if (
i == len(self.model_gpflux.f_layers) - 1
and dataset.observations.shape[-1] != layer.q_mu.shape[-1]
):
raise ValueError(
f"Shape {dataset.observations.shape} of new observations is incompatible"
f" with shape {layer.q_mu.shape} of existing observations. Trailing"
f" dimensions must match."
)
inputs = layer(inputs)
def optimize(self, dataset: Dataset) -> None:
"""
Optimize the model with the specified `dataset`.
:param dataset: The data with which to optimize the `model`.
"""
fit_args = dict(self.optimizer.fit_args)
# Tell optimizer how many epochs have been used before: the optimizer will "continue"
# optimization across multiple BO iterations rather than start fresh at each iteration.
# This allows us to monitor training across iterations.
if "epochs" in fit_args:
fit_args["epochs"] = fit_args["epochs"] + self._absolute_epochs
hist = self.model_keras.fit(
{"inputs": dataset.query_points, "targets": dataset.observations},
**fit_args,
initial_epoch=self._absolute_epochs,
)
if self._continuous_optimisation:
self._absolute_epochs = self._absolute_epochs + len(hist.history["loss"])
# Reset lr in case there was an lr schedule: a schedule will have changed the learning
# rate, so that the next time we call `optimize` the starting learning rate would be
# different. Therefore, we make sure the learning rate is set back to its initial value.
# However, this is not needed for `LearningRateSchedule` instances.
if not isinstance(
self.optimizer.optimizer.lr, tf.keras.optimizers.schedules.LearningRateSchedule
):
self.optimizer.optimizer.lr.assign(self.original_lr)
def log(self, dataset: Optional[Dataset] = None) -> None:
"""
Log model training information at a given optimization step to the Tensorboard.
We log a few summary statistics of losses, layer KL divergences and metrics (as provided in
        ``optimizer``): the ``final`` value at the end of training and the ``diff`` value as the
        difference between the initial and final epochs. We also log epoch statistics, but as
        histograms rather than time series, as well as several training-data-based metrics, such
        as the root mean square error between predictions and observations.
        For custom logs the user will need to subclass the model and overwrite this method.
:param dataset: Optional data that can be used to log additional data-based model summaries.
"""
summary_writer = logging.get_tensorboard_writer()
if summary_writer:
with summary_writer.as_default(step=logging.get_step_number()):
logging.scalar("epochs/num_epochs", len(self.model_keras.history.epoch))
for idx, layer in enumerate(self.model_gpflux.f_layers):
write_summary_kernel_parameters(layer.kernel, prefix=f"layer[{idx}]/")
write_summary_likelihood_parameters(self.model_gpflux.likelihood_layer.likelihood)
for k, v in self.model_keras.history.history.items():
logging.histogram(f"{k}/epoch", lambda: v)
logging.scalar(f"{k}/final", lambda: v[-1])
logging.scalar(f"{k}/diff", lambda: v[0] - v[-1])
if dataset:
write_summary_data_based_metrics(
dataset=dataset, model=self, prefix="training_"
)
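def _example_deep_gp(model_builder: Callable[[], DeepGP]) -> None:  # pragma: no cover
    """A minimal illustrative sketch (not part of the public API) of the intended
    usage pattern. ``model_builder`` is a hypothetical user-supplied closure
    returning a GPflux :class:`DeepGP`, e.g. built with the helpers in the
    `architectures.py` file; passing a closure rather than a built model keeps the
    wrapper copyable."""
    import numpy as np
    x = np.random.rand(100, 2)
    y = np.sin(x.sum(-1, keepdims=True))
    data = Dataset(tf.constant(x), tf.constant(y))
    dgp = DeepGaussianProcess(model_builder)
    dgp.optimize(data)
    mean, var = dgp.predict(data.query_points)
    samples = dgp.sample(data.query_points, num_samples=10)  # via the trajectory sampler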
| 18,255 | 44.526185 | 100 | py |
trieste-develop | trieste-develop/trieste/models/gpflux/interface.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from abc import ABC, abstractmethod
import tensorflow as tf
from gpflow.base import Module
from ...types import TensorType
from ..interfaces import SupportsGetObservationNoise
from ..optimizer import KerasOptimizer
class GPfluxPredictor(SupportsGetObservationNoise, ABC):
"""
A trainable wrapper for a GPflux deep Gaussian process model. The code assumes subclasses
will use the Keras `fit` method for training, and so they should provide access to both a
`model_keras` and `model_gpflux`.
"""
def __init__(self, optimizer: KerasOptimizer | None = None):
"""
:param optimizer: The optimizer wrapper containing the optimizer with which to train the
model and arguments for the wrapper and the optimizer. The optimizer must
be an instance of a :class:`~tf.optimizers.Optimizer`. Defaults to
:class:`~tf.optimizers.Adam` optimizer with 0.01 learning rate.
"""
if optimizer is None:
optimizer = KerasOptimizer(tf.optimizers.Adam(0.01))
self._optimizer = optimizer
@property
@abstractmethod
def model_gpflux(self) -> Module:
"""The underlying GPflux model."""
@property
@abstractmethod
def model_keras(self) -> tf.keras.Model:
"""Returns the compiled Keras model for training."""
@property
def optimizer(self) -> KerasOptimizer:
"""The optimizer wrapper for training the model."""
return self._optimizer
def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
"""Note: unless otherwise noted, this returns the mean and variance of the last layer
conditioned on one sample from the previous layers."""
return self.model_gpflux.predict_f(query_points)
@abstractmethod
def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
raise NotImplementedError
def predict_y(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
"""Note: unless otherwise noted, this will return the prediction conditioned on one sample
from the lower layers."""
f_mean, f_var = self.model_gpflux.predict_f(query_points)
return self.model_gpflux.likelihood_layer.likelihood.predict_mean_and_var(
query_points, f_mean, f_var
)
def get_observation_noise(self) -> TensorType:
"""
Return the variance of observation noise for homoscedastic likelihoods.
:return: The observation noise.
:raise NotImplementedError: If the model does not have a homoscedastic likelihood.
"""
try:
noise_variance = self.model_gpflux.likelihood_layer.likelihood.variance
except AttributeError:
raise NotImplementedError(f"Model {self!r} does not have scalar observation noise")
return noise_variance
| 3,501 | 37.483516 | 98 | py |
trieste-develop | trieste-develop/trieste/models/keras/architectures.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains implementations of neural network architectures with Keras.
"""
from __future__ import annotations
from abc import abstractmethod
from typing import Any, Callable, Sequence
import dill
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.layers.distribution_layer import DistributionLambda, _serialize
from trieste.types import TensorType
class KerasEnsemble:
"""
This class builds an ensemble of neural networks, using Keras. Individual networks must
be instance of :class:`~trieste.models.keras_networks.KerasEnsembleNetwork`. This class
is meant to be used with :class:`~trieste.models.keras_networks.DeepEnsemble` model wrapper,
which compiles the model.
"""
def __init__(
self,
networks: Sequence[KerasEnsembleNetwork],
) -> None:
"""
:param networks: A list of neural network specifications, one for each member of the
ensemble. The ensemble will be built using these specifications.
:raise ValueError: If there are no objects in ``networks`` or we try to create
a model with networks whose input or output shapes are not the same.
"""
if not networks:
raise ValueError(
f"networks should consist of KerasEnsembleNetwork objects, however"
f"received {networks} instead."
)
input_shapes, output_shapes = [], []
for index, network in enumerate(networks):
network.network_name = f"model_{index}_"
input_shapes.append(network.input_tensor_spec.shape)
output_shapes.append(network.output_tensor_spec.shape)
if not all(x == input_shapes[0] for x in input_shapes):
raise ValueError(
f"Input shapes for all networks must be the same, however"
f"received {input_shapes} instead."
)
if not all(x == output_shapes[0] for x in output_shapes):
raise ValueError(
f"Output shapes for all networks must be the same, however"
f"received {output_shapes} instead."
)
self.num_outputs = networks[0].flattened_output_shape
self._networks = networks
self._model = self._build_ensemble()
def __repr__(self) -> str:
""""""
return f"KerasEnsemble({self._networks!r})"
@property
def model(self) -> tf.keras.Model:
"""Returns built but uncompiled Keras ensemble model."""
return self._model
@property
def ensemble_size(self) -> int:
"""
Returns the size of the ensemble, that is, the number of base learners or individual neural
network models in the ensemble.
"""
return len(self._networks)
def _build_ensemble(self) -> tf.keras.Model:
"""
Builds the ensemble model by combining all the individual networks in a single Keras model.
This method relies on ``connect_layers`` method of :class:`KerasEnsembleNetwork` objects
to construct individual networks.
:return: The Keras model.
"""
inputs, outputs = zip(*[network.connect_layers() for network in self._networks])
return tf.keras.Model(inputs=inputs, outputs=outputs)
def __getstate__(self) -> dict[str, Any]:
# When pickling use to_json to save the model.
state = self.__dict__.copy()
state["_model"] = self._model.to_json()
state["_weights"] = self._model.get_weights()
# Save the history callback (serializing any model)
if self._model.history:
history_model = self._model.history.model
try:
if history_model is self._model:
# no need to serialize the main model, just use a special value instead
self._model.history.model = ...
elif history_model:
self._model.history.model = (
history_model.to_json(),
history_model.get_weights(),
)
state["_history"] = dill.dumps(self._model.history)
finally:
self._model.history.model = history_model
return state
def __setstate__(self, state: dict[str, Any]) -> None:
# When unpickling restore the model using model_from_json.
self.__dict__.update(state)
self._model = tf.keras.models.model_from_json(
state["_model"], custom_objects={"MultivariateNormalTriL": MultivariateNormalTriL}
)
self._model.set_weights(state["_weights"])
# Restore the history (including any model it contains)
if "_history" in state:
self._model.history = dill.loads(state["_history"])
if self._model.history.model is ...:
self._model.history.set_model(self._model)
elif self._model.history.model:
model_json, weights = self._model.history.model
model = tf.keras.models.model_from_json(
model_json,
custom_objects={"MultivariateNormalTriL": MultivariateNormalTriL},
)
model.set_weights(weights)
self._model.history.set_model(model)
class KerasEnsembleNetwork:
"""
This class is an interface that defines necessary attributes and methods for neural networks
that are meant to be used for building ensembles by
:class:`~trieste.models.keras_networks.KerasEnsemble`. Subclasses are not meant to
build and compile Keras models, instead they are providing specification that
:class:`~trieste.models.keras_networks.KerasEnsemble` will use to build the Keras model.
"""
def __init__(
self,
input_tensor_spec: tf.TensorSpec,
output_tensor_spec: tf.TensorSpec,
network_name: str = "",
):
"""
:param input_tensor_spec: Tensor specification for the input to the network.
:param output_tensor_spec: Tensor specification for the output of the network.
:param network_name: The name to be used when building the network.
"""
if not isinstance(input_tensor_spec, tf.TensorSpec):
raise ValueError(
f"input_tensor_spec must be an instance of tf.TensorSpec, "
f"received {type(input_tensor_spec)} instead."
)
if not isinstance(output_tensor_spec, tf.TensorSpec):
raise ValueError(
f"output_tensor_spec must be an instance of tf.TensorSpec, "
f"received {type(output_tensor_spec)} instead."
)
self.input_tensor_spec = input_tensor_spec
self.output_tensor_spec = output_tensor_spec
self.network_name = network_name
@property
def input_layer_name(self) -> str:
return self.network_name + "input"
@property
def output_layer_name(self) -> str:
return self.network_name + "output"
@property
def flattened_output_shape(self) -> int:
return int(np.prod(self.output_tensor_spec.shape))
@abstractmethod
def connect_layers(self) -> tuple[tf.Tensor, tf.Tensor]:
"""
Connects the layers of the neural network. Architecture, layers and layer specifications
need to be defined by the subclasses.
:return: Input and output tensor of the network, required by :class:`tf.keras.Model` to
build a model.
"""
raise NotImplementedError
class MultivariateNormalTriL(tfp.layers.MultivariateNormalTriL): # type: ignore[misc]
"""Fixed version of tfp.layers.MultivariateNormalTriL that handles saving."""
def __init__(
self,
event_size: int,
convert_to_tensor_fn: Callable[
[tfp.python.distributions.Distribution], TensorType
] = tfp.python.distributions.Distribution.sample,
validate_args: bool = False,
**kwargs: Any,
) -> None:
self._event_size = event_size
self._validate_args = validate_args
super().__init__(event_size, convert_to_tensor_fn, validate_args, **kwargs)
def get_config(self) -> dict[str, Any]:
config = {
"event_size": self._event_size,
"validate_args": self._validate_args,
"convert_to_tensor_fn": _serialize(self._convert_to_tensor_fn),
}
# skip DistributionLambda's get_config because we don't want to serialize the
# make_distribution_fn: both to avoid confusing the constructor, and because it doesn't
# seem to work in TF2.4.
base_config = super(DistributionLambda, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class GaussianNetwork(KerasEnsembleNetwork):
"""
This class defines layers of a probabilistic neural network using Keras. The network
    architecture is a multilayer fully-connected feed-forward network, with a Gaussian
    distribution as the output. The layers are meant to be built as an ensemble model by
:class:`KerasEnsemble`. Note that this is not a Bayesian neural network.
"""
def __init__(
self,
input_tensor_spec: tf.TensorSpec,
output_tensor_spec: tf.TensorSpec,
hidden_layer_args: Sequence[dict[str, Any]] = (
{"units": 50, "activation": "relu"},
{"units": 50, "activation": "relu"},
),
independent: bool = False,
):
"""
:param input_tensor_spec: Tensor specification for the input to the network.
:param output_tensor_spec: Tensor specification for the output of the network.
:param hidden_layer_args: Specification for building dense hidden layers. Each element in
the sequence should be a dictionary containing arguments (keys) and their values for a
:class:`~tf.keras.layers.Dense` hidden layer. Please check Keras Dense layer API for
available arguments. Objects in the sequence will sequentially be used to add
            :class:`~tf.keras.layers.Dense` layers. The length of this sequence determines the
            number of hidden layers in the network. The default is two hidden layers, 50 nodes
            each, with ReLU activation functions. An empty sequence needs to be passed to have no
            hidden layers.
:param independent: In case multiple outputs are modeled, if set to `True` then
:class:`~tfp.layers.IndependentNormal` layer
is used as the output layer. This models outputs as independent, only the diagonal
elements of the covariance matrix are parametrized. If left as the default `False`,
then :class:`~tfp.layers.MultivariateNormalTriL` layer is used where correlations
between outputs are learned as well.
:raise ValueError: If objects in ``hidden_layer_args`` are not dictionaries.
"""
super().__init__(input_tensor_spec, output_tensor_spec)
self._hidden_layer_args = hidden_layer_args
self._independent = independent
def _gen_input_tensor(self) -> tf.keras.Input:
input_tensor = tf.keras.Input(
shape=self.input_tensor_spec.shape,
dtype=self.input_tensor_spec.dtype,
name=self.input_layer_name,
)
return input_tensor
def _gen_hidden_layers(self, input_tensor: tf.Tensor) -> tf.Tensor:
for index, hidden_layer_args in enumerate(self._hidden_layer_args):
layer_name = f"{self.network_name}dense_{index}"
layer = tf.keras.layers.Dense(**hidden_layer_args, name=layer_name)
input_tensor = layer(input_tensor)
return input_tensor
def _gen_multi_output_layer(self, input_tensor: tf.Tensor) -> tf.Tensor:
dist_layer = tfp.layers.IndependentNormal if self._independent else MultivariateNormalTriL
n_params = dist_layer.params_size(self.flattened_output_shape)
parameter_layer = tf.keras.layers.Dense(
n_params, name=self.network_name + "dense_parameters"
)(input_tensor)
distribution = dist_layer(
self.flattened_output_shape,
tfp.python.distributions.Distribution.mean,
name=self.output_layer_name,
)(parameter_layer)
return distribution
def _gen_single_output_layer(self, input_tensor: tf.Tensor) -> tf.Tensor:
parameter_layer = tf.keras.layers.Dense(2, name=self.network_name + "dense_parameters")(
input_tensor
)
def distribution_fn(inputs: TensorType) -> tfp.distributions.Distribution:
return tfp.distributions.Normal(inputs[..., :1], tf.math.softplus(inputs[..., 1:]))
distribution = tfp.layers.DistributionLambda(
make_distribution_fn=distribution_fn,
convert_to_tensor_fn=tfp.distributions.Distribution.mean,
name=self.output_layer_name,
)(parameter_layer)
return distribution
def connect_layers(self) -> tuple[tf.Tensor, tf.Tensor]:
"""
Connect all layers in the network. We start by generating an input tensor based on input
tensor specification. Next we generate a sequence of hidden dense layers based on
hidden layer arguments. Finally, we generate a dense layer whose nodes act as parameters of
a Gaussian distribution in the final probabilistic layer.
:return: Input and output tensor of the sequence of layers.
"""
input_tensor = self._gen_input_tensor()
hidden_tensor = self._gen_hidden_layers(input_tensor)
if self.flattened_output_shape == 1:
output_tensor = self._gen_single_output_layer(hidden_tensor)
else:
output_tensor = self._gen_multi_output_layer(hidden_tensor)
return input_tensor, output_tensor
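def _example_keras_ensemble() -> None:  # pragma: no cover - illustrative sketch
    """A minimal illustrative sketch (not part of the public API): assembling a
    three-member ensemble of :class:`GaussianNetwork` base learners by hand, with
    hypothetical tensor specs and layer sizes. Compilation is left to the model
    wrapper."""
    input_spec = tf.TensorSpec(shape=(2,), dtype=tf.float64)
    output_spec = tf.TensorSpec(shape=(1,), dtype=tf.float64)
    networks = [
        GaussianNetwork(input_spec, output_spec, ({"units": 25, "activation": "relu"},))
        for _ in range(3)
    ]
    ensemble = KerasEnsemble(networks)
    assert ensemble.ensemble_size == 3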
| 14,523 | 40.497143 | 100 | py |
trieste-develop | trieste-develop/trieste/models/keras/builders.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains builders for Keras models supported in Trieste. We found the default
configurations used here to work well in most situations, but they should not be taken as
universally good solutions.
"""
from __future__ import annotations
from typing import Union
import tensorflow as tf
from ...data import Dataset
from .architectures import GaussianNetwork, KerasEnsemble
from .utils import get_tensor_spec_from_data
def build_keras_ensemble(
data: Dataset,
ensemble_size: int = 5,
num_hidden_layers: int = 2,
units: int = 25,
activation: Union[str, tf.keras.layers.Activation] = "relu",
independent_normal: bool = False,
) -> KerasEnsemble:
"""
Builds a simple ensemble of neural networks in Keras where each network has the same
architecture: number of hidden layers, nodes in hidden layers and activation function.
    The default ensemble size and activation function seem to work well in practice, at least in
    regression-type problems. The number of hidden layers and units per layer should be modified
    according to the dataset size and the complexity of the function; the default values seem to
    work well for the small datasets common in Bayesian optimization. Using the independent
    normal is relevant only if one is modelling multiple output variables, as it simplifies the
    distribution by ignoring correlations between outputs.
:param data: Data for training, used for extracting input and output tensor specifications.
:param ensemble_size: The size of the ensemble, that is, the number of base learners or
individual neural networks in the ensemble.
:param num_hidden_layers: The number of hidden layers in each network.
:param units: The number of nodes in each hidden layer.
:param activation: The activation function in each hidden layer.
:param independent_normal: If set to `True` then :class:`~tfp.layers.IndependentNormal` layer
is used as the output layer. This models outputs as independent, only the diagonal
elements of the covariance matrix are parametrized. If left as the default `False`,
then :class:`~tfp.layers.MultivariateNormalTriL` layer is used where correlations
between outputs are learned as well. Note that this is only relevant for multi-output
models.
:return: Keras ensemble model.
"""
input_tensor_spec, output_tensor_spec = get_tensor_spec_from_data(data)
hidden_layer_args = []
for i in range(num_hidden_layers):
hidden_layer_args.append({"units": units, "activation": activation})
networks = [
GaussianNetwork(
input_tensor_spec,
output_tensor_spec,
hidden_layer_args,
independent_normal,
)
for _ in range(ensemble_size)
]
keras_ensemble = KerasEnsemble(networks)
return keras_ensemble
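def _example_build_keras_ensemble() -> None:  # pragma: no cover - illustrative sketch
    """A minimal illustrative sketch (not part of the public API): building an
    ensemble from a hypothetical toy dataset with the defaults above."""
    import numpy as np
    data = Dataset(
        tf.constant(np.random.rand(20, 3)),
        tf.constant(np.random.rand(20, 1)),
    )
    ensemble = build_keras_ensemble(data, ensemble_size=5, num_hidden_layers=2, units=25)
    assert ensemble.ensemble_size == 5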
| 3,471 | 40.831325 | 99 | py |
trieste-develop | trieste-develop/trieste/models/keras/models.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import re
from typing import Any, Dict, Optional
import dill
import tensorflow as tf
import tensorflow_probability as tfp
import tensorflow_probability.python.distributions as tfd
from tensorflow.python.keras.callbacks import Callback
from ... import logging
from ...data import Dataset
from ...types import TensorType
from ...utils import flatten_leading_dims
from ..interfaces import HasTrajectorySampler, TrainableProbabilisticModel, TrajectorySampler
from ..optimizer import KerasOptimizer
from ..utils import write_summary_data_based_metrics
from .architectures import KerasEnsemble, MultivariateNormalTriL
from .interface import DeepEnsembleModel, KerasPredictor
from .sampler import DeepEnsembleTrajectorySampler
from .utils import negative_log_likelihood, sample_model_index, sample_with_replacement
class DeepEnsemble(
KerasPredictor, TrainableProbabilisticModel, DeepEnsembleModel, HasTrajectorySampler
):
"""
A :class:`~trieste.model.TrainableProbabilisticModel` wrapper for deep ensembles built using
Keras.
    Deep ensembles are ensembles of deep neural networks that have been found to give a good
    representation of uncertainty in practice (<cite data-cite="lakshminarayanan2017simple"/>).
    This makes them a potentially attractive model for Bayesian optimization in use-cases with a
    large number of observations, non-stationary objective functions and a need for fast
    predictions, where standard Gaussian process models are likely to struggle. The model consists
    of simple fully connected multilayer probabilistic networks as base learners, with a Gaussian
    distribution as the final layer, using the negative log-likelihood loss for training the
    networks. The
model relies on differences in random initialization of weights for generating diversity among
base learners.
    The original formulation of the model does not include bootstrapping of the data. The authors
    found that it does not improve performance of the model. We include bootstrapping as an
    option, as later work that more precisely measured uncertainty quantification found that
    bootstrapping does help with uncertainty representation (see
    <cite data-cite="osband2021epistemic"/>).
We provide classes for constructing ensembles using Keras
(:class:`~trieste.models.keras.KerasEnsemble`) in the `architectures` package that should be
used with the :class:`~trieste.models.keras.DeepEnsemble` wrapper. There we also provide a
:class:`~trieste.models.keras.GaussianNetwork` base learner following the original
formulation in <cite data-cite="lakshminarayanan2017simple"/>, but any user-specified network
can be supplied, as long as it has a Gaussian distribution as a final layer and follows the
:class:`~trieste.models.keras.KerasEnsembleNetwork` interface.
    A word of caution in case a learning rate scheduler is used in the ``fit_args`` of a
    :class:`KerasOptimizer` instance. Typically one would not want to continue with the
reduced learning rate in the subsequent Bayesian optimization step. Hence, we reset the
learning rate to the original one after calling the ``fit`` method. In case this is not the
behaviour you would like, you will need to subclass the model and overwrite the
:meth:`optimize` method.
Currently we do not support setting up the model with dictionary config.
"""
def __init__(
self,
model: KerasEnsemble,
optimizer: Optional[KerasOptimizer] = None,
bootstrap: bool = False,
diversify: bool = False,
continuous_optimisation: bool = True,
) -> None:
"""
:param model: A Keras ensemble model with probabilistic networks as ensemble members. The
model has to be built but not compiled.
:param optimizer: The optimizer wrapper with necessary specifications for compiling and
training the model. Defaults to :class:`~trieste.models.optimizer.KerasOptimizer` with
:class:`~tf.optimizers.Adam` optimizer, negative log likelihood loss, mean squared
error metric and a dictionary of default arguments for Keras `fit` method: 3000 epochs,
batch size 16, early stopping callback with patience of 50, and verbose 0.
See https://keras.io/api/models/model_training_apis/#fit-method for a list of possible
arguments.
:param bootstrap: Sample with replacement data for training each network in the ensemble.
By default set to `False`.
:param diversify: Whether to use quantiles from the approximate Gaussian distribution of
the ensemble as trajectories instead of mean predictions when calling
:meth:`trajectory_sampler`. This mode can be used to increase the diversity
in case of optimizing very large batches of trajectories. By
default set to `False`.
:param continuous_optimisation: If True (default), the optimizer will keep track of the
number of epochs across BO iterations and use this number as initial_epoch. This is
essential to allow monitoring of model training across BO iterations.
:raise ValueError: If ``model`` is not an instance of
:class:`~trieste.models.keras.KerasEnsemble` or ensemble has less than two base
learners (networks).
"""
if model.ensemble_size < 2:
raise ValueError(f"Ensemble size must be greater than 1 but got {model.ensemble_size}.")
super().__init__(optimizer)
if not self.optimizer.fit_args:
self.optimizer.fit_args = {
"verbose": 0,
"epochs": 3000,
"batch_size": 16,
"callbacks": [
tf.keras.callbacks.EarlyStopping(
monitor="loss", patience=50, restore_best_weights=True
)
],
}
if self.optimizer.loss is None:
self.optimizer.loss = negative_log_likelihood
if self.optimizer.metrics is None:
self.optimizer.metrics = ["mse"]
model.model.compile(
self.optimizer.optimizer,
loss=[self.optimizer.loss] * model.ensemble_size,
metrics=[self.optimizer.metrics] * model.ensemble_size,
)
if not isinstance(
self.optimizer.optimizer.lr, tf.keras.optimizers.schedules.LearningRateSchedule
):
self.original_lr = self.optimizer.optimizer.lr.numpy()
self._absolute_epochs = 0
self._continuous_optimisation = continuous_optimisation
self._model = model
self._bootstrap = bootstrap
self._diversify = diversify
def __repr__(self) -> str:
""""""
return (
f"DeepEnsemble({self.model!r}, {self.optimizer!r}, {self._bootstrap!r}, "
f"{self._diversify!r})"
)
@property
def model(self) -> tf.keras.Model:
""" " Returns compiled Keras ensemble model."""
return self._model.model
@property
def ensemble_size(self) -> int:
"""
Returns the size of the ensemble, that is, the number of base learners or individual neural
network models in the ensemble.
"""
return self._model.ensemble_size
@property
def num_outputs(self) -> int:
"""
Returns the number of outputs trained on by each member network.
"""
return self._model.num_outputs
def prepare_dataset(
self, dataset: Dataset
) -> tuple[Dict[str, TensorType], Dict[str, TensorType]]:
"""
Transform ``dataset`` into inputs and outputs with correct names that can be used for
training the :class:`KerasEnsemble` model.
If ``bootstrap`` argument in the :class:`~trieste.models.keras.DeepEnsemble` is set to
`True`, data will be additionally sampled with replacement, independently for
each network in the ensemble.
:param dataset: A dataset with ``query_points`` and ``observations`` tensors.
:return: A dictionary with input data and a dictionary with output data.
"""
inputs = {}
outputs = {}
for index in range(self.ensemble_size):
if self._bootstrap:
resampled_data = sample_with_replacement(dataset)
else:
resampled_data = dataset
input_name = self.model.input_names[index]
output_name = self.model.output_names[index]
inputs[input_name], outputs[output_name] = resampled_data.astuple()
return inputs, outputs
def prepare_query_points(self, query_points: TensorType) -> Dict[str, TensorType]:
"""
Transform ``query_points`` into inputs with correct names that can be used for
predicting with the model.
:param query_points: A tensor with ``query_points``.
:return: A dictionary with query_points prepared for predictions.
"""
inputs = {}
for index in range(self.ensemble_size):
inputs[self.model.input_names[index]] = query_points
return inputs
def ensemble_distributions(self, query_points: TensorType) -> tuple[tfd.Distribution, ...]:
"""
Return distributions for each member of the ensemble.
:param query_points: The points at which to return distributions.
:return: The distributions for the observations at the specified
``query_points`` for each member of the ensemble.
"""
x_transformed: dict[str, TensorType] = self.prepare_query_points(query_points)
return self._model.model(x_transformed)
def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
r"""
Returns mean and variance at ``query_points`` for the whole ensemble.
Following <cite data-cite="lakshminarayanan2017simple"/> we treat the ensemble as a
uniformly-weighted Gaussian mixture model and combine the predictions as
        .. math:: p(y|\mathbf{x}) = M^{-1} \sum_{m=1}^M \mathcal{N}
            (\mu_{\theta_m}(\mathbf{x}),\,\sigma_{\theta_m}^{2}(\mathbf{x}))
        We further approximate the ensemble prediction as a Gaussian whose mean and variance
        are respectively the mean and variance of the mixture, given by
        .. math:: \mu_{*}(\mathbf{x}) = M^{-1} \sum_{m=1}^M \mu_{\theta_m}(\mathbf{x})
        .. math:: \sigma^2_{*}(\mathbf{x}) = M^{-1} \sum_{m=1}^M (\sigma_{\theta_m}^{2}(\mathbf{x})
            + \mu^2_{\theta_m}(\mathbf{x})) - \mu^2_{*}(\mathbf{x})
This method assumes that the final layer in each member of the ensemble is
probabilistic, an instance of :class:`~tfp.distributions.Distribution`. In particular, given
the nature of the approximations stated above the final layer should be a Gaussian
distribution with `mean` and `variance` methods.
:param query_points: The points at which to make predictions.
:return: The predicted mean and variance of the observations at the specified
``query_points``.
"""
# handle leading batch dimensions, while still allowing `Functional` to
# "allow (None,) and (None, 1) Tensors to be passed interchangeably"
input_dims = min(len(query_points.shape), len(self.model.input_shape[0]))
flat_x, unflatten = flatten_leading_dims(query_points, output_dims=input_dims)
ensemble_distributions = self.ensemble_distributions(flat_x)
predicted_means = tf.math.reduce_mean(
[dist.mean() for dist in ensemble_distributions], axis=0
)
predicted_vars = (
tf.math.reduce_mean(
[dist.variance() + dist.mean() ** 2 for dist in ensemble_distributions], axis=0
)
- predicted_means**2
)
return unflatten(predicted_means), unflatten(predicted_vars)
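    # A worked mini-check of the mixture moments above: with two members predicting
    # means (0, 2) and variances (1, 1),
    #   mu_* = (0 + 2) / 2 = 1
    #   sigma^2_* = ((1 + 0**2) + (1 + 2**2)) / 2 - 1**2 = 3 - 1 = 2,
    # so the combined variance exceeds either member's, reflecting ensemble
    # disagreement as epistemic uncertainty.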
def predict_ensemble(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
"""
Returns mean and variance at ``query_points`` for each member of the ensemble. First tensor
is the mean and second is the variance, where each has shape [..., M, N, 1], where M is
the ``ensemble_size``.
This method assumes that the final layer in each member of the ensemble is
        probabilistic, an instance of :class:`~tfp.distributions.Distribution`, in particular
`mean` and `variance` methods should be available.
:param query_points: The points at which to make predictions.
:return: The predicted mean and variance of the observations at the specified
``query_points`` for each member of the ensemble.
"""
ensemble_distributions = self.ensemble_distributions(query_points)
predicted_means = tf.convert_to_tensor([dist.mean() for dist in ensemble_distributions])
predicted_vars = tf.convert_to_tensor([dist.variance() for dist in ensemble_distributions])
return predicted_means, predicted_vars
def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
"""
Return ``num_samples`` samples at ``query_points``. We use the mixture approximation in
:meth:`predict` for ``query_points`` and sample ``num_samples`` times from a Gaussian
distribution given by the predicted mean and variance.
:param query_points: The points at which to sample, with shape [..., N, D].
:param num_samples: The number of samples at each point.
:return: The samples. For a predictive distribution with event shape E, this has shape
[..., S, N] + E, where S is the number of samples.
"""
predicted_means, predicted_vars = self.predict(query_points)
normal = tfp.distributions.Normal(predicted_means, tf.sqrt(predicted_vars))
samples = normal.sample(num_samples)
return samples # [num_samples, len(query_points), 1]
def sample_ensemble(self, query_points: TensorType, num_samples: int) -> TensorType:
"""
Return ``num_samples`` samples at ``query_points``. Each sample is taken from a Gaussian
distribution given by the predicted mean and variance of a randomly chosen network in the
ensemble. This avoids using the Gaussian mixture approximation and samples directly from
individual Gaussian distributions given by each network in the ensemble.
:param query_points: The points at which to sample, with shape [..., N, D].
:param num_samples: The number of samples at each point.
:return: The samples. For a predictive distribution with event shape E, this has shape
[..., S, N] + E, where S is the number of samples.
"""
ensemble_distributions = self.ensemble_distributions(query_points)
network_indices = sample_model_index(self.ensemble_size, num_samples)
stacked_samples = []
for i in range(num_samples):
stacked_samples.append(ensemble_distributions[network_indices[i]].sample())
samples = tf.stack(stacked_samples, axis=0)
return samples # [num_samples, len(query_points), 1]
def trajectory_sampler(self) -> TrajectorySampler[DeepEnsemble]:
"""
Return a trajectory sampler. For :class:`DeepEnsemble`, we use an ensemble
sampler that randomly picks a network from the ensemble and uses its predicted means
for generating a trajectory, or optionally randomly sampled quantiles rather than means.
:return: The trajectory sampler.
"""
return DeepEnsembleTrajectorySampler(self, self._diversify)
def update(self, dataset: Dataset) -> None:
"""
        Neural networks are parametric models and do not need to keep track of the data.
        The `TrainableProbabilisticModel` interface, however, requires an update method, so
        this method is a no-op.
"""
return
def optimize(self, dataset: Dataset) -> None:
"""
Optimize the underlying Keras ensemble model with the specified ``dataset``.
Optimization is performed by using the Keras `fit` method, rather than applying the
        optimizer and using the batches supplied with the optimizer wrapper. The user can pass
        arguments to the `fit` method through the ``fit_args`` argument in the optimizer wrapper.
        These default to 3000 epochs, batch size 16, and verbose 0. See
https://keras.io/api/models/model_training_apis/#fit-method for a list of possible
arguments.
Note that optimization does not return the result, instead optimization results are
stored in a history attribute of the model object.
:param dataset: The data with which to optimize the model.
"""
fit_args = dict(self.optimizer.fit_args)
# Tell optimizer how many epochs have been used before: the optimizer will "continue"
# optimization across multiple BO iterations rather than start fresh at each iteration.
# This allows us to monitor training across iterations.
if "epochs" in fit_args:
fit_args["epochs"] = fit_args["epochs"] + self._absolute_epochs
x, y = self.prepare_dataset(dataset)
history = self.model.fit(
x=x,
y=y,
**fit_args,
initial_epoch=self._absolute_epochs,
)
if self._continuous_optimisation:
self._absolute_epochs = self._absolute_epochs + len(history.history["loss"])
# Reset lr in case there was an lr schedule: a schedule will have changed the learning
# rate, so that the next time we call `optimize` the starting learning rate would be
# different. Therefore, we make sure the learning rate is set back to its initial value.
# However, this is not needed for `LearningRateSchedule` instances.
if not isinstance(
self.optimizer.optimizer.lr, tf.keras.optimizers.schedules.LearningRateSchedule
):
self.optimizer.optimizer.lr.assign(self.original_lr)
def log(self, dataset: Optional[Dataset] = None) -> None:
"""
Log model training information at a given optimization step to the Tensorboard.
We log several summary statistics of losses and metrics given in ``fit_args`` to
        ``optimizer`` (final, difference between initial and final loss, min and max). We also log
        epoch statistics, but as histograms rather than time series, as well as several
        training-data-based metrics, such as the root mean square error between predictions and
        observations.
We do not log statistics of individual models in the ensemble unless specifically switched
on with ``trieste.logging.set_summary_filter(lambda name: True)``.
        For custom logs the user will need to subclass the model and overwrite this method.
:param dataset: Optional data that can be used to log additional data-based model summaries.
"""
summary_writer = logging.get_tensorboard_writer()
if summary_writer:
with summary_writer.as_default(step=logging.get_step_number()):
logging.scalar("epochs/num_epochs", len(self.model.history.epoch))
for k, v in self.model.history.history.items():
KEY_SPLITTER = {
# map history keys to prefix and suffix
"loss": ("loss", ""),
r"(?P<model>model_\d+)_output_loss": ("loss", r"_\g<model>"),
r"(?P<model>model_\d+)_output_(?P<metric>.+)": (
r"\g<metric>",
r"_\g<model>",
),
}
for pattern, (pre_sub, post_sub) in KEY_SPLITTER.items():
if re.match(pattern, k):
pre = re.sub(pattern, pre_sub, k)
post = re.sub(pattern, post_sub, k)
break
else:
# unrecognised history key; ignore
continue
if "model" in post and not logging.include_summary("_ensemble"):
break
else:
if "model" in post:
pre = pre + "/_ensemble"
logging.histogram(f"{pre}/epoch{post}", lambda: v)
logging.scalar(f"{pre}/final{post}", lambda: v[-1])
logging.scalar(f"{pre}/diff{post}", lambda: v[0] - v[-1])
logging.scalar(f"{pre}/min{post}", lambda: tf.reduce_min(v))
logging.scalar(f"{pre}/max{post}", lambda: tf.reduce_max(v))
if dataset:
write_summary_data_based_metrics(
dataset=dataset, model=self, prefix="training_"
)
if logging.include_summary("_ensemble"):
predict_ensemble_variance = self.predict_ensemble(dataset.query_points)[1]
for i in range(predict_ensemble_variance.shape[0]):
logging.histogram(
f"variance/_ensemble/predict_variance_model_{i}",
predict_ensemble_variance[i, ...],
)
logging.scalar(
f"variance/_ensemble/predict_variance_mean_model_{i}",
tf.reduce_mean(predict_ensemble_variance[i, ...]),
)
def __getstate__(self) -> dict[str, Any]:
# use to_json and get_weights to save any optimizer fit_arg callback models
state = self.__dict__.copy()
if self._optimizer:
callbacks: list[Callback] = self._optimizer.fit_args.get("callbacks", [])
            saved_models: list[tf.keras.Model] = []
tensorboard_writers: list[dict[str, Any]] = []
try:
for callback in callbacks:
# serialize the callback models before pickling the optimizer
saved_models.append(callback.model)
if callback.model is self.model:
# no need to serialize the main model, just use a special value instead
callback.model = ...
elif callback.model:
callback.model = (callback.model.to_json(), callback.model.get_weights())
# don't pickle tensorboard writers either; they'll be recreated when needed
if isinstance(callback, tf.keras.callbacks.TensorBoard):
tensorboard_writers.append(callback._writers)
callback._writers = {}
state["_optimizer"] = dill.dumps(state["_optimizer"])
except Exception as e:
raise NotImplementedError(
"Failed to copy DeepEnsemble optimizer due to unsupported callbacks."
) from e
finally:
# revert original state, even if the pickling failed
for callback, model in zip(callbacks, saved_models):
callback.model = model
for callback, writers in zip(
(cb for cb in callbacks if isinstance(cb, tf.keras.callbacks.TensorBoard)),
tensorboard_writers,
):
callback._writers = writers
return state
def __setstate__(self, state: dict[str, Any]) -> None:
        # Restore optimizer and callback models after unpickling, and recompile.
self.__dict__.update(state)
# Unpickle the optimizer, and restore all the callback models
self._optimizer = dill.loads(self._optimizer)
for callback in self._optimizer.fit_args.get("callbacks", []):
if callback.model is ...:
callback.set_model(self.model)
elif callback.model:
model_json, weights = callback.model
model = tf.keras.models.model_from_json(
model_json,
custom_objects={"MultivariateNormalTriL": MultivariateNormalTriL},
)
model.set_weights(weights)
callback.set_model(model)
# Recompile the model
self.model.compile(
self.optimizer.optimizer,
loss=[self.optimizer.loss] * self._model.ensemble_size,
metrics=[self.optimizer.metrics] * self._model.ensemble_size,
)
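# Illustrative sketch (not part of the original source): the ``__getstate__`` /
# ``__setstate__`` pair above is what allows a trained ``DeepEnsemble`` to be
# deep-copied or pickled even when ``fit_args`` contains callbacks holding Keras models:
#
#     import copy
#     model_copy = copy.deepcopy(model)  # ``model`` is an assumed trained DeepEnsemble
#     assert model_copy.model is not model.model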
| 25,439 | 47.923077 | 100 | py |
trieste-develop | trieste-develop/trieste/models/keras/__init__.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This package contains the primary interface for deep neural network models. It also contains a
number of :class:`TrainableProbabilisticModel` wrappers for neural network models. Note that
currently copying/saving models is not supported, so when
:class:`~trieste.bayesian_optimizer.BayesianOptimizer` is used ``track_state`` should be set
to `False`.
"""
from .architectures import GaussianNetwork, KerasEnsemble, KerasEnsembleNetwork
from .builders import build_keras_ensemble
from .interface import DeepEnsembleModel, KerasPredictor
from .models import DeepEnsemble
from .sampler import DeepEnsembleTrajectorySampler, deep_ensemble_trajectory
from .utils import get_tensor_spec_from_data, negative_log_likelihood, sample_with_replacement
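# Example usage of the exports above (an illustrative sketch, not part of the original
# source); assumes a ``trieste.data.Dataset`` named ``dataset``:
#
#     from trieste.models.keras import DeepEnsemble, build_keras_ensemble
#     keras_ensemble = build_keras_ensemble(dataset, ensemble_size=5)
#     model = DeepEnsemble(keras_ensemble)
#     model.optimize(dataset)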
| 1,334 | 45.034483 | 94 | py |
trieste-develop | trieste-develop/trieste/models/keras/interface.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Optional
import tensorflow as tf
import tensorflow_probability as tfp
from typing_extensions import Protocol, runtime_checkable
from ...types import TensorType
from ..interfaces import ProbabilisticModel
from ..optimizer import KerasOptimizer
class KerasPredictor(ProbabilisticModel, ABC):
"""
This is an interface for trainable wrappers of TensorFlow and Keras neural network models.
"""
def __init__(self, optimizer: Optional[KerasOptimizer] = None):
"""
:param optimizer: The optimizer wrapper containing the optimizer with which to train the
model and arguments for the wrapper and the optimizer. The optimizer must
be an instance of a :class:`~tf.optimizers.Optimizer`. Defaults to
:class:`~tf.optimizers.Adam` optimizer with default parameters.
:raise ValueError: If the optimizer is not an instance of :class:`~tf.optimizers.Optimizer`.
"""
if optimizer is None:
optimizer = KerasOptimizer(tf.optimizers.Adam())
self._optimizer = optimizer
if not isinstance(optimizer.optimizer, tf.optimizers.Optimizer):
raise ValueError(
f"Optimizer for `KerasPredictor` models must be an instance of a "
f"`tf.optimizers.Optimizer`, received {type(optimizer.optimizer)} instead."
)
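    # Illustrative sketch (not in the original source): passing a custom optimizer
    # wrapper to a subclass; ``MyKerasPredictor`` is a hypothetical concrete subclass.
    #
    #     wrapper = KerasOptimizer(tf.optimizers.RMSprop(1e-3))
    #     predictor = MyKerasPredictor(optimizer=wrapper)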
@property
@abstractmethod
def model(self) -> tf.keras.Model:
"""The compiled Keras model."""
raise NotImplementedError
@property
def optimizer(self) -> KerasOptimizer:
"""The optimizer wrapper for training the model."""
return self._optimizer
def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
return self.model.predict(query_points)
def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
raise NotImplementedError(
"""
KerasPredictor does not implement sampling. Acquisition
functions relying on it cannot be used with this class by default. Certain
types of neural networks might be able to generate samples and
such subclasses should overwrite this method.
"""
)
@runtime_checkable
class DeepEnsembleModel(ProbabilisticModel, Protocol):
"""
    This is an interface for deep ensemble models, primarily for usage by trajectory
    samplers; it lives here to avoid circular imports. These models can act as probabilistic models
by deriving estimates of epistemic uncertainty from the diversity of predictions made by
individual models in the ensemble.
"""
@property
@abstractmethod
def ensemble_size(self) -> int:
"""
Returns the size of the ensemble, that is, the number of base learners or individual
models in the ensemble.
"""
raise NotImplementedError
@property
@abstractmethod
def num_outputs(self) -> int:
"""
Returns the number of outputs trained on by each member network.
"""
raise NotImplementedError
@abstractmethod
def ensemble_distributions(
self, query_points: TensorType
) -> tuple[tfp.distributions.Distribution, ...]:
"""
        Return distributions for each member of the ensemble. The type of the output will depend
        on the subclass; it might be a predicted value or a distribution.
:param query_points: The points at which to return outputs.
:return: The outputs for the observations at the specified ``query_points`` for each member
of the ensemble.
"""
raise NotImplementedError
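# Because ``DeepEnsembleModel`` is a runtime-checkable ``Protocol``, conformance can be
# checked with ``isinstance`` (an illustrative sketch, not part of the original source):
#
#     from trieste.models.keras import DeepEnsemble, DeepEnsembleModel
#     assert isinstance(deep_ensemble, DeepEnsembleModel)  # ``deep_ensemble`` assumed built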
| 4,335 | 36.37931 | 100 | py |
trieste-develop | trieste-develop/tests/unit/models/gpflux/test_interface.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import gpflow
import numpy.testing as npt
import pytest
import tensorflow as tf
from gpflow.conditionals.util import sample_mvn
from gpflux.helpers import construct_basic_inducing_variables, construct_basic_kernel
from gpflux.layers import GPLayer
from gpflux.models import DeepGP
from tests.util.misc import random_seed
from trieste.data import Dataset
from trieste.models.gpflux import GPfluxPredictor
from trieste.types import TensorType
class _QuadraticPredictor(GPfluxPredictor):
def __init__(
self,
optimizer: tf.optimizers.Optimizer | None = None,
likelihood: gpflow.likelihoods.Likelihood = gpflow.likelihoods.Gaussian(0.01),
):
super().__init__(optimizer=optimizer)
if optimizer is None:
self._optimizer = tf.optimizers.Adam()
else:
self._optimizer = optimizer
self._model_gpflux = _QuadraticGPModel(likelihood=likelihood)
self._model_keras = self._model_gpflux.as_training_model()
@property
def model_gpflux(self) -> DeepGP:
return self._model_gpflux
@property
def model_keras(self) -> tf.keras.Model:
return self._model_keras
@property
def optimizer(self) -> tf.keras.optimizers.Optimizer:
return self._optimizer
def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
# Taken from GPflow implementation of `GPModel.predict_f_samples` in gpflow.models.model
mean, cov = self._model_gpflux.predict_f(query_points, full_cov=True)
mean_for_sample = tf.linalg.adjoint(mean)
samples = sample_mvn(mean_for_sample, cov, True, num_samples=num_samples)
samples = tf.linalg.adjoint(samples)
return samples
def update(self, dataset: Dataset) -> None:
return
class _QuadraticGPModel(DeepGP):
def __init__(
self, likelihood: gpflow.likelihoods.Likelihood = gpflow.likelihoods.Gaussian(0.01)
) -> None:
kernel = construct_basic_kernel(
gpflow.kernels.SquaredExponential(), output_dim=1, share_hyperparams=True
)
inducing_var = construct_basic_inducing_variables(
num_inducing=5,
input_dim=1,
share_variables=True,
z_init=tf.random.normal([5, 1], dtype=gpflow.default_float()),
)
gp_layer = GPLayer(kernel, inducing_var, 10)
super().__init__(
[gp_layer], # not actually used
likelihood,
)
def predict_f(
self, Xnew: tf.Tensor, full_cov: bool = False, full_output_cov: bool = False
) -> tuple[tf.Tensor, tf.Tensor]:
assert not full_output_cov, "Test utility not implemented for full output covariance"
mean = tf.reduce_sum(Xnew**2, axis=1, keepdims=True)
*leading, x_samples, y_dims = mean.shape
var_shape = [*leading, y_dims, x_samples, x_samples] if full_cov else mean.shape
return mean, tf.ones(var_shape, dtype=mean.dtype)
def test_gpflux_predictor_predict() -> None:
model = _QuadraticPredictor()
mean, variance = model.predict(tf.constant([[2.5]], gpflow.default_float()))
assert mean.shape == [1, 1]
assert variance.shape == [1, 1]
npt.assert_allclose(mean, [[6.25]], rtol=0.01)
npt.assert_allclose(variance, [[1.0]], rtol=0.01)
@random_seed
def test_gpflux_predictor_sample() -> None:
model = _QuadraticPredictor()
num_samples = 20_000
samples = model.sample(tf.constant([[2.5]], gpflow.default_float()), num_samples)
assert samples.shape == [num_samples, 1, 1]
sample_mean = tf.reduce_mean(samples, axis=0)
sample_variance = tf.reduce_mean((samples - sample_mean) ** 2)
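    # the standard error of a Monte-Carlo mean estimate decays as 1/sqrt(num_samples),
    # which sets the comparison tolerances below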
linear_error = 1 / tf.sqrt(tf.cast(num_samples, tf.float32))
npt.assert_allclose(sample_mean, [[6.25]], rtol=linear_error)
npt.assert_allclose(sample_variance, 1.0, rtol=2 * linear_error)
def test_gpflux_predictor_sample_0_samples() -> None:
samples = _QuadraticPredictor().sample(tf.constant([[50.0]], gpflow.default_float()), 0)
assert samples.shape == (0, 1, 1)
def test_gpflux_predictor_get_observation_noise() -> None:
noise_var = 0.1
likelihood = gpflow.likelihoods.Gaussian(noise_var)
model = _QuadraticPredictor(likelihood=likelihood)
npt.assert_allclose(model.get_observation_noise(), noise_var)
def test_gpflux_predictor_get_observation_noise_raises_for_non_gaussian_likelihood() -> None:
likelihood = gpflow.likelihoods.StudentT()
model = _QuadraticPredictor(likelihood=likelihood)
with pytest.raises(NotImplementedError):
model.get_observation_noise()
| 5,238 | 34.639456 | 96 | py |
trieste-develop | trieste-develop/tests/unit/models/gpflux/test_models.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In this module, we test the *behaviour* of Trieste models against reference GPflux models (thus
implicitly assuming the latter are correct).
*NOTE:* Where GPflux models are used as the underlying model in a Trieste model, we should
*not* test that the underlying model is used in any particular way. To do so would break
encapsulation. For example, we should *not* test that methods on the GPflux models are called
(except in the rare case that such behaviour is an explicitly documented behaviour of the
Trieste model).
"""
from __future__ import annotations
import copy
import operator
import tempfile
import unittest.mock
from functools import partial
from typing import Callable
import gpflow
import gpflux.encoders
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
from gpflux.models import DeepGP
from gpflux.models.deep_gp import sample_dgp
from tensorflow.python.keras.callbacks import Callback
from tests.util.misc import random_seed
from tests.util.models.gpflux.models import single_layer_dgp_model
from tests.util.models.keras.models import keras_optimizer_weights
from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10
from trieste.data import Dataset
from trieste.logging import step_number, tensorboard_writer
from trieste.models.gpflux import DeepGaussianProcess
from trieste.models.interfaces import HasTrajectorySampler
from trieste.models.optimizer import KerasOptimizer
from trieste.types import TensorType
def test_deep_gaussian_process_raises_for_non_tf_optimizer(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
dgp = two_layer_model(x)
optimizer = KerasOptimizer(gpflow.optimizers.Scipy())
with pytest.raises(ValueError):
DeepGaussianProcess(dgp, optimizer)
def test_deep_gaussian_process_raises_for_keras_layer() -> None:
keras_layer_1 = tf.keras.layers.Dense(50, activation="relu")
keras_layer_2 = tf.keras.layers.Dense(2, activation="relu")
kernel = gpflow.kernels.SquaredExponential()
num_inducing = 5
inducing_variable = gpflow.inducing_variables.InducingPoints(
np.concatenate(
[
np.random.randn(num_inducing, 2),
],
axis=1,
)
)
gp_layer = gpflux.layers.GPLayer(
kernel,
inducing_variable,
num_data=5,
num_latent_gps=1,
mean_function=gpflow.mean_functions.Zero(),
)
likelihood_layer = gpflux.layers.LikelihoodLayer(gpflow.likelihoods.Gaussian(0.01))
dgp = DeepGP([keras_layer_1, keras_layer_2, gp_layer], likelihood_layer)
with pytest.raises(ValueError):
DeepGaussianProcess(dgp)
def test_deep_gaussian_process_model_attribute(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
dgp = two_layer_model(x)
model = DeepGaussianProcess(dgp)
assert model.model_gpflux is dgp
def test_deep_gaussian_process_update(two_layer_model: Callable[[TensorType], DeepGP]) -> None:
x = tf.zeros([1, 4], dtype=tf.float64)
dgp = two_layer_model(x)
model = DeepGaussianProcess(dgp)
assert model.model_gpflux.num_data == 1
for layer in model.model_gpflux.f_layers:
assert layer.num_data == 1
model.update(Dataset(tf.zeros([5, 4]), tf.zeros([5, 1])))
assert model.model_gpflux.num_data == 5
for layer in model.model_gpflux.f_layers:
assert layer.num_data == 5
@pytest.mark.parametrize(
"new_data",
[Dataset(tf.zeros([3, 5]), tf.zeros([3, 1])), Dataset(tf.zeros([3, 4]), tf.zeros([3, 2]))],
)
def test_deep_gaussian_process_update_raises_for_invalid_shapes(
two_layer_model: Callable[[TensorType], DeepGP], new_data: Dataset
) -> None:
x = tf.zeros([1, 4], dtype=tf.float64)
dgp = two_layer_model(x)
model = DeepGaussianProcess(dgp)
with pytest.raises(ValueError):
model.update(new_data)
def test_deep_gaussian_process_optimize_with_defaults(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = fnc_2sin_x_over_3(x_observed)
data = x_observed, y_observed
dataset = Dataset(*data)
model = DeepGaussianProcess(two_layer_model(x_observed))
elbo = model.model_gpflux.elbo(data)
model.optimize(dataset)
assert model.model_gpflux.elbo(data) > elbo
@pytest.mark.parametrize("batch_size", [10, 100])
def test_deep_gaussian_process_optimize(
two_layer_model: Callable[[TensorType], DeepGP], batch_size: int
) -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = fnc_2sin_x_over_3(x_observed)
data = x_observed, y_observed
dataset = Dataset(*data)
fit_args = {"batch_size": batch_size, "epochs": 10, "verbose": 0}
optimizer = KerasOptimizer(tf.optimizers.Adam(), fit_args)
model = DeepGaussianProcess(two_layer_model(x_observed), optimizer)
elbo = model.model_gpflux.elbo(data)
model.optimize(dataset)
assert model.model_gpflux.elbo(data) > elbo
def test_deep_gaussian_process_loss(two_layer_model: Callable[[TensorType], DeepGP]) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
y = fnc_3x_plus_10(x)
reference_model = two_layer_model(x)
model = DeepGaussianProcess(two_layer_model(x))
internal_model = model.model_gpflux
npt.assert_allclose(internal_model.elbo((x, y)), reference_model.elbo((x, y)), rtol=1e-6)
def test_deep_gaussian_process_predict() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
reference_model = single_layer_dgp_model(x)
model = DeepGaussianProcess(single_layer_dgp_model(x))
test_x = tf.constant([[2.5]], dtype=gpflow.default_float())
ref_mean, ref_var = reference_model.predict_f(test_x)
f_mean, f_var = model.predict(test_x)
npt.assert_allclose(f_mean, ref_mean)
npt.assert_allclose(f_var, ref_var)
def test_deep_gaussian_process_predict_broadcasts() -> None:
x = tf.constant(np.arange(6).reshape(3, 2), dtype=gpflow.default_float())
reference_model = single_layer_dgp_model(x)
model = DeepGaussianProcess(single_layer_dgp_model(x))
test_x = tf.constant(np.arange(12).reshape(1, 2, 3, 2), dtype=gpflow.default_float())
ref_mean, ref_var = reference_model.predict_f(test_x)
f_mean, f_var = model.predict(test_x)
assert f_mean.shape == (1, 2, 3, 1)
assert f_var.shape == (1, 2, 3, 1)
npt.assert_allclose(f_mean, ref_mean)
npt.assert_allclose(f_var, ref_var)
@random_seed
def test_deep_gaussian_process_sample(two_layer_model: Callable[[TensorType], DeepGP]) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = DeepGaussianProcess(two_layer_model(x))
num_samples = 100
test_x = tf.constant([[2.5]], dtype=gpflow.default_float())
samples = model.sample(test_x, num_samples)
assert samples.shape == [num_samples, 1, 1]
sample_mean = tf.reduce_mean(samples, axis=0)
sample_variance = tf.reduce_mean((samples - sample_mean) ** 2)
reference_model = two_layer_model(x)
def get_samples(query_points: TensorType, num_samples: int) -> TensorType:
samples = []
for _ in range(num_samples):
samples.append(sample_dgp(reference_model)(query_points))
return tf.stack(samples)
ref_samples = get_samples(test_x, num_samples)
ref_mean = tf.reduce_mean(ref_samples, axis=0)
ref_variance = tf.reduce_mean((ref_samples - ref_mean) ** 2)
error = 1 / tf.sqrt(tf.cast(num_samples, tf.float32))
npt.assert_allclose(sample_mean, ref_mean, atol=2 * error)
npt.assert_allclose(sample_mean, 0, atol=error)
npt.assert_allclose(sample_variance, ref_variance, atol=4 * error)
def test_deep_gaussian_process_resets_lr_with_lr_schedule(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
y = fnc_3x_plus_10(x)
epochs = 2
init_lr = 0.01
def scheduler(epoch: int, lr: float) -> float:
        if epoch == epochs // 2:  # drop the learning rate partway through training
return lr * 0.1
else:
return lr
fit_args = {
"epochs": epochs,
"batch_size": 100,
"verbose": 0,
"callbacks": tf.keras.callbacks.LearningRateScheduler(scheduler),
}
optimizer = KerasOptimizer(tf.optimizers.Adam(init_lr), fit_args)
model = DeepGaussianProcess(two_layer_model(x), optimizer)
npt.assert_allclose(model.model_keras.optimizer.lr.numpy(), init_lr, rtol=1e-6)
model.optimize(Dataset(x, y))
npt.assert_allclose(model.model_keras.optimizer.lr.numpy(), init_lr, rtol=1e-6)
def test_deep_gaussian_process_with_lr_scheduler(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
y = fnc_3x_plus_10(x)
epochs = 2
init_lr = 1.0
fit_args = {
"epochs": epochs,
"batch_size": 20,
"verbose": 0,
}
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=init_lr, decay_steps=1, decay_rate=0.5
)
optimizer = KerasOptimizer(tf.optimizers.Adam(lr_schedule), fit_args)
model = DeepGaussianProcess(two_layer_model(x), optimizer)
model.optimize(Dataset(x, y))
assert len(model.model_keras.history.history["loss"]) == epochs
def test_deep_gaussian_process_default_optimizer_is_correct(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = DeepGaussianProcess(two_layer_model(x))
model_fit_args = dict(model.optimizer.fit_args)
model_fit_args.pop("callbacks")
fit_args = {
"verbose": 0,
"epochs": 400,
"batch_size": 1000,
}
assert isinstance(model.optimizer, KerasOptimizer)
assert isinstance(model.optimizer.optimizer, tf.optimizers.Optimizer)
assert model_fit_args == fit_args
def test_deep_gaussian_process_subclass_default_optimizer_is_correct(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
class DummySubClass(DeepGaussianProcess):
"""Dummy subclass"""
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = DummySubClass(two_layer_model(x))
model_fit_args = dict(model.optimizer.fit_args)
model_fit_args.pop("callbacks")
fit_args = {
"verbose": 0,
"epochs": 400,
"batch_size": 1000,
}
assert isinstance(model.optimizer, KerasOptimizer)
assert isinstance(model.optimizer.optimizer, tf.optimizers.Optimizer)
assert model_fit_args == fit_args
def test_deepgp_deep_copyable() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = DeepGaussianProcess(partial(single_layer_dgp_model, x))
model_copy = copy.deepcopy(model)
test_x = tf.constant([[2.5]], dtype=gpflow.default_float())
assert model.model_gpflux.inputs.dtype == model_copy.model_gpflux.inputs.dtype
assert model.model_gpflux.targets.dtype == model_copy.model_gpflux.targets.dtype
mean_f, variance_f = model.predict(test_x)
mean_f_copy, variance_f_copy = model_copy.predict(test_x)
npt.assert_allclose(mean_f, mean_f_copy)
npt.assert_allclose(variance_f, variance_f_copy)
# check that updating the original doesn't break or change the deepcopy
dataset = Dataset(x, fnc_3x_plus_10(x))
model.update(dataset)
model.optimize(dataset)
mean_f_updated, variance_f_updated = model.predict(test_x)
mean_f_copy_updated, variance_f_copy_updated = model_copy.predict(test_x)
npt.assert_allclose(mean_f_copy_updated, mean_f_copy)
npt.assert_allclose(variance_f_copy_updated, variance_f_copy)
npt.assert_array_compare(operator.__ne__, mean_f_updated, mean_f)
npt.assert_array_compare(operator.__ne__, variance_f_updated, variance_f)
    # check that we can also update the copy
dataset2 = Dataset(x, fnc_2sin_x_over_3(x))
model_copy.update(dataset2)
model_copy.optimize(dataset2)
mean_f_updated_2, variance_f_updated_2 = model.predict(test_x)
mean_f_copy_updated_2, variance_f_copy_updated_2 = model_copy.predict(test_x)
npt.assert_allclose(mean_f_updated_2, mean_f_updated)
npt.assert_allclose(variance_f_updated_2, variance_f_updated)
npt.assert_array_compare(operator.__ne__, mean_f_copy_updated_2, mean_f_copy_updated)
npt.assert_array_compare(operator.__ne__, variance_f_copy_updated_2, variance_f_copy_updated)
def test_deepgp_tf_saved_model() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = DeepGaussianProcess(partial(single_layer_dgp_model, x))
with tempfile.TemporaryDirectory() as path:
# create a trajectory sampler (used for sample method)
assert isinstance(model, HasTrajectorySampler)
trajectory_sampler = model.trajectory_sampler()
trajectory = trajectory_sampler.get_trajectory()
# generate client model with predict and sample methods
module = model.get_module_with_variables(trajectory_sampler, trajectory)
module.predict = tf.function(
model.predict, input_signature=[tf.TensorSpec(shape=[None, 1], dtype=tf.float64)]
)
def _sample(query_points: TensorType, num_samples: int) -> TensorType:
trajectory_updated = trajectory_sampler.resample_trajectory(trajectory)
expanded_query_points = tf.expand_dims(query_points, -2) # [N, 1, D]
tiled_query_points = tf.tile(expanded_query_points, [1, num_samples, 1]) # [N, S, D]
return tf.transpose(trajectory_updated(tiled_query_points), [1, 0, 2])[
:, :, :1
] # [S, N, L]
module.sample = tf.function(
_sample,
input_signature=[
tf.TensorSpec(shape=[None, 1], dtype=tf.float64), # query_points
tf.TensorSpec(shape=(), dtype=tf.int32), # num_samples
],
)
tf.saved_model.save(module, str(path))
client_model = tf.saved_model.load(str(path))
# test exported methods
test_x = tf.constant([[2.5]], dtype=gpflow.default_float())
mean_f, variance_f = model.predict(test_x)
mean_f_copy, variance_f_copy = client_model.predict(test_x)
npt.assert_allclose(mean_f, mean_f_copy)
npt.assert_allclose(variance_f, variance_f_copy)
client_model.sample(x, 10)
def test_deepgp_deep_copies_optimizer_state() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = DeepGaussianProcess(partial(single_layer_dgp_model, x))
dataset = Dataset(x, fnc_3x_plus_10(x))
model.update(dataset)
assert not keras_optimizer_weights(model.optimizer.optimizer)
model.optimize(dataset)
assert keras_optimizer_weights(model.optimizer.optimizer)
npt.assert_allclose(model.optimizer.optimizer.iterations, 400)
assert model.optimizer.fit_args["callbacks"][0].model is model.model_keras
model_copy = copy.deepcopy(model)
assert model.optimizer.optimizer is not model_copy.optimizer.optimizer
npt.assert_allclose(model_copy.optimizer.optimizer.iterations, 400)
npt.assert_equal(
keras_optimizer_weights(model.optimizer.optimizer),
keras_optimizer_weights(model_copy.optimizer.optimizer),
)
assert model_copy.optimizer.fit_args["callbacks"][0].model is model_copy.model_keras
@pytest.mark.parametrize(
"callbacks",
[
[
tf.keras.callbacks.CSVLogger("csv"),
tf.keras.callbacks.EarlyStopping(monitor="loss", patience=100),
tf.keras.callbacks.History(),
tf.keras.callbacks.LambdaCallback(lambda epoch, lr: lr),
tf.keras.callbacks.LearningRateScheduler(lambda epoch, lr: lr),
tf.keras.callbacks.ProgbarLogger(),
tf.keras.callbacks.ReduceLROnPlateau(),
tf.keras.callbacks.RemoteMonitor(),
tf.keras.callbacks.TensorBoard(),
tf.keras.callbacks.TerminateOnNaN(),
],
pytest.param(
[
tf.keras.callbacks.experimental.BackupAndRestore("backup"),
tf.keras.callbacks.BaseLogger(),
tf.keras.callbacks.ModelCheckpoint("weights"),
],
marks=pytest.mark.skip(reason="callbacks currently causing optimize to fail"),
),
],
)
def test_deepgp_deep_copies_different_callback_types(callbacks: list[Callback]) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = DeepGaussianProcess(partial(single_layer_dgp_model, x))
model.optimizer.fit_args["callbacks"] = callbacks
dataset = Dataset(x, fnc_3x_plus_10(x))
model.update(dataset)
model.optimize(dataset)
model_copy = copy.deepcopy(model)
assert model.optimizer is not model_copy.optimizer
assert tuple(type(callback) for callback in model.optimizer.fit_args["callbacks"]) == tuple(
type(callback) for callback in model_copy.optimizer.fit_args["callbacks"]
)
def test_deepgp_deep_copies_optimization_history() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = DeepGaussianProcess(partial(single_layer_dgp_model, x))
dataset = Dataset(x, fnc_3x_plus_10(x))
model.update(dataset)
model.optimize(dataset)
assert model.model_keras.history.history
expected_history = model.model_keras.history.history
model_copy = copy.deepcopy(model)
assert model_copy.model_keras.history.history
history = model_copy.model_keras.history.history
assert history.keys() == expected_history.keys()
for k, v in expected_history.items():
assert history[k] == v
@unittest.mock.patch("trieste.logging.tf.summary.histogram")
@unittest.mock.patch("trieste.logging.tf.summary.scalar")
@pytest.mark.parametrize("use_dataset", [False, True])
def test_deepgp_log(
mocked_summary_scalar: unittest.mock.MagicMock,
mocked_summary_histogram: unittest.mock.MagicMock,
use_dataset: bool,
) -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = fnc_2sin_x_over_3(x_observed)
dataset = Dataset(x_observed, y_observed)
model = DeepGaussianProcess(
single_layer_dgp_model(x_observed),
KerasOptimizer(tf.optimizers.Adam(), {"batch_size": 200, "epochs": 3, "verbose": 0}),
)
model.optimize(dataset)
mocked_summary_writer = unittest.mock.MagicMock()
with tensorboard_writer(mocked_summary_writer):
with step_number(42):
if use_dataset:
model.log(dataset)
else:
model.log(None)
assert len(mocked_summary_writer.method_calls) == 1
assert mocked_summary_writer.method_calls[0][0] == "as_default"
assert mocked_summary_writer.method_calls[0][-1]["step"] == 42
    num_scalars = 10  # 3 from write_summary_kernel_parameters and write_summary_likelihood_parameters, plus 7 more
    num_histogram = 3
if use_dataset: # write_summary_data_based_metrics
num_scalars += 8
num_histogram += 6
assert mocked_summary_scalar.call_count == num_scalars
assert mocked_summary_histogram.call_count == num_histogram
| 20,131 | 35.67031 | 98 | py |
trieste-develop | trieste-develop/tests/unit/models/keras/test_architectures.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Tuple
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from tests.util.misc import empty_dataset
from tests.util.models.keras.models import trieste_keras_ensemble_model
from trieste.models.keras import (
GaussianNetwork,
KerasEnsembleNetwork,
get_tensor_spec_from_data,
negative_log_likelihood,
)
_ENSEMBLE_SIZE = 3
@pytest.fixture(name="ensemble_size", params=[2, 5])
def _ensemble_size_fixture(request: Any) -> int:
return request.param
@pytest.fixture(name="independent_normal", params=[False, True])
def _independent_normal_fixture(request: Any) -> bool:
return request.param
@pytest.fixture(name="num_hidden_layers", params=[0, 1, 3])
def _num_hidden_layers_fixture(request: Any) -> int:
return request.param
def test_keras_ensemble_repr(
ensemble_size: int,
independent_normal: bool,
) -> None:
example_data = empty_dataset([1], [1])
keras_ensemble = trieste_keras_ensemble_model(example_data, ensemble_size, independent_normal)
expected_repr = f"KerasEnsemble({keras_ensemble._networks!r})"
assert type(keras_ensemble).__name__ in repr(keras_ensemble)
assert repr(keras_ensemble) == expected_repr
def test_keras_ensemble_model_attributes() -> None:
example_data = empty_dataset([1], [1])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE)
assert isinstance(keras_ensemble.model, tf.keras.Model)
def test_keras_ensemble_ensemble_size_attributes(ensemble_size: int) -> None:
example_data = empty_dataset([1], [1])
keras_ensemble = trieste_keras_ensemble_model(example_data, ensemble_size)
assert keras_ensemble.ensemble_size == ensemble_size
@pytest.mark.parametrize(
"query_point_shape, observation_shape",
[
([1], [1]),
([5], [1]),
([5], [2]),
],
)
def test_keras_ensemble_build_ensemble_seems_correct(
ensemble_size: int,
independent_normal: bool,
query_point_shape: List[int],
observation_shape: List[int],
) -> None:
n_obs = 10
example_data = empty_dataset(query_point_shape, observation_shape)
query_points = tf.random.uniform([n_obs] + query_point_shape)
keras_ensemble = trieste_keras_ensemble_model(example_data, ensemble_size, independent_normal)
# basics
assert isinstance(keras_ensemble.model, tf.keras.Model)
assert keras_ensemble.model.built
# check ensemble size
assert len(keras_ensemble.model.inputs) == ensemble_size
assert len(keras_ensemble.model.input_names) == ensemble_size
assert len(keras_ensemble.model.output_names) == ensemble_size
# check input shape
for shape in keras_ensemble.model.input_shape:
assert shape[1:] == tf.TensorShape(query_point_shape)
    # testing the output shape is more complex, as probabilistic layers do not expose the
    # usual shape properties; we make some predictions instead and check the output is correct
predictions = keras_ensemble.model.predict([query_points] * ensemble_size)
assert len(predictions) == ensemble_size
for pred in predictions:
assert pred.shape == tf.TensorShape([n_obs] + observation_shape)
# check input/output names
for ens in range(ensemble_size):
ins = ["model_" + str(ens) in i_name for i_name in keras_ensemble.model.input_names]
assert np.any(ins)
outs = ["model_" + str(ens) in o_name for o_name in keras_ensemble.model.output_names]
assert np.any(outs)
# check the model has not been compiled
assert keras_ensemble.model.compiled_loss is None
assert keras_ensemble.model.compiled_metrics is None
assert keras_ensemble.model.optimizer is None
# check correct number of layers
assert len(keras_ensemble.model.layers) == 2 * ensemble_size + 3 * ensemble_size
def test_keras_ensemble_can_be_compiled() -> None:
example_data = empty_dataset([1], [1])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE)
keras_ensemble.model.compile(tf.optimizers.Adam(), negative_log_likelihood)
assert keras_ensemble.model.compiled_loss is not None
assert keras_ensemble.model.compiled_metrics is not None
assert keras_ensemble.model.optimizer is not None
class _DummyKerasEnsembleNetwork(KerasEnsembleNetwork):
def connect_layers(self) -> Tuple[tf.Tensor, tf.Tensor]:
raise NotImplementedError
def test_keras_ensemble_network_raises_on_incorrect_tensor_spec() -> None:
with pytest.raises(ValueError):
_DummyKerasEnsembleNetwork(
[1],
tf.TensorSpec(shape=(1,), dtype=tf.float32),
tf.keras.losses.MeanSquaredError(),
)
with pytest.raises(ValueError):
_DummyKerasEnsembleNetwork(
tf.TensorSpec(shape=(1,), dtype=tf.float32),
[1],
tf.keras.losses.MeanSquaredError(),
)
def test_keras_ensemble_network_network_and_layer_name() -> None:
model = _DummyKerasEnsembleNetwork(
tf.TensorSpec(shape=(1,), dtype=tf.float32),
tf.TensorSpec(shape=(1,), dtype=tf.float32),
)
# check defaults
assert model.network_name == ""
assert model.input_layer_name == "input"
assert model.output_layer_name == "output"
# check that network name is changed
model.network_name = "model_"
assert model.network_name == "model_"
assert model.input_layer_name == "model_" + "input"
assert model.output_layer_name == "model_" + "output"
@pytest.mark.parametrize("n_dims", list(range(10)))
def test_keras_ensemble_network_flattened_output_shape(n_dims: int) -> None:
shape = np.random.randint(1, 10, (n_dims,))
tensor = np.random.randint(0, 1, shape)
tensor_spec = tf.TensorSpec(shape)
model = _DummyKerasEnsembleNetwork(
tensor_spec,
tensor_spec,
)
flattened_shape = model.flattened_output_shape
assert flattened_shape == np.size(tensor)
def test_gaussian_network_check_default_hidden_layer_args() -> None:
example_data = empty_dataset([1], [1])
input_tensor_spec, output_tensor_spec = get_tensor_spec_from_data(example_data)
network = GaussianNetwork(
input_tensor_spec,
output_tensor_spec,
)
default_args = ({"units": 50, "activation": "relu"}, {"units": 50, "activation": "relu"})
assert network._hidden_layer_args == default_args
@pytest.mark.parametrize(
"query_point_shape, observation_shape",
[
([1], [1]),
([5], [1]),
([5], [2]),
],
)
def test_gaussian_network_is_correctly_constructed(
query_point_shape: List[int], observation_shape: List[int], num_hidden_layers: int
) -> None:
n_obs = 10
example_data = empty_dataset(query_point_shape, observation_shape)
query_points = tf.random.uniform([n_obs] + query_point_shape)
input_tensor_spec, output_tensor_spec = get_tensor_spec_from_data(example_data)
hidden_layer_args = []
for i in range(num_hidden_layers):
hidden_layer_args.append({"units": 10, "activation": "relu"})
network = GaussianNetwork(
input_tensor_spec,
output_tensor_spec,
hidden_layer_args,
)
network_input, network_output = network.connect_layers()
network_built = tf.keras.Model(inputs=network_input, outputs=network_output)
# check input shape
assert network_input.shape[1:] == tf.TensorShape(query_point_shape)
    # testing the output shape is more complex, as probabilistic layers do not expose the
    # usual shape properties; we make some predictions instead and check the output is correct
predictions = network_built.predict(query_points)
assert predictions.shape == tf.TensorShape([n_obs] + observation_shape)
# check layers
assert isinstance(network_built.layers[0], tf.keras.layers.InputLayer)
assert len(network_built.layers[1:-2]) == num_hidden_layers
assert isinstance(network_built.layers[-1], tfp.layers.DistributionLambda)
def test_multivariatenormaltril_layer_fails_to_serialize() -> None:
# tfp.layers.MultivariateNormalTriL currently fails to serialize out of the box
# (with different errors in TF2.4 and TF2.5). When that's fixed we can remove our workaround.
layer = tfp.layers.MultivariateNormalTriL(1)
with pytest.raises(Exception):
serialized = tf.keras.utils.serialize_keras_object(layer)
tf.keras.utils.deserialize_keras_object(
serialized, custom_objects={"MultivariateNormalTriL": tfp.layers.MultivariateNormalTriL}
)
| 9,121 | 34.084615 | 100 | py |
trieste-develop | trieste-develop/tests/unit/models/keras/test_interface.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import gpflow
import pytest
import tensorflow as tf
from tests.util.misc import empty_dataset, raise_exc
from trieste.models.keras import KerasPredictor
from trieste.models.optimizer import KerasOptimizer
class _DummyKerasPredictor(KerasPredictor):
@property
def model(self) -> tf.keras.Model:
return raise_exc
def test_keras_predictor_repr_includes_class_name() -> None:
model = _DummyKerasPredictor()
assert type(model).__name__ in repr(model)
def test_keras_predictor_default_optimizer_is_correct() -> None:
model = _DummyKerasPredictor()
assert isinstance(model._optimizer, KerasOptimizer)
assert isinstance(model._optimizer.optimizer, tf.optimizers.Adam)
assert isinstance(model.optimizer, KerasOptimizer)
assert isinstance(model.optimizer.optimizer, tf.optimizers.Adam)
def test_keras_predictor_check_optimizer_property() -> None:
optimizer = KerasOptimizer(tf.optimizers.RMSprop())
model = _DummyKerasPredictor(optimizer)
assert model.optimizer == optimizer
def test_keras_predictor_raises_on_sample_call() -> None:
model = _DummyKerasPredictor()
with pytest.raises(NotImplementedError):
model.sample(empty_dataset([1], [1]).query_points, 1)
def test_keras_predictor_raises_for_non_tf_optimizer() -> None:
with pytest.raises(ValueError):
_DummyKerasPredictor(optimizer=KerasOptimizer(gpflow.optimizers.Scipy()))
| 2,045 | 30.96875 | 81 | py |
trieste-develop | trieste-develop/tests/unit/models/keras/test_builders.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from tests.util.misc import empty_dataset
from trieste.models.keras import build_keras_ensemble
@pytest.mark.parametrize("units, activation", [(10, "relu"), (50, tf.keras.activations.tanh)])
@pytest.mark.parametrize("ensemble_size", [2, 5])
@pytest.mark.parametrize("independent_normal", [False, True])
@pytest.mark.parametrize("num_hidden_layers", [0, 1, 3])
@pytest.mark.parametrize("num_outputs", [1, 3])
def test_build_keras_ensemble(
num_outputs: int,
ensemble_size: int,
num_hidden_layers: int,
units: int,
activation: Union[str, tf.keras.layers.Activation],
independent_normal: bool,
) -> None:
example_data = empty_dataset([num_outputs], [num_outputs])
keras_ensemble = build_keras_ensemble(
example_data,
ensemble_size,
num_hidden_layers,
units,
activation,
independent_normal,
)
assert keras_ensemble.ensemble_size == ensemble_size
assert len(keras_ensemble.model.layers) == num_hidden_layers * ensemble_size + 3 * ensemble_size
if num_outputs > 1:
if independent_normal:
assert isinstance(keras_ensemble.model.layers[-1], tfp.layers.IndependentNormal)
else:
assert isinstance(keras_ensemble.model.layers[-1], tfp.layers.MultivariateNormalTriL)
else:
assert isinstance(keras_ensemble.model.layers[-1], tfp.layers.DistributionLambda)
if num_hidden_layers > 0:
for layer in keras_ensemble.model.layers[ensemble_size : -ensemble_size * 2]:
assert layer.units == units
assert layer.activation == activation or layer.activation.__name__ == activation
| 2,334 | 37.278689 | 100 | py |
trieste-develop | trieste-develop/tests/unit/models/keras/test_models.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
import operator
import tempfile
import unittest.mock
from typing import Any, Optional
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.keras.callbacks import Callback
from tests.util.misc import ShapeLike, empty_dataset, random_seed
from tests.util.models.keras.models import (
keras_optimizer_weights,
trieste_deep_ensemble_model,
trieste_keras_ensemble_model,
)
from trieste.data import Dataset
from trieste.logging import step_number, tensorboard_writer
from trieste.models.interfaces import HasTrajectorySampler
from trieste.models.keras import (
DeepEnsemble,
KerasEnsemble,
negative_log_likelihood,
sample_with_replacement,
)
from trieste.models.optimizer import KerasOptimizer, TrainingData
from trieste.types import TensorType
_ENSEMBLE_SIZE = 3
@pytest.fixture(name="ensemble_size", params=[2, 5])
def _ensemble_size_fixture(request: Any) -> int:
return request.param
@pytest.fixture(name="num_outputs", params=[1, 3])
def _num_outputs_fixture(request: Any) -> int:
return request.param
@pytest.fixture(name="dataset_size", params=[10, 100])
def _dataset_size_fixture(request: Any) -> int:
return request.param
@pytest.fixture(name="independent_normal", params=[False, True])
def _independent_normal_fixture(request: Any) -> bool:
return request.param
@pytest.fixture(name="bootstrap_data", params=[False, True])
def _bootstrap_data_fixture(request: Any) -> bool:
return request.param
def _get_example_data(
query_point_shape: ShapeLike, observation_shape: Optional[ShapeLike] = None
) -> Dataset:
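    # build a random dataset; observations default to a single output per query point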
qp = tf.random.uniform(tf.TensorShape(query_point_shape), dtype=tf.float64)
if observation_shape is None:
observation_shape = query_point_shape[:-1] + [1] # type: ignore
obs = tf.random.uniform(tf.TensorShape(observation_shape), dtype=tf.float64)
return Dataset(qp, obs)
def _ensemblise_data(
model: KerasEnsemble, data: Dataset, ensemble_size: int, bootstrap: bool = False
) -> TrainingData:
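    # map a single dataset onto the ensemble's named inputs/outputs, optionally
    # bootstrapping each member's copy (mirrors DeepEnsemble.prepare_dataset)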
inputs = {}
outputs = {}
for index in range(ensemble_size):
if bootstrap:
resampled_data = sample_with_replacement(data)
else:
resampled_data = data
input_name = model.model.input_names[index]
output_name = model.model.output_names[index]
inputs[input_name], outputs[output_name] = resampled_data.astuple()
return inputs, outputs
@pytest.mark.parametrize("optimizer", [tf.optimizers.Adam(), tf.optimizers.RMSprop()])
@pytest.mark.parametrize("diversify", [False, True])
def test_deep_ensemble_repr(
optimizer: tf.optimizers.Optimizer, bootstrap_data: bool, diversify: bool
) -> None:
example_data = empty_dataset([1], [1])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE)
keras_ensemble.model.compile(optimizer, loss=negative_log_likelihood)
optimizer_wrapper = KerasOptimizer(optimizer, loss=negative_log_likelihood)
model = DeepEnsemble(keras_ensemble, optimizer_wrapper, bootstrap_data, diversify)
expected_repr = (
f"DeepEnsemble({keras_ensemble.model!r}, {optimizer_wrapper!r}, "
f"{bootstrap_data!r}, {diversify!r})"
)
assert type(model).__name__ in repr(model)
assert repr(model) == expected_repr
def test_deep_ensemble_model_attributes() -> None:
example_data = empty_dataset([1], [1])
model, keras_ensemble, optimizer = trieste_deep_ensemble_model(
example_data, _ENSEMBLE_SIZE, False, False
)
keras_ensemble.model.compile(optimizer=optimizer.optimizer, loss=optimizer.loss)
assert model.model is keras_ensemble.model
def test_deep_ensemble_ensemble_size_attributes(ensemble_size: int) -> None:
example_data = empty_dataset([1], [1])
model, _, _ = trieste_deep_ensemble_model(example_data, ensemble_size, False, False)
assert model.ensemble_size == ensemble_size
def test_deep_ensemble_raises_for_incorrect_ensemble_size() -> None:
with pytest.raises(ValueError):
trieste_deep_ensemble_model(empty_dataset([1], [1]), 1)
def test_deep_ensemble_default_optimizer_is_correct() -> None:
example_data = empty_dataset([1], [1])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False)
model = DeepEnsemble(keras_ensemble)
default_loss = negative_log_likelihood
default_fit_args = {
"verbose": 0,
"epochs": 3000,
"batch_size": 16,
}
del model.optimizer.fit_args["callbacks"]
assert isinstance(model.optimizer, KerasOptimizer)
assert isinstance(model.optimizer.optimizer, tf.optimizers.Optimizer)
assert model.optimizer.fit_args == default_fit_args
assert model.optimizer.loss == default_loss
def test_deep_ensemble_optimizer_changed_correctly() -> None:
example_data = empty_dataset([1], [1])
custom_fit_args = {
"verbose": 1,
"epochs": 10,
"batch_size": 10,
}
custom_optimizer = tf.optimizers.RMSprop()
custom_loss = tf.keras.losses.MeanSquaredError()
optimizer_wrapper = KerasOptimizer(custom_optimizer, custom_fit_args, custom_loss)
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE)
model = DeepEnsemble(keras_ensemble, optimizer_wrapper)
assert model.optimizer == optimizer_wrapper
assert model.optimizer.optimizer == custom_optimizer
assert model.optimizer.fit_args == custom_fit_args
def test_deep_ensemble_is_compiled() -> None:
example_data = empty_dataset([1], [1])
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE)
assert model.model.compiled_loss is not None
assert model.model.compiled_metrics is not None
assert model.model.optimizer is not None
def test_deep_ensemble_resets_lr_with_lr_schedule() -> None:
example_data = _get_example_data([100, 1])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE)
epochs = 2
init_lr = 1.0
def scheduler(epoch: int, lr: float) -> float:
return lr * 0.5
fit_args = {
"epochs": epochs,
"batch_size": 100,
"verbose": 0,
"callbacks": tf.keras.callbacks.LearningRateScheduler(scheduler),
}
optimizer = KerasOptimizer(tf.optimizers.Adam(init_lr), fit_args)
model = DeepEnsemble(keras_ensemble, optimizer)
npt.assert_allclose(model.model.optimizer.lr.numpy(), init_lr, rtol=1e-6)
model.optimize(example_data)
npt.assert_allclose(model.model.history.history["lr"], [0.5, 0.25])
npt.assert_allclose(model.model.optimizer.lr.numpy(), init_lr, rtol=1e-6)
def test_deep_ensemble_with_lr_scheduler() -> None:
example_data = _get_example_data([100, 1])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE)
epochs = 2
init_lr = 1.0
fit_args = {
"epochs": epochs,
"batch_size": 20,
"verbose": 0,
}
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=init_lr, decay_steps=1, decay_rate=0.5
)
optimizer = KerasOptimizer(tf.optimizers.Adam(lr_schedule), fit_args)
model = DeepEnsemble(keras_ensemble, optimizer)
model.optimize(example_data)
assert len(model.model.history.history["loss"]) == epochs
def test_deep_ensemble_ensemble_distributions(ensemble_size: int, dataset_size: int) -> None:
example_data = _get_example_data([dataset_size, 1])
model, _, _ = trieste_deep_ensemble_model(example_data, ensemble_size, False, False)
distributions = model.ensemble_distributions(example_data.query_points)
assert len(distributions) == ensemble_size
for dist in distributions:
assert isinstance(dist, tfp.distributions.Distribution)
try:
predicted_means = dist.mean()
except Exception as exc:
assert False, f"calling 'mean' raised an exception {exc}"
try:
predicted_vars = dist.variance()
except Exception as exc:
assert False, f"calling 'variance' raised an exception {exc}"
assert tf.is_tensor(predicted_means)
assert tf.is_tensor(predicted_vars)
assert predicted_means.shape[-2:] == example_data.observations.shape
assert predicted_vars.shape[-2:] == example_data.observations.shape
def test_deep_ensemble_predict_broadcasts(
ensemble_size: int, dataset_size: int, num_outputs: int
) -> None:
# create a model that expects [dataset_size, num_outputs] spec
dummy_data = _get_example_data([dataset_size, num_outputs], [dataset_size, num_outputs])
model, _, _ = trieste_deep_ensemble_model(dummy_data, ensemble_size, False, False)
# check that it handles predictions with leading batch dimensions
query_data = _get_example_data(
[1, 2, dataset_size, num_outputs], [1, 2, dataset_size, num_outputs]
)
predicted_means, predicted_vars = model.predict(query_data.query_points)
assert tf.is_tensor(predicted_vars)
assert predicted_vars.shape == query_data.observations.shape
assert tf.is_tensor(predicted_means)
assert predicted_means.shape == query_data.observations.shape
def test_deep_ensemble_predict_omit_trailing_dim_one(ensemble_size: int, dataset_size: int) -> None:
dummy_data = _get_example_data([dataset_size, 1], [dataset_size, 1])
model, _, _ = trieste_deep_ensemble_model(dummy_data, ensemble_size, False, False)
# Functional has code to "allow (None,) and (None, 1) Tensors to be passed interchangeably"
qp = tf.random.uniform(tf.TensorShape([dataset_size]), dtype=tf.float64)
predicted_means, predicted_vars = model.predict(qp)
assert tf.is_tensor(predicted_vars)
assert predicted_vars.shape == dummy_data.observations.shape
assert tf.is_tensor(predicted_means)
assert predicted_means.shape == dummy_data.observations.shape
def test_deep_ensemble_predict_call_shape(
ensemble_size: int, dataset_size: int, num_outputs: int
) -> None:
example_data = _get_example_data([dataset_size, num_outputs], [dataset_size, num_outputs])
model, _, _ = trieste_deep_ensemble_model(example_data, ensemble_size, False, False)
predicted_means, predicted_vars = model.predict(example_data.query_points)
assert tf.is_tensor(predicted_vars)
assert predicted_vars.shape == example_data.observations.shape
assert tf.is_tensor(predicted_means)
assert predicted_means.shape == example_data.observations.shape
def test_deep_ensemble_predict_ensemble_call_shape(
ensemble_size: int, dataset_size: int, num_outputs: int
) -> None:
example_data = _get_example_data([dataset_size, num_outputs], [dataset_size, num_outputs])
model, _, _ = trieste_deep_ensemble_model(example_data, ensemble_size, False, False)
predicted_means, predicted_vars = model.predict_ensemble(example_data.query_points)
assert predicted_means.shape[-3] == ensemble_size
assert predicted_vars.shape[-3] == ensemble_size
assert tf.is_tensor(predicted_means)
assert tf.is_tensor(predicted_vars)
assert predicted_means.shape[-2:] == example_data.observations.shape
assert predicted_vars.shape[-2:] == example_data.observations.shape
@pytest.mark.parametrize("num_samples", [6, 12])
@pytest.mark.parametrize("dataset_size", [4, 8])
def test_deep_ensemble_sample_call_shape(
num_samples: int, dataset_size: int, num_outputs: int
) -> None:
example_data = _get_example_data([dataset_size, num_outputs], [dataset_size, num_outputs])
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE, False, False)
samples = model.sample(example_data.query_points, num_samples)
assert tf.is_tensor(samples)
assert samples.shape == [num_samples, dataset_size, num_outputs]
@pytest.mark.parametrize("num_samples", [6, 12])
@pytest.mark.parametrize("dataset_size", [4, 8])
def test_deep_ensemble_sample_ensemble_call_shape(
num_samples: int, dataset_size: int, num_outputs: int
) -> None:
example_data = _get_example_data([dataset_size, num_outputs], [dataset_size, num_outputs])
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE, False, False)
samples = model.sample_ensemble(example_data.query_points, num_samples)
assert tf.is_tensor(samples)
assert samples.shape == [num_samples, dataset_size, num_outputs]
@random_seed
def test_deep_ensemble_optimize_with_defaults() -> None:
example_data = _get_example_data([100, 1])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False)
model = DeepEnsemble(keras_ensemble)
model.optimize(example_data)
loss = model.model.history.history["loss"]
assert loss[-1] < loss[0]
@random_seed
@pytest.mark.parametrize("epochs", [5, 15])
def test_deep_ensemble_optimize(ensemble_size: int, bootstrap_data: bool, epochs: int) -> None:
example_data = _get_example_data([100, 1])
keras_ensemble = trieste_keras_ensemble_model(example_data, ensemble_size, False)
custom_optimizer = tf.optimizers.RMSprop()
custom_fit_args = {
"verbose": 0,
"epochs": epochs,
"batch_size": 10,
}
custom_loss = tf.keras.losses.MeanSquaredError()
optimizer_wrapper = KerasOptimizer(custom_optimizer, custom_fit_args, custom_loss)
model = DeepEnsemble(keras_ensemble, optimizer_wrapper, bootstrap_data)
model.optimize(example_data)
loss = model.model.history.history["loss"]
ensemble_losses = ["output_loss" in elt for elt in model.model.history.history.keys()]
assert loss[-1] < loss[0]
assert len(loss) == epochs
assert sum(ensemble_losses) == ensemble_size
@random_seed
def test_deep_ensemble_loss(bootstrap_data: bool) -> None:
example_data = _get_example_data([100, 1])
loss = negative_log_likelihood
optimizer = tf.optimizers.Adam()
model = DeepEnsemble(
trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False),
KerasOptimizer(optimizer, loss=loss),
bootstrap_data,
)
reference_model = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False)
reference_model.model.compile(optimizer=optimizer, loss=loss)
reference_model.model.set_weights(model.model.get_weights())
    transformed_x, transformed_y = _ensemblise_data(
        reference_model, example_data, _ENSEMBLE_SIZE, bootstrap_data
    )
    loss = model.model.evaluate(transformed_x, transformed_y)[: _ENSEMBLE_SIZE + 1]
    reference_loss = reference_model.model.evaluate(transformed_x, transformed_y)
npt.assert_allclose(tf.constant(loss), reference_loss, rtol=1e-6)
@random_seed
def test_deep_ensemble_predict_ensemble() -> None:
example_data = _get_example_data([100, 1])
loss = negative_log_likelihood
optimizer = tf.optimizers.Adam()
model = DeepEnsemble(
trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False),
KerasOptimizer(optimizer, loss=loss),
)
reference_model = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False)
reference_model.model.compile(optimizer=optimizer, loss=loss)
reference_model.model.set_weights(model.model.get_weights())
predicted_means, predicted_vars = model.predict_ensemble(example_data.query_points)
    transformed_x, transformed_y = _ensemblise_data(
        reference_model, example_data, _ENSEMBLE_SIZE, False
    )
    ensemble_distributions = reference_model.model(transformed_x)
reference_means = tf.convert_to_tensor([dist.mean() for dist in ensemble_distributions])
reference_vars = tf.convert_to_tensor([dist.variance() for dist in ensemble_distributions])
npt.assert_allclose(predicted_means, reference_means)
npt.assert_allclose(predicted_vars, reference_vars)
@random_seed
def test_deep_ensemble_sample() -> None:
example_data = _get_example_data([100, 1])
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE, False, False)
num_samples = 100_000
samples = model.sample(example_data.query_points, num_samples)
sample_mean = tf.reduce_mean(samples, axis=0)
sample_variance = tf.reduce_mean((samples - sample_mean) ** 2, axis=0)
ref_mean, ref_variance = model.predict(example_data.query_points)
error = 1 / tf.sqrt(tf.cast(num_samples, tf.float32))
npt.assert_allclose(sample_mean, ref_mean, atol=4 * error)
npt.assert_allclose(sample_variance, ref_variance, atol=8 * error)
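# Editorial note (not in the original suite): with S Monte Carlo samples the standard
# error of the sample mean scales as 1 / sqrt(S), which is why the tests above and
# below use `error = 1 / sqrt(num_samples)`; the 4x, 8x and 2.5x factors are loose
# tolerance multipliers on top of that.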
@random_seed
def test_deep_ensemble_sample_ensemble(ensemble_size: int) -> None:
example_data = _get_example_data([20, 1])
model, _, _ = trieste_deep_ensemble_model(example_data, ensemble_size, False, False)
num_samples = 2000
samples = model.sample_ensemble(example_data.query_points, num_samples)
sample_mean = tf.reduce_mean(samples, axis=0)
ref_mean, _ = model.predict(example_data.query_points)
error = 1 / tf.sqrt(tf.cast(num_samples, tf.float32))
npt.assert_allclose(sample_mean, ref_mean, atol=2.5 * error)
@random_seed
def test_deep_ensemble_prepare_data_call(
ensemble_size: int,
bootstrap_data: bool,
) -> None:
n_rows = 100
x = tf.constant(np.arange(0, n_rows, 1), shape=[n_rows, 1])
y = tf.constant(np.arange(0, n_rows, 1), shape=[n_rows, 1])
example_data = Dataset(x, y)
model, _, _ = trieste_deep_ensemble_model(example_data, ensemble_size, bootstrap_data, False)
# call with whole dataset
data = model.prepare_dataset(example_data)
assert isinstance(data, tuple)
for ensemble_data in data:
assert isinstance(ensemble_data, dict)
assert len(ensemble_data.keys()) == ensemble_size
for member_data in ensemble_data:
if bootstrap_data:
assert tf.reduce_any(ensemble_data[member_data] != x)
else:
assert tf.reduce_all(ensemble_data[member_data] == x)
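    # editorial note: the digit-matching check below assumes each ensemble member's
    # input and output layers share a numeric suffix in their names, so inputs and
    # outputs are paired by matching the digits in the dict keys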
for inp, out in zip(data[0], data[1]):
assert "".join(filter(str.isdigit, inp)) == "".join(filter(str.isdigit, out))
# call with query points alone
inputs = model.prepare_query_points(example_data.query_points)
assert isinstance(inputs, dict)
assert len(inputs.keys()) == ensemble_size
for member_data in inputs:
assert tf.reduce_all(inputs[member_data] == x)
def test_deep_ensemble_deep_copyable() -> None:
example_data = _get_example_data([10, 3], [10, 3])
model, _, _ = trieste_deep_ensemble_model(example_data, 2, False, False)
model_copy = copy.deepcopy(model)
mean_f, variance_f = model.predict(example_data.query_points)
mean_f_copy, variance_f_copy = model_copy.predict(example_data.query_points)
npt.assert_allclose(mean_f, mean_f_copy)
npt.assert_allclose(variance_f, variance_f_copy)
# check that updating the original doesn't break or change the deepcopy
new_example_data = _get_example_data([20, 3], [20, 3])
model.update(new_example_data)
model.optimize(new_example_data)
mean_f_updated, variance_f_updated = model.predict(example_data.query_points)
mean_f_copy_updated, variance_f_copy_updated = model_copy.predict(example_data.query_points)
npt.assert_allclose(mean_f_copy_updated, mean_f_copy)
npt.assert_allclose(variance_f_copy_updated, variance_f_copy)
npt.assert_array_compare(operator.__ne__, mean_f_updated, mean_f)
npt.assert_array_compare(operator.__ne__, variance_f_updated, variance_f)
# check that we can also update the copy
newer_example_data = _get_example_data([30, 3], [30, 3])
model_copy.update(newer_example_data)
model_copy.optimize(newer_example_data)
mean_f_updated_2, variance_f_updated_2 = model.predict(example_data.query_points)
mean_f_copy_updated_2, variance_f_copy_updated_2 = model_copy.predict(example_data.query_points)
npt.assert_allclose(mean_f_updated_2, mean_f_updated)
npt.assert_allclose(variance_f_updated_2, variance_f_updated)
npt.assert_array_compare(operator.__ne__, mean_f_copy_updated_2, mean_f_copy_updated)
npt.assert_array_compare(operator.__ne__, variance_f_copy_updated_2, variance_f_copy_updated)
def test_deep_ensemble_tf_saved_model() -> None:
example_data = _get_example_data([10, 3], [10, 3])
model, _, _ = trieste_deep_ensemble_model(example_data, 2, False, False)
with tempfile.TemporaryDirectory() as path:
# create a trajectory sampler (used for sample method)
assert isinstance(model, HasTrajectorySampler)
trajectory_sampler = model.trajectory_sampler()
trajectory = trajectory_sampler.get_trajectory()
# generate client model with predict and sample methods
module = model.get_module_with_variables(trajectory_sampler, trajectory)
module.predict = tf.function(
model.predict, input_signature=[tf.TensorSpec(shape=[None, 3], dtype=tf.float64)]
)
def _sample(query_points: TensorType, num_samples: int) -> TensorType:
trajectory_updated = trajectory_sampler.resample_trajectory(trajectory)
expanded_query_points = tf.expand_dims(query_points, -2) # [N, 1, D]
tiled_query_points = tf.tile(expanded_query_points, [1, num_samples, 1]) # [N, S, D]
return tf.transpose(trajectory_updated(tiled_query_points), [1, 0, 2])[
:, :, :1
] # [S, N, L]
module.sample = tf.function(
_sample,
input_signature=[
tf.TensorSpec(shape=[None, 3], dtype=tf.float64), # query_points
tf.TensorSpec(shape=(), dtype=tf.int32), # num_samples
],
)
tf.saved_model.save(module, str(path))
client_model = tf.saved_model.load(str(path))
# test exported methods
mean_f, variance_f = model.predict(example_data.query_points)
mean_f_copy, variance_f_copy = client_model.predict(example_data.query_points)
npt.assert_allclose(mean_f, mean_f_copy)
npt.assert_allclose(variance_f, variance_f_copy)
client_model.sample(example_data.query_points, 10)
def test_deep_ensemble_deep_copies_optimizer_state() -> None:
example_data = _get_example_data([10, 3], [10, 3])
model, _, _ = trieste_deep_ensemble_model(example_data, 2, False, False)
new_example_data = _get_example_data([20, 3], [20, 3])
model.update(new_example_data)
assert not keras_optimizer_weights(model.model.optimizer)
model.optimize(new_example_data)
assert keras_optimizer_weights(model.model.optimizer)
model_copy = copy.deepcopy(model)
assert model.model.optimizer is not model_copy.model.optimizer
npt.assert_allclose(model_copy.model.optimizer.iterations, 1)
npt.assert_equal(
keras_optimizer_weights(model.model.optimizer),
keras_optimizer_weights(model_copy.model.optimizer),
)
@pytest.mark.parametrize(
"callbacks",
[
[
tf.keras.callbacks.CSVLogger("csv"),
tf.keras.callbacks.EarlyStopping(monitor="loss", patience=100),
tf.keras.callbacks.History(),
tf.keras.callbacks.LambdaCallback(lambda epoch, lr: lr),
tf.keras.callbacks.LearningRateScheduler(lambda epoch, lr: lr),
tf.keras.callbacks.ProgbarLogger(),
tf.keras.callbacks.ReduceLROnPlateau(),
tf.keras.callbacks.RemoteMonitor(),
tf.keras.callbacks.TensorBoard(),
tf.keras.callbacks.TerminateOnNaN(),
],
pytest.param(
[
tf.keras.callbacks.experimental.BackupAndRestore("backup"),
tf.keras.callbacks.BaseLogger(),
tf.keras.callbacks.ModelCheckpoint("weights"),
],
marks=pytest.mark.skip(reason="callbacks currently causing optimize to fail"),
),
],
)
def test_deep_ensemble_deep_copies_different_callback_types(callbacks: list[Callback]) -> None:
example_data = _get_example_data([10, 3], [10, 3])
model, _, _ = trieste_deep_ensemble_model(example_data, 2, False, False)
model.optimizer.fit_args["callbacks"] = callbacks
new_example_data = _get_example_data([20, 3], [20, 3])
model.update(new_example_data)
model.optimize(new_example_data)
model_copy = copy.deepcopy(model)
assert model.model.optimizer is not model_copy.model.optimizer
assert tuple(type(callback) for callback in model.optimizer.fit_args["callbacks"]) == tuple(
type(callback) for callback in model_copy.optimizer.fit_args["callbacks"]
)
def test_deep_ensemble_deep_copies_optimizer_callback_models() -> None:
example_data = _get_example_data([10, 3], [10, 3])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False)
model = DeepEnsemble(keras_ensemble)
new_example_data = _get_example_data([20, 3], [20, 3])
model.update(new_example_data)
model.optimize(new_example_data)
callback = model.optimizer.fit_args["callbacks"][0]
assert isinstance(callback, tf.keras.callbacks.EarlyStopping)
assert callback.model is model.model
model_copy = copy.deepcopy(model)
callback_copy = model_copy.optimizer.fit_args["callbacks"][0]
assert isinstance(callback_copy, tf.keras.callbacks.EarlyStopping)
assert callback_copy.model is model_copy.model is not callback.model
npt.assert_equal(callback_copy.model.get_weights(), callback.model.get_weights())
def test_deep_ensemble_deep_copies_optimizer_without_callbacks() -> None:
example_data = _get_example_data([10, 3], [10, 3])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False)
model = DeepEnsemble(keras_ensemble)
del model.optimizer.fit_args["callbacks"]
model_copy = copy.deepcopy(model)
assert model_copy.optimizer is not model.optimizer
assert model_copy.optimizer.fit_args == model.optimizer.fit_args
def test_deep_ensemble_deep_copies_optimization_history() -> None:
example_data = _get_example_data([10, 3], [10, 3])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False)
model = DeepEnsemble(keras_ensemble)
model.optimize(example_data)
assert model.model.history.history
expected_history = model.model.history.history
model_copy = copy.deepcopy(model)
assert model_copy.model.history.history
history = model_copy.model.history.history
assert history.keys() == expected_history.keys()
for k, v in expected_history.items():
assert history[k] == v
@unittest.mock.patch("trieste.logging.tf.summary.histogram")
@unittest.mock.patch("trieste.logging.tf.summary.scalar")
@pytest.mark.parametrize("use_dataset", [True, False])
def test_deep_ensemble_log(
mocked_summary_scalar: unittest.mock.MagicMock,
mocked_summary_histogram: unittest.mock.MagicMock,
use_dataset: bool,
) -> None:
example_data = _get_example_data([10, 3], [10, 3])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False)
model = DeepEnsemble(keras_ensemble)
model.optimize(example_data)
mocked_summary_writer = unittest.mock.MagicMock()
with tensorboard_writer(mocked_summary_writer):
with step_number(42):
if use_dataset:
model.log(example_data)
else:
model.log(None)
assert len(mocked_summary_writer.method_calls) == 1
assert mocked_summary_writer.method_calls[0][0] == "as_default"
assert mocked_summary_writer.method_calls[0][-1]["step"] == 42
    num_scalars = 5  # 5 scalars for the loss and metrics
    num_histogram = 1  # 1 histogram for the loss and metrics
if use_dataset: # write_summary_data_based_metrics
num_scalars += 8
num_histogram += 6
assert mocked_summary_scalar.call_count == num_scalars
assert mocked_summary_histogram.call_count == num_histogram
| 28,485 | 37.390836 | 100 | py |
trieste-develop | trieste-develop/tests/unit/models/keras/test_sampler.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import random
from typing import Any, Callable, Optional, cast
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import empty_dataset, quadratic, random_seed
from tests.util.models.keras.models import trieste_deep_ensemble_model
from trieste.data import Dataset
from trieste.models.keras import (
DeepEnsemble,
DeepEnsembleTrajectorySampler,
deep_ensemble_trajectory,
)
from trieste.types import TensorType
_ENSEMBLE_SIZE = 3
@pytest.fixture(name="diversify", params=[True, False])
def _diversify_fixture(request: Any) -> bool:
return request.param
@pytest.fixture(name="num_evals", params=[9, 19])
def _num_evals_fixture(request: Any) -> int:
return request.param
@pytest.fixture(name="batch_size", params=[1, 2])
def _batch_size_fixture(request: Any) -> int:
return request.param
@pytest.fixture(name="num_outputs", params=[1, 3])
def _num_outputs_fixture(request: Any) -> int:
return request.param
def test_ensemble_trajectory_sampler_returns_trajectory_function_with_correctly_shaped_output(
num_evals: int,
batch_size: int,
dim: int,
diversify: bool,
num_outputs: int,
) -> None:
"""
    Inputs should be [N, B, d] while outputs should be [N, B, M]. Note that with the
    diversify option, only single-output models are allowed.
"""
example_data = empty_dataset([dim], [num_outputs])
test_data = tf.random.uniform([num_evals, batch_size, dim]) # [N, B, d]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE)
sampler = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory = sampler.get_trajectory()
assert trajectory(test_data).shape == (num_evals, batch_size, num_outputs)
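# Editorial sketch (not part of the original suite): the [N, B, d] batch layout used
# throughout these tests can be built from plain [N, d] query points as below; the
# helper name is illustrative only.
def _example_batched_query_points(query_points: TensorType, batch_size: int) -> TensorType:
    expanded = tf.expand_dims(query_points, -2)  # [N, 1, d]
    return tf.tile(expanded, [1, batch_size, 1])  # [N, B, d]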
def test_ensemble_trajectory_sampler_returns_deterministic_trajectory(
num_evals: int, batch_size: int, dim: int, diversify: bool, num_outputs: int
) -> None:
"""
Evaluating the same data with the same trajectory multiple times should yield
exactly the same output.
"""
example_data = empty_dataset([dim], [num_outputs])
test_data = tf.random.uniform([num_evals, batch_size, dim]) # [N, B, d]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE)
sampler = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory = sampler.get_trajectory()
eval_1 = trajectory(test_data)
eval_2 = trajectory(test_data)
npt.assert_allclose(eval_1, eval_2)
@pytest.mark.parametrize("seed", [42, None])
def test_ensemble_trajectory_sampler_is_not_too_deterministic(
seed: Optional[int], diversify: bool
) -> None:
"""
Different trajectories should have different internal state, even if we set the global RNG seed.
"""
num_evals, batch_size, dim = 19, 5, 10
state = "_eps" if diversify else "_indices"
example_data = empty_dataset([dim], [1])
test_data = tf.random.uniform([num_evals, batch_size, dim]) # [N, B, d]
model1, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE * 2)
model2, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE * 2)
tf.random.set_seed(seed)
np.random.seed(seed)
random.seed(seed)
# check that the initialised states are different
trajectory1 = DeepEnsembleTrajectorySampler(model1, diversify=diversify).get_trajectory()
trajectory2 = DeepEnsembleTrajectorySampler(model2, diversify=diversify).get_trajectory()
eval1 = trajectory1(test_data)
eval2 = trajectory2(test_data)
npt.assert_raises(AssertionError, npt.assert_allclose, eval1, eval2)
npt.assert_raises(
AssertionError,
npt.assert_allclose,
getattr(trajectory1, state),
getattr(trajectory2, state),
)
# check that the state remains different after resampling
for _ in range(2):
cast(deep_ensemble_trajectory, trajectory1).resample()
cast(deep_ensemble_trajectory, trajectory2).resample()
eval1 = trajectory1(test_data)
eval2 = trajectory2(test_data)
npt.assert_raises(AssertionError, npt.assert_allclose, eval1, eval2)
npt.assert_raises(
AssertionError,
npt.assert_allclose,
getattr(trajectory1, state),
getattr(trajectory2, state),
)
def test_ensemble_trajectory_sampler_samples_are_distinct_for_new_instances(
diversify: bool,
) -> None:
"""
    If seeds are not fixed, instantiating a new sampler should give us different trajectories.
"""
example_data = empty_dataset([1], [1])
test_data = tf.linspace([-10.0], [10.0], 100)
test_data = tf.expand_dims(test_data, -2) # [N, 1, d]
test_data = tf.tile(test_data, [1, 2, 1]) # [N, 2, D]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE * 10)
def _get_trajectory_evaluation(
model: DeepEnsemble, diversify: bool, seed: int
) -> Callable[[TensorType], TensorType]:
"""This allows us to set a different seed for each instance"""
@random_seed(seed=seed)
def foo(query_points: TensorType) -> TensorType:
sampler = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory = sampler.get_trajectory()
return trajectory(query_points)
return foo
eval_1 = _get_trajectory_evaluation(model, diversify, 0)(test_data)
eval_2 = _get_trajectory_evaluation(model, diversify, 1)(test_data)
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(eval_1 - eval_2))
    ) # distinct between separate draws
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(eval_1[:, 0] - eval_1[:, 1]))
) # distinct for two samples within same draw
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(eval_2[:, 0] - eval_2[:, 1]))
) # distinct for two samples within same draw
@random_seed
def test_ensemble_trajectory_sampler_samples_are_distinct_within_batch(diversify: bool) -> None:
"""
    Samples for elements of the batch should be different. Note that when diversify is
    not used, for small ensembles we could randomly choose the same network and then we
    would get the same result.
"""
example_data = empty_dataset([1], [1])
test_data = tf.linspace([-10.0], [10.0], 100)
test_data = tf.expand_dims(test_data, -2) # [N, 1, d]
test_data = tf.tile(test_data, [1, 2, 1]) # [N, 2, D]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE * 3)
sampler1 = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory1 = sampler1.get_trajectory()
sampler2 = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory2 = sampler2.get_trajectory()
eval_1 = trajectory1(test_data)
eval_2 = trajectory2(test_data)
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(eval_1[:, 0] - eval_1[:, 1]))
) # distinct for two samples within same draw
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(eval_2[:, 0] - eval_2[:, 1]))
) # distinct for two samples within same draw
@random_seed
def test_ensemble_trajectory_sampler_eps_broadcasted_correctly() -> None:
"""
We check if eps are broadcasted correctly in diversify mode.
"""
example_data = empty_dataset([1], [1])
test_data = tf.linspace([-10.0], [10.0], 100)
test_data = tf.expand_dims(test_data, -2) # [N, 1, d]
test_data = tf.tile(test_data, [1, 2, 1]) # [N, 2, D]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE)
trajectory_sampler = DeepEnsembleTrajectorySampler(model, diversify=True)
trajectory = trajectory_sampler.get_trajectory()
_ = trajectory(test_data) # first call needed to initialize the state
trajectory._eps.assign(tf.constant([[0], [1]], dtype=tf.float64)) # type: ignore
evals = trajectory(test_data)
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(evals[:, 0] - evals[:, 1]))
) # distinct for two samples within same draw
npt.assert_allclose(
evals[:, 0], model.predict(test_data[:, 0])[0], rtol=5e-6
) # since we set first eps to 0, that trajectory should equal predicted means
@random_seed
def test_ensemble_trajectory_sampler_resample_with_new_sampler_does_not_change_old_sampler(
diversify: bool,
) -> None:
"""
Generating a new trajectory and resampling it will not affect a previous
    trajectory instance. Before resampling, evaluations from both trajectories
are the same.
"""
example_data = empty_dataset([1], [1])
test_data = tf.linspace([-10.0], [10.0], 100)
test_data = tf.expand_dims(test_data, -2) # [N, 1, d]
test_data = tf.tile(test_data, [1, 2, 1]) # [N, 2, D]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE * 3)
sampler = DeepEnsembleTrajectorySampler(model, diversify)
trajectory1 = sampler.get_trajectory()
evals_11 = trajectory1(test_data)
trajectory2 = sampler.get_trajectory()
evals_21 = trajectory2(test_data)
trajectory2 = sampler.resample_trajectory(trajectory2)
evals_22 = trajectory2(test_data)
evals_12 = trajectory1(test_data)
npt.assert_array_less(1e-1, tf.reduce_max(tf.abs(evals_22 - evals_21)))
npt.assert_allclose(evals_11, evals_21)
npt.assert_allclose(evals_11, evals_12)
@random_seed
def test_ensemble_trajectory_sampler_new_trajectories_diverge(diversify: bool) -> None:
"""
Generating two trajectories from the same sampler and resampling them will lead to different
trajectories, even though they were initially the same.
"""
example_data = empty_dataset([1], [1])
test_data = tf.linspace([-10.0], [10.0], 100)
test_data = tf.expand_dims(test_data, -2) # [N, 1, d]
test_data = tf.tile(test_data, [1, 2, 1]) # [N, 2, D]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE * 3)
sampler = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory11 = sampler.get_trajectory()
evals_11 = trajectory11(test_data)
trajectory12 = sampler.resample_trajectory(trajectory11)
evals_12 = trajectory12(test_data)
trajectory21 = sampler.get_trajectory()
evals_21 = trajectory21(test_data)
trajectory22 = sampler.resample_trajectory(trajectory21)
evals_22 = trajectory22(test_data)
npt.assert_allclose(evals_11, evals_21)
npt.assert_array_less(1e-1, tf.reduce_max(tf.abs(evals_22 - evals_12)))
npt.assert_array_less(1e-1, tf.reduce_max(tf.abs(evals_11 - evals_12)))
npt.assert_array_less(1e-1, tf.reduce_max(tf.abs(evals_21 - evals_22)))
@random_seed
def test_ensemble_trajectory_sampler_resample_provides_new_samples_without_retracing(
diversify: bool,
) -> None:
"""
    Resampling a trajectory should be done without retracing; we also check whether we
    get different samples.
"""
example_data = empty_dataset([1], [1])
test_data = tf.linspace([-10.0], [10.0], 100)
test_data = tf.expand_dims(test_data, -2) # [N, 1, d]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE * 3)
sampler = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory = sampler.get_trajectory()
evals_1 = trajectory(test_data)
trajectory = sampler.resample_trajectory(trajectory)
evals_2 = trajectory(test_data)
trajectory = sampler.resample_trajectory(trajectory)
evals_3 = trajectory(test_data)
# no retracing
assert trajectory.__call__._get_tracing_count() == 1 # type: ignore
# check all samples are different
npt.assert_array_less(1e-4, tf.abs(evals_1 - evals_2))
npt.assert_array_less(1e-4, tf.abs(evals_2 - evals_3))
npt.assert_array_less(1e-4, tf.abs(evals_1 - evals_3))
@random_seed
def test_ensemble_trajectory_sampler_update_trajectory_updates_and_doesnt_retrace(
diversify: bool,
) -> None:
"""
    We update the trajectory after optimizing the model, check that the model has
    indeed changed, and verify that the samples are new.
"""
dim = 3
batch_size = 2
num_data = 100
example_data = empty_dataset([dim], [1])
test_data = tf.random.uniform([num_data, batch_size, dim]) # [N, B, d]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE)
trajectory_sampler = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory = trajectory_sampler.get_trajectory()
eval_before = trajectory(test_data)
for _ in range(3):
x_train = tf.random.uniform([num_data, dim]) # [N, d]
new_dataset = Dataset(x_train, quadratic(x_train))
model = cast(DeepEnsemble, trajectory_sampler._model)
old_weights = model.model.get_weights()
model.optimize(new_dataset)
trajectory_updated = trajectory_sampler.update_trajectory(trajectory)
eval_after = trajectory(test_data)
assert trajectory_updated is trajectory # check update was in place
        npt.assert_array_less(1e-4, tf.abs(model.model.get_weights()[0] - old_weights[0]))
npt.assert_array_less(
0.01, tf.reduce_max(tf.abs(eval_before - eval_after))
) # two samples should be different
assert trajectory.__call__._get_tracing_count() == 1 # type: ignore
@random_seed
def test_ensemble_trajectory_sampler_trajectory_on_subsets_same_as_set(diversify: bool) -> None:
"""
    We check that calling the trajectory on a set of data matches calling it on subsets.
"""
x_train = 10 * tf.random.uniform([10000, 1]) # [N, d]
train_data = Dataset(x_train, quadratic(x_train))
test_data = tf.linspace([-10.0], [10.0], 300)
test_data = tf.expand_dims(test_data, -2) # [N, 1, d]
test_data = tf.tile(test_data, [1, 2, 1]) # [N, 2, d]
model, _, _ = trieste_deep_ensemble_model(train_data, _ENSEMBLE_SIZE)
model.optimize(train_data)
trajectory_sampler = DeepEnsembleTrajectorySampler(model, diversify)
trajectory = trajectory_sampler.get_trajectory()
eval_all = trajectory(test_data)
eval_1 = trajectory(test_data[0:100, :])
eval_2 = trajectory(test_data[100:200, :])
eval_3 = trajectory(test_data[200:300, :])
npt.assert_allclose(eval_all, tf.concat([eval_1, eval_2, eval_3], axis=0), rtol=5e-6)
@random_seed
def test_ensemble_trajectory_sampler_trajectory_is_continuous(diversify: bool) -> None:
"""
    We check that the trajectory gives continuous output: a small change in x should
    give only a small change in y.
"""
x_train = 10 * tf.random.uniform([10000, 1]) # [N, d]
train_data = Dataset(x_train, quadratic(x_train))
test_data = tf.linspace([-10.0], [10.0], 300)
test_data = tf.expand_dims(test_data, -2) # [N, 1, d]
test_data = tf.tile(test_data, [1, 2, 1]) # [N, 2, d]
model, _, _ = trieste_deep_ensemble_model(train_data, _ENSEMBLE_SIZE)
trajectory_sampler = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory = trajectory_sampler.get_trajectory()
npt.assert_array_less(tf.abs(trajectory(test_data + 1e-20) - trajectory(test_data)), 1e-20)
def test_ensemble_trajectory_sampler_returns_state(batch_size: int, diversify: bool) -> None:
dim = 3
num_evals = 10
example_data = empty_dataset([dim], [1])
test_data = tf.random.uniform([num_evals, batch_size, dim]) # [N, B, d]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE)
sampler = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory = cast(deep_ensemble_trajectory, sampler.get_trajectory())
if diversify:
dtype = tf.float64
rnd_state_name = "eps"
else:
dtype = tf.int32
rnd_state_name = "indices"
    # before calling the trajectory, the internal state should not be initialized
state_pre_call = trajectory.get_state()
assert not state_pre_call["initialized"]
assert state_pre_call["batch_size"] == 0
assert tf.equal(tf.size(state_pre_call[rnd_state_name]), 0)
assert state_pre_call[rnd_state_name].dtype == dtype
    # after calling the trajectory, the internal state should be initialized
_ = trajectory(test_data)
state_post_call = trajectory.get_state()
assert state_post_call["initialized"]
assert state_post_call["batch_size"] == batch_size
assert tf.equal(tf.size(state_post_call[rnd_state_name]), batch_size)
assert state_post_call[rnd_state_name].dtype == dtype
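# Editorial note: per the assertions above, the state returned by `get_state` is a
# dict of the form {"initialized": bool, "batch_size": int, "eps" or "indices":
# tf.Tensor}, with the random-state tensor holding one entry per batch element.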
| 17,015 | 35.515021 | 100 | py |
trieste-develop | trieste-develop/tests/unit/models/keras/test_utils.py | # Copyright 2021 The Bellman Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from tests.util.misc import ShapeLike, empty_dataset, random_seed
from trieste.data import Dataset
from trieste.models.keras.utils import (
get_tensor_spec_from_data,
sample_model_index,
sample_with_replacement,
)
def test_get_tensor_spec_from_data_raises_for_incorrect_dataset() -> None:
dataset = empty_dataset([1], [1])
with pytest.raises(ValueError):
get_tensor_spec_from_data(dataset.query_points)
@pytest.mark.parametrize(
"query_point_shape, observation_shape",
[([1], [1]), ([2], [1]), ([5], [1]), ([5], [2]), ([3, 2], [3, 1])],
)
def test_get_tensor_spec_from_data(
query_point_shape: ShapeLike, observation_shape: ShapeLike
) -> None:
dataset = empty_dataset(query_point_shape, observation_shape)
input_spec, output_spec = get_tensor_spec_from_data(dataset)
assert input_spec.shape == query_point_shape
assert input_spec.dtype == dataset.query_points.dtype
assert input_spec.name == "query_points"
assert output_spec.shape == observation_shape
assert output_spec.dtype == dataset.observations.dtype
assert output_spec.name == "observations"
def test_sample_with_replacement_raises_for_invalid_dataset() -> None:
dataset = empty_dataset([1], [1])
with pytest.raises(ValueError):
sample_with_replacement(dataset.query_points)
def test_sample_with_replacement_raises_for_empty_dataset() -> None:
dataset = empty_dataset([1], [1])
with pytest.raises(tf.errors.InvalidArgumentError):
sample_with_replacement(dataset)
@random_seed
@pytest.mark.parametrize("rank", [2, 3])
def test_sample_with_replacement_seems_correct(rank: int) -> None:
n_rows = 100
if rank == 2:
x = tf.constant(np.arange(0, n_rows, 1), shape=[n_rows, 1])
y = tf.constant(np.arange(0, n_rows, 1), shape=[n_rows, 1])
elif rank == 3:
x = tf.constant(np.arange(0, n_rows, 1).repeat(2), shape=[n_rows, 2, 1])
y = tf.constant(np.arange(0, n_rows, 1).repeat(2), shape=[n_rows, 2, 1])
dataset = Dataset(x, y)
dataset_resampled = sample_with_replacement(dataset)
# basic check that original dataset has not been changed
assert tf.reduce_all(dataset.query_points == x)
assert tf.reduce_all(dataset.observations == y)
# x and y should be resampled the same, and should differ from the original
assert tf.reduce_all(dataset_resampled.query_points == dataset_resampled.observations)
assert tf.reduce_any(dataset_resampled.query_points != x)
assert tf.reduce_any(dataset_resampled.observations != y)
# values are likely to repeat due to replacement
_, _, count = tf.unique_with_counts(tf.squeeze(dataset_resampled.query_points[:, 0]))
assert tf.reduce_any(count > 1)
    # the mean of bootstrap sample means should be close to the true mean; its standard
    # deviation should be roughly std(x) / sqrt(n_rows) = std(x) / 10
mean = [
tf.reduce_mean(
tf.cast(sample_with_replacement(dataset).query_points[:, 0], dtype=tf.float32)
)
for _ in range(100)
]
x = tf.cast(x[:, 0], dtype=tf.float32)
    assert tf.math.abs(tf.reduce_mean(mean) - tf.reduce_mean(x)) < 1
assert tf.math.abs(tf.math.reduce_std(mean) - tf.math.reduce_std(x) / 10.0) < 0.1
@pytest.mark.parametrize("size", [2, 10])
@pytest.mark.parametrize("num_samples", [0, 1, 10])
def test_sample_model_index_call_shape(size: int, num_samples: int) -> None:
indices = sample_model_index(size, num_samples)
assert indices.shape == (num_samples,)
@random_seed
@pytest.mark.parametrize("size", [2, 5, 10, 20])
def test_sample_model_index_size(size: int) -> None:
indices = sample_model_index(size, 1000)
assert tf.math.reduce_variance(tf.cast(indices, tf.float32)) > 0
assert tf.reduce_min(indices) >= 0
assert tf.reduce_max(indices) < size
@pytest.mark.parametrize("size", [10, 20, 50, 100])
def test_sample_model_index_no_replacement(size: int) -> None:
indices = sample_model_index(size, size)
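    # editorial note: with num_samples == size, sampling is without replacement, so the
    # indices should be a permutation of range(size): same sum, each index exactly once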
assert tf.reduce_sum(indices) == tf.reduce_sum(tf.range(size))
assert tf.reduce_all(tf.unique_with_counts(indices)[2] == 1)
| 4,687 | 34.78626 | 90 | py |
trieste-develop | trieste-develop/tests/util/models/gpflux/models.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple GPflux models to be used in the tests.
"""
from __future__ import annotations
from typing import Any, Dict, Tuple
import gpflow
import tensorflow as tf
from gpflow.utilities import set_trainable
from gpflux.architectures import Config, build_constant_input_dim_deep_gp
from gpflux.helpers import construct_basic_kernel
from gpflux.layers import GPLayer
from gpflux.models import DeepGP
from trieste.data import Dataset, TensorType
from trieste.models.gpflux import DeepGaussianProcess, build_vanilla_deep_gp
from trieste.models.optimizer import KerasOptimizer
from trieste.space import SearchSpace
from trieste.utils import to_numpy
def single_layer_dgp_model(x: TensorType) -> DeepGP:
x = to_numpy(x)
config = Config(
num_inducing=len(x),
inner_layer_qsqrt_factor=1e-5,
likelihood_noise_variance=1e-2,
whiten=True, # whiten = False not supported yet in GPflux for this model
)
return build_constant_input_dim_deep_gp(X=x, num_layers=1, config=config)
def two_layer_dgp_model(x: TensorType) -> DeepGP:
x = to_numpy(x)
config = Config(
num_inducing=len(x),
inner_layer_qsqrt_factor=1e-5,
likelihood_noise_variance=1e-2,
whiten=True, # whiten = False not supported yet in GPflux for this model
)
return build_constant_input_dim_deep_gp(X=x, num_layers=2, config=config)
def simple_two_layer_dgp_model(x: TensorType) -> DeepGP:
x = to_numpy(x)
x_shape = x.shape[-1]
num_data = len(x)
Z = x.copy()
kernel_1 = gpflow.kernels.SquaredExponential()
inducing_variable_1 = gpflow.inducing_variables.InducingPoints(Z.copy())
gp_layer_1 = GPLayer(
kernel_1,
inducing_variable_1,
num_data=num_data,
num_latent_gps=x_shape,
)
kernel_2 = gpflow.kernels.SquaredExponential()
inducing_variable_2 = gpflow.inducing_variables.InducingPoints(Z.copy())
gp_layer_2 = GPLayer(
kernel_2,
inducing_variable_2,
num_data=num_data,
num_latent_gps=1,
mean_function=gpflow.mean_functions.Zero(),
)
return DeepGP([gp_layer_1, gp_layer_2], gpflow.likelihoods.Gaussian(0.01))
def separate_independent_kernel_two_layer_dgp_model(x: TensorType) -> DeepGP:
x = to_numpy(x)
x_shape = x.shape[-1]
num_data = len(x)
Z = x.copy()
kernel_list = [
gpflow.kernels.SquaredExponential(
variance=tf.exp(tf.random.normal([], dtype=gpflow.default_float())),
lengthscales=tf.exp(tf.random.normal([], dtype=gpflow.default_float())),
)
for _ in range(x_shape)
]
kernel_1 = construct_basic_kernel(kernel_list)
inducing_variable_1 = gpflow.inducing_variables.SharedIndependentInducingVariables(
gpflow.inducing_variables.InducingPoints(Z.copy())
)
gp_layer_1 = GPLayer(
kernel_1,
inducing_variable_1,
num_data=num_data,
num_latent_gps=x_shape,
)
kernel_2 = gpflow.kernels.SquaredExponential()
inducing_variable_2 = gpflow.inducing_variables.InducingPoints(Z.copy())
gp_layer_2 = GPLayer(
kernel_2,
inducing_variable_2,
num_data=num_data,
num_latent_gps=1,
mean_function=gpflow.mean_functions.Zero(),
)
return DeepGP([gp_layer_1, gp_layer_2], gpflow.likelihoods.Gaussian(0.01))
def trieste_deep_gaussian_process(
data: Dataset,
search_space: SearchSpace,
num_layers: int,
num_inducing_points: int,
learning_rate: float,
batch_size: int,
epochs: int,
fix_noise: bool = False,
) -> Tuple[DeepGaussianProcess, Dict[str, Any]]:
dgp = build_vanilla_deep_gp(data, search_space, num_layers, num_inducing_points)
if fix_noise:
dgp.likelihood_layer.likelihood.variance.assign(1e-5)
set_trainable(dgp.likelihood_layer, False)
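    # editorial note: the schedule below drops the learning rate by a factor of 10
    # halfway through training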
def scheduler(epoch: int, lr: float) -> float:
if epoch == epochs // 2:
return lr * 0.1
else:
return lr
fit_args = {
"batch_size": batch_size,
"epochs": epochs,
"verbose": 0,
"callbacks": tf.keras.callbacks.LearningRateScheduler(scheduler),
}
optimizer = KerasOptimizer(tf.optimizers.Adam(learning_rate), fit_args)
model = DeepGaussianProcess(dgp, optimizer)
return model, fit_args
def two_layer_trieste_dgp(data: Dataset, search_space: SearchSpace) -> DeepGaussianProcess:
return trieste_deep_gaussian_process(data, search_space, 2, 10, 0.01, 5, 10)[0]
| 5,115 | 30.195122 | 91 | py |
trieste-develop | trieste-develop/tests/util/models/keras/models.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for creating (Keras) neural network models to be used in the tests.
"""
from __future__ import annotations
from typing import Optional, Tuple
import tensorflow as tf
from packaging.version import Version
from trieste.data import Dataset
from trieste.models.keras import (
DeepEnsemble,
GaussianNetwork,
KerasEnsemble,
get_tensor_spec_from_data,
)
from trieste.models.optimizer import KerasOptimizer
from trieste.types import TensorType
def trieste_keras_ensemble_model(
example_data: Dataset,
ensemble_size: int,
independent_normal: bool = False,
) -> KerasEnsemble:
input_tensor_spec, output_tensor_spec = get_tensor_spec_from_data(example_data)
networks = [
GaussianNetwork(
input_tensor_spec,
output_tensor_spec,
hidden_layer_args=[
{"units": 32, "activation": "selu"},
{"units": 32, "activation": "selu"},
],
independent=independent_normal,
)
for _ in range(ensemble_size)
]
keras_ensemble = KerasEnsemble(networks)
return keras_ensemble
def trieste_deep_ensemble_model(
example_data: Dataset,
ensemble_size: int,
bootstrap_data: bool = False,
independent_normal: bool = False,
) -> Tuple[DeepEnsemble, KerasEnsemble, KerasOptimizer]:
keras_ensemble = trieste_keras_ensemble_model(example_data, ensemble_size, independent_normal)
optimizer = tf.keras.optimizers.Adam()
fit_args = {
"batch_size": 100,
"epochs": 1,
"callbacks": [],
"verbose": 0,
}
optimizer_wrapper = KerasOptimizer(optimizer, fit_args)
model = DeepEnsemble(keras_ensemble, optimizer_wrapper, bootstrap_data)
return model, keras_ensemble, optimizer_wrapper
def keras_optimizer_weights(optimizer: tf.keras.optimizers.Optimizer) -> Optional[TensorType]:
# optimizer weight API was changed in TF 2.11: https://github.com/keras-team/keras/issues/16983
if Version(tf.__version__) < Version("2.11"):
return optimizer.get_weights()
else:
return optimizer.variables[0]
| 2,717 | 29.539326 | 99 | py |
trieste-develop | trieste-develop/tests/integration/test_bayesian_optimization.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import tempfile
from functools import partial
from pathlib import Path
from typing import Any, List, Mapping, Optional, Tuple, Type, cast
import dill
import gpflow
import numpy.testing as npt
import pytest
import tensorflow as tf
from _pytest.mark import ParameterSet
from tests.util.misc import random_seed
from trieste.acquisition import (
GIBBON,
AcquisitionFunctionClass,
AugmentedExpectedImprovement,
BatchExpectedImprovement,
BatchMonteCarloExpectedImprovement,
Fantasizer,
GreedyAcquisitionFunctionBuilder,
GreedyContinuousThompsonSampling,
LocalPenalization,
MinValueEntropySearch,
MonteCarloAugmentedExpectedImprovement,
MonteCarloExpectedImprovement,
MultipleOptimismNegativeLowerConfidenceBound,
ParallelContinuousThompsonSampling,
)
from trieste.acquisition.optimizer import generate_continuous_optimizer
from trieste.acquisition.rule import (
TURBO,
AcquisitionRule,
AsynchronousGreedy,
AsynchronousOptimization,
AsynchronousRuleState,
BatchHypervolumeSharpeRatioIndicator,
DiscreteThompsonSampling,
EfficientGlobalOptimization,
TrustRegion,
)
from trieste.acquisition.sampler import ThompsonSamplerFromTrajectory
from trieste.bayesian_optimizer import (
BayesianOptimizer,
FrozenRecord,
OptimizationResult,
TrainableProbabilisticModelType,
stop_at_minimum,
)
from trieste.logging import tensorboard_writer
from trieste.models import TrainableProbabilisticModel, TrajectoryFunctionClass
from trieste.models.gpflow import (
ConditionalImprovementReduction,
GaussianProcessRegression,
GPflowPredictor,
SparseGaussianProcessRegression,
SparseVariational,
VariationalGaussianProcess,
build_gpr,
build_sgpr,
build_svgp,
)
from trieste.models.gpflux import DeepGaussianProcess, build_vanilla_deep_gp
from trieste.models.keras import DeepEnsemble, build_keras_ensemble
from trieste.models.optimizer import KerasOptimizer, Optimizer
from trieste.objectives import ScaledBranin, SimpleQuadratic
from trieste.objectives.utils import mk_observer
from trieste.observer import OBJECTIVE
from trieste.space import Box, SearchSpace
from trieste.types import State, TensorType
try:
import pymoo
except ImportError: # pragma: no cover (tested but not by coverage)
pymoo = None
# Optimizer parameters for testing GPR against the branin function.
# We also use these for a quicker test against a simple quadratic function
# (regenerating is necessary as some of the acquisition rules are stateful).
def GPR_OPTIMIZER_PARAMS() -> Tuple[str, List[ParameterSet]]:
return (
"num_steps, acquisition_rule",
[
pytest.param(20, EfficientGlobalOptimization(), id="EfficientGlobalOptimization"),
pytest.param(
30,
EfficientGlobalOptimization(AugmentedExpectedImprovement().using(OBJECTIVE)),
id="AugmentedExpectedImprovement",
),
pytest.param(
20,
EfficientGlobalOptimization(
MonteCarloExpectedImprovement(int(1e3)).using(OBJECTIVE),
generate_continuous_optimizer(100),
),
id="MonteCarloExpectedImprovement",
),
pytest.param(
24,
EfficientGlobalOptimization(
MinValueEntropySearch(
ScaledBranin.search_space,
min_value_sampler=ThompsonSamplerFromTrajectory(sample_min_value=True),
).using(OBJECTIVE)
),
id="MinValueEntropySearch",
),
pytest.param(
12,
EfficientGlobalOptimization(
BatchExpectedImprovement(sample_size=100).using(OBJECTIVE),
num_query_points=3,
),
id="BatchExpectedImprovement",
),
pytest.param(
12,
EfficientGlobalOptimization(
BatchMonteCarloExpectedImprovement(sample_size=500).using(OBJECTIVE),
num_query_points=3,
),
id="BatchMonteCarloExpectedImprovement",
),
pytest.param(
12, AsynchronousOptimization(num_query_points=3), id="AsynchronousOptimization"
),
pytest.param(
15,
EfficientGlobalOptimization(
LocalPenalization(
ScaledBranin.search_space,
).using(OBJECTIVE),
num_query_points=3,
),
id="LocalPenalization",
),
pytest.param(
15,
AsynchronousGreedy(
LocalPenalization(
ScaledBranin.search_space,
).using(OBJECTIVE),
num_query_points=3,
),
id="LocalPenalization/AsynchronousGreedy",
),
pytest.param(
10,
EfficientGlobalOptimization(
GIBBON(
ScaledBranin.search_space,
).using(OBJECTIVE),
num_query_points=2,
),
id="GIBBON",
),
pytest.param(
20,
EfficientGlobalOptimization(
MultipleOptimismNegativeLowerConfidenceBound(
ScaledBranin.search_space,
).using(OBJECTIVE),
num_query_points=3,
),
id="MultipleOptimismNegativeLowerConfidenceBound",
),
pytest.param(20, TrustRegion(), id="TrustRegion"),
pytest.param(
15,
TrustRegion(
EfficientGlobalOptimization(
MinValueEntropySearch(
ScaledBranin.search_space,
).using(OBJECTIVE)
)
),
id="TrustRegion/MinValueEntropySearch",
),
pytest.param(
10,
TURBO(ScaledBranin.search_space, rule=DiscreteThompsonSampling(500, 3)),
id="Turbo",
),
pytest.param(15, DiscreteThompsonSampling(500, 5), id="DiscreteThompsonSampling"),
pytest.param(
15,
EfficientGlobalOptimization(
Fantasizer(),
num_query_points=3,
),
id="Fantasizer",
),
pytest.param(
10,
EfficientGlobalOptimization(
GreedyContinuousThompsonSampling(),
num_query_points=5,
),
id="GreedyContinuousThompsonSampling",
),
pytest.param(
10,
EfficientGlobalOptimization(
ParallelContinuousThompsonSampling(),
num_query_points=5,
),
id="ParallelContinuousThompsonSampling",
),
pytest.param(
15,
BatchHypervolumeSharpeRatioIndicator() if pymoo else None,
id="BatchHypevolumeSharpeRatioIndicator",
marks=pytest.mark.qhsri,
),
],
)
@random_seed
@pytest.mark.slow # to run this, add --runslow yes to the pytest command
@pytest.mark.parametrize(*GPR_OPTIMIZER_PARAMS())
def test_bayesian_optimizer_with_gpr_finds_minima_of_scaled_branin(
num_steps: int,
acquisition_rule: AcquisitionRule[TensorType, SearchSpace, GaussianProcessRegression]
| AcquisitionRule[
State[TensorType, AsynchronousRuleState | TrustRegion.State], Box, GaussianProcessRegression
],
) -> None:
_test_optimizer_finds_minimum(
GaussianProcessRegression, num_steps, acquisition_rule, optimize_branin=True
)
@random_seed
@pytest.mark.parametrize(*GPR_OPTIMIZER_PARAMS())
def test_bayesian_optimizer_with_gpr_finds_minima_of_simple_quadratic(
num_steps: int,
acquisition_rule: AcquisitionRule[TensorType, SearchSpace, GaussianProcessRegression]
| AcquisitionRule[
State[TensorType, AsynchronousRuleState | TrustRegion.State], Box, GaussianProcessRegression
],
) -> None:
    # for speed reasons we sometimes test with a simple quadratic defined on the same search
    # space as branin; we currently assume that every rule should be able to solve this in 6 steps
_test_optimizer_finds_minimum(GaussianProcessRegression, min(num_steps, 6), acquisition_rule)
@random_seed
@pytest.mark.slow
def test_bayesian_optimizer_with_vgp_finds_minima_of_scaled_branin() -> None:
_test_optimizer_finds_minimum(
VariationalGaussianProcess,
10,
EfficientGlobalOptimization[SearchSpace, VariationalGaussianProcess](
builder=ParallelContinuousThompsonSampling(), num_query_points=5
),
)
@random_seed
@pytest.mark.parametrize("use_natgrads", [False, True])
def test_bayesian_optimizer_with_vgp_finds_minima_of_simple_quadratic(use_natgrads: bool) -> None:
    # regression test for [#406]; a model using natgrads doesn't fit the objective well,
# so don't bother checking the results, just that it doesn't crash
_test_optimizer_finds_minimum(
VariationalGaussianProcess,
None if use_natgrads else 5,
EfficientGlobalOptimization[SearchSpace, GPflowPredictor](),
model_args={"use_natgrads": use_natgrads},
)
@random_seed
@pytest.mark.slow
def test_bayesian_optimizer_with_svgp_finds_minima_of_scaled_branin() -> None:
_test_optimizer_finds_minimum(
SparseVariational,
40,
EfficientGlobalOptimization[SearchSpace, SparseVariational](),
optimize_branin=True,
model_args={"optimizer": Optimizer(gpflow.optimizers.Scipy(), compile=True)},
)
_test_optimizer_finds_minimum(
SparseVariational,
25,
EfficientGlobalOptimization[SearchSpace, SparseVariational](
builder=ParallelContinuousThompsonSampling(), num_query_points=5
),
optimize_branin=True,
model_args={"optimizer": Optimizer(gpflow.optimizers.Scipy(), compile=True)},
)
@random_seed
def test_bayesian_optimizer_with_svgp_finds_minima_of_simple_quadratic() -> None:
_test_optimizer_finds_minimum(
SparseVariational,
5,
EfficientGlobalOptimization[SearchSpace, SparseVariational](),
model_args={"optimizer": Optimizer(gpflow.optimizers.Scipy(), compile=True)},
)
_test_optimizer_finds_minimum(
SparseVariational,
5,
EfficientGlobalOptimization[SearchSpace, SparseVariational](
builder=ParallelContinuousThompsonSampling(), num_query_points=5
),
model_args={"optimizer": Optimizer(gpflow.optimizers.Scipy(), compile=True)},
)
@random_seed
@pytest.mark.slow
def test_bayesian_optimizer_with_sgpr_finds_minima_of_scaled_branin() -> None:
_test_optimizer_finds_minimum(
SparseGaussianProcessRegression,
9,
EfficientGlobalOptimization[SearchSpace, SparseGaussianProcessRegression](),
optimize_branin=True,
)
_test_optimizer_finds_minimum(
SparseGaussianProcessRegression,
20,
EfficientGlobalOptimization[SearchSpace, SparseGaussianProcessRegression](
builder=ParallelContinuousThompsonSampling(), num_query_points=5
),
optimize_branin=True,
)
@random_seed
def test_bayesian_optimizer_with_sgpr_finds_minima_of_simple_quadratic() -> None:
_test_optimizer_finds_minimum(
SparseGaussianProcessRegression,
5,
EfficientGlobalOptimization[SearchSpace, SparseGaussianProcessRegression](),
)
@random_seed
@pytest.mark.slow
@pytest.mark.parametrize(
"num_steps, acquisition_rule",
[
pytest.param(25, DiscreteThompsonSampling(1000, 8), id="DiscreteThompsonSampling"),
pytest.param(
25,
EfficientGlobalOptimization(
ParallelContinuousThompsonSampling(),
num_query_points=4,
),
id="ParallelContinuousThompsonSampling",
),
pytest.param(
12,
EfficientGlobalOptimization(
GreedyContinuousThompsonSampling(),
num_query_points=4,
),
id="GreedyContinuousThompsonSampling",
marks=pytest.mark.skip(reason="too fragile"),
),
],
)
def test_bayesian_optimizer_with_dgp_finds_minima_of_scaled_branin(
num_steps: int,
acquisition_rule: AcquisitionRule[TensorType, SearchSpace, DeepGaussianProcess],
) -> None:
_test_optimizer_finds_minimum(
DeepGaussianProcess, num_steps, acquisition_rule, optimize_branin=True
)
@random_seed
@pytest.mark.parametrize(
"num_steps, acquisition_rule",
[
pytest.param(5, DiscreteThompsonSampling(1000, 1), id="DiscreteThompsonSampling"),
pytest.param(
5,
EfficientGlobalOptimization(
MonteCarloExpectedImprovement(int(1e2)), generate_continuous_optimizer(100)
),
id="MonteCarloExpectedImprovement",
),
pytest.param(
5,
EfficientGlobalOptimization(
MonteCarloAugmentedExpectedImprovement(int(1e2)), generate_continuous_optimizer(100)
),
id="MonteCarloAugmentedExpectedImprovement",
),
pytest.param(
2,
EfficientGlobalOptimization(
ParallelContinuousThompsonSampling(),
num_query_points=5,
),
id="ParallelContinuousThompsonSampling",
),
pytest.param(
2,
EfficientGlobalOptimization(
GreedyContinuousThompsonSampling(),
num_query_points=5,
),
id="GreedyContinuousThompsonSampling",
),
],
)
def test_bayesian_optimizer_with_dgp_finds_minima_of_simple_quadratic(
num_steps: int,
acquisition_rule: AcquisitionRule[TensorType, SearchSpace, DeepGaussianProcess],
) -> None:
_test_optimizer_finds_minimum(DeepGaussianProcess, num_steps, acquisition_rule)
@random_seed
@pytest.mark.slow
@pytest.mark.parametrize(
"num_steps, acquisition_rule",
[
pytest.param(
60,
EfficientGlobalOptimization(),
id="EfficientGlobalOptimization",
marks=pytest.mark.skip(reason="too fragile"),
),
pytest.param(
30,
EfficientGlobalOptimization(
ParallelContinuousThompsonSampling(),
num_query_points=4,
),
id="ParallelContinuousThompsonSampling",
),
],
)
def test_bayesian_optimizer_with_deep_ensemble_finds_minima_of_scaled_branin(
num_steps: int,
acquisition_rule: AcquisitionRule[TensorType, SearchSpace, DeepEnsemble],
) -> None:
_test_optimizer_finds_minimum(
DeepEnsemble,
num_steps,
acquisition_rule,
optimize_branin=True,
model_args={"bootstrap": True, "diversify": False},
)
@random_seed
@pytest.mark.parametrize(
"num_steps, acquisition_rule",
[
pytest.param(5, EfficientGlobalOptimization(), id="EfficientGlobalOptimization"),
pytest.param(10, DiscreteThompsonSampling(1000, 1), id="DiscreteThompsonSampling"),
pytest.param(
5,
DiscreteThompsonSampling(1000, 1, thompson_sampler=ThompsonSamplerFromTrajectory()),
id="DiscreteThompsonSampling/ThompsonSamplerFromTrajectory",
),
],
)
def test_bayesian_optimizer_with_deep_ensemble_finds_minima_of_simple_quadratic(
num_steps: int, acquisition_rule: AcquisitionRule[TensorType, SearchSpace, DeepEnsemble]
) -> None:
_test_optimizer_finds_minimum(
DeepEnsemble,
num_steps,
acquisition_rule,
)
@random_seed
@pytest.mark.parametrize(
"num_steps, acquisition_rule",
[
pytest.param(
5,
EfficientGlobalOptimization(
ParallelContinuousThompsonSampling(),
num_query_points=3,
),
id="ParallelContinuousThompsonSampling",
),
],
)
def test_bayesian_optimizer_with_PCTS_and_deep_ensemble_finds_minima_of_simple_quadratic(
num_steps: int, acquisition_rule: AcquisitionRule[TensorType, SearchSpace, DeepEnsemble]
) -> None:
_test_optimizer_finds_minimum(
DeepEnsemble,
num_steps,
acquisition_rule,
model_args={"diversify": False},
)
_test_optimizer_finds_minimum(
DeepEnsemble,
num_steps,
acquisition_rule,
model_args={"diversify": True},
)
def _test_optimizer_finds_minimum(
model_type: Type[TrainableProbabilisticModelType],
num_steps: Optional[int],
acquisition_rule: AcquisitionRule[TensorType, SearchSpace, TrainableProbabilisticModelType]
| AcquisitionRule[
State[TensorType, AsynchronousRuleState | TrustRegion.State],
Box,
TrainableProbabilisticModelType,
],
optimize_branin: bool = False,
model_args: Optional[Mapping[str, Any]] = None,
check_regret: bool = False,
) -> None:
model_args = model_args or {}
if optimize_branin:
search_space = ScaledBranin.search_space
minimizers = ScaledBranin.minimizers
minima = ScaledBranin.minimum
rtol_level = 0.005
num_initial_query_points = 5
else:
search_space = SimpleQuadratic.search_space
minimizers = SimpleQuadratic.minimizers
minima = SimpleQuadratic.minimum
rtol_level = 0.05
num_initial_query_points = 10
if model_type in [SparseVariational, DeepEnsemble]:
num_initial_query_points = 20
elif model_type in [DeepGaussianProcess]:
num_initial_query_points = 25
initial_query_points = search_space.sample(num_initial_query_points)
observer = mk_observer(ScaledBranin.objective if optimize_branin else SimpleQuadratic.objective)
initial_data = observer(initial_query_points)
model: TrainableProbabilisticModel # (really TPMType, but that's too complicated for mypy)
if model_type is GaussianProcessRegression:
if "LocalPenalization" in acquisition_rule.__repr__():
likelihood_variance = 1e-3
else:
likelihood_variance = 1e-5
gpr = build_gpr(initial_data, search_space, likelihood_variance=likelihood_variance)
model = GaussianProcessRegression(gpr, **model_args)
elif model_type is SparseGaussianProcessRegression:
sgpr = build_sgpr(initial_data, search_space, num_inducing_points=50)
model = SparseGaussianProcessRegression(
sgpr,
**model_args,
inducing_point_selector=ConditionalImprovementReduction(),
)
elif model_type is VariationalGaussianProcess:
empirical_variance = tf.math.reduce_variance(initial_data.observations)
kernel = gpflow.kernels.Matern52(variance=empirical_variance, lengthscales=[0.2, 0.2])
likelihood = gpflow.likelihoods.Gaussian(1e-3)
vgp = gpflow.models.VGP(initial_data.astuple(), kernel, likelihood)
gpflow.utilities.set_trainable(vgp.likelihood, False)
model = VariationalGaussianProcess(vgp, **model_args)
elif model_type is SparseVariational:
svgp = build_svgp(initial_data, search_space, num_inducing_points=50)
model = SparseVariational(
svgp,
**model_args,
inducing_point_selector=ConditionalImprovementReduction(),
)
elif model_type is DeepGaussianProcess:
model = DeepGaussianProcess(
partial(build_vanilla_deep_gp, initial_data, search_space), **model_args
)
elif model_type is DeepEnsemble:
keras_ensemble = build_keras_ensemble(initial_data, 5, 3, 25, "selu")
fit_args = {
"batch_size": 20,
"epochs": 200,
"callbacks": [
tf.keras.callbacks.EarlyStopping(
monitor="loss", patience=25, restore_best_weights=True
)
],
"verbose": 0,
}
de_optimizer = KerasOptimizer(tf.keras.optimizers.Adam(0.01), fit_args)
model = DeepEnsemble(keras_ensemble, de_optimizer, **model_args)
else:
raise ValueError(f"Unsupported model_type '{model_type}'")
with tempfile.TemporaryDirectory() as tmpdirname:
summary_writer = tf.summary.create_file_writer(tmpdirname)
with tensorboard_writer(summary_writer):
result = BayesianOptimizer(observer, search_space).optimize(
num_steps or 2,
initial_data,
cast(TrainableProbabilisticModelType, model),
acquisition_rule,
track_state=True,
track_path=Path(tmpdirname) / "history",
early_stop_callback=stop_at_minimum(
# stop as soon as we find the minimum (but always run at least one step)
minima,
minimizers,
minimum_rtol=rtol_level,
minimum_step_number=2,
),
fit_model=not isinstance(acquisition_rule, TURBO),
fit_initial_model=False,
)
# check history saved ok
assert len(result.history) <= (num_steps or 2)
assert len(result.loaded_history) == len(result.history)
loaded_result: OptimizationResult[None] = OptimizationResult.from_path(
Path(tmpdirname) / "history"
)
assert loaded_result.final_result.is_ok
assert len(loaded_result.history) == len(result.history)
if num_steps is None:
# this test is just being run to check for crashes, not performance
pass
elif check_regret:
        # this just checks that the new observations are mostly better than the initial ones
assert isinstance(result.history[0], FrozenRecord)
initial_observations = result.history[0].load().dataset.observations
best_initial = tf.math.reduce_min(initial_observations)
better_than_initial = 0
num_points = len(initial_observations)
for i in range(1, len(result.history)):
step_history = result.history[i]
assert isinstance(step_history, FrozenRecord)
step_observations = step_history.load().dataset.observations
new_observations = step_observations[num_points:]
if tf.math.reduce_min(new_observations) < best_initial:
better_than_initial += 1
num_points = len(step_observations)
assert better_than_initial / len(result.history) > 0.6
else:
# this actually checks that we solved the problem
best_x, best_y, _ = result.try_get_optimal_point()
minimizer_err = tf.abs((best_x - minimizers) / minimizers)
assert tf.reduce_any(tf.reduce_all(minimizer_err < 0.05, axis=-1), axis=0)
npt.assert_allclose(best_y, minima, rtol=rtol_level)
if isinstance(acquisition_rule, EfficientGlobalOptimization):
acq_function = acquisition_rule.acquisition_function
assert acq_function is not None
# check that acquisition functions defined as classes aren't retraced unnecessarily
# they should be retraced for the optimizer's starting grid, L-BFGS, and logging
# (and possibly once more due to variable creation)
if isinstance(acq_function, (AcquisitionFunctionClass, TrajectoryFunctionClass)):
assert acq_function.__call__._get_tracing_count() in {3, 4} # type: ignore
# update trajectory function if necessary, so we can test it
if isinstance(acq_function, TrajectoryFunctionClass):
sampler = (
acquisition_rule._builder.single_builder._trajectory_sampler # type: ignore
)
sampler.update_trajectory(acq_function)
# check that acquisition functions can be saved and reloaded
acq_function_copy = dill.loads(dill.dumps(acq_function))
# and that the copy gives the same values as the original
batch_size = (
1
if isinstance(acquisition_rule._builder, GreedyAcquisitionFunctionBuilder)
else acquisition_rule._num_query_points
)
random_batch = tf.expand_dims(search_space.sample(batch_size), 0)
npt.assert_allclose(
acq_function(random_batch), acq_function_copy(random_batch), rtol=5e-7
)
| 26,051 | 36.058321 | 100 | py |
trieste-develop | trieste-develop/tests/integration/models/multifidelity/test_multifidelity_models.py | import gpflow
import numpy as np
import numpy.testing as npt
import tensorflow as tf
from tensorflow.keras.metrics import mean_squared_error
import trieste
from trieste.data import (
Dataset,
add_fidelity_column,
check_and_extract_fidelity_query_points,
split_dataset_by_fidelity,
)
from trieste.models.gpflow import GaussianProcessRegression
from trieste.models.gpflow.builders import (
build_gpr,
build_multifidelity_autoregressive_models,
build_multifidelity_nonlinear_autoregressive_models,
)
from trieste.models.gpflow.models import (
MultifidelityAutoregressive,
MultifidelityNonlinearAutoregressive,
)
from trieste.objectives.utils import mk_observer
from trieste.space import Box
from trieste.types import TensorType
def noisy_linear_multifidelity(x: TensorType) -> TensorType:
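    """Toy linear multifidelity objective: each fidelity level adds a linear
    correction to the lowest-fidelity signal, with Gaussian observation noise
    added on every fidelity above the lowest one."""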
x_input, x_fidelity = check_and_extract_fidelity_query_points(x)
f = 0.5 * ((6.0 * x_input - 2.0) ** 2) * tf.math.sin(12.0 * x_input - 4.0) + 10.0 * (
x_input - 1.0
)
f = f + x_fidelity * (f - 20.0 * (x_input - 1.0))
noise = tf.random.normal(f.shape, stddev=1e-1, dtype=f.dtype)
f = tf.where(x_fidelity > 0, f + noise, f)
return f
def noisy_nonlinear_multifidelity(x: TensorType) -> TensorType:
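    """Toy two-fidelity objective whose high fidelity is a nonlinear
    transformation of the low-fidelity signal, with Gaussian observation
    noise added on the high fidelity only."""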
x_input, x_fidelity = check_and_extract_fidelity_query_points(x)
# Check we only have fidelity = 0 or 1
# 1 if element is not 0 or 1
bad_fidelities = tf.math.logical_and(x_fidelity != 0, x_fidelity != 1)
if tf.math.count_nonzero(bad_fidelities) > 0:
raise ValueError("Nonlinear simulator only supports 2 fidelities (0 and 1)")
else:
f = tf.math.sin(8 * np.pi * x_input)
fh = (x_input - tf.sqrt(tf.Variable(2.0, dtype=tf.float64))) * tf.square(f)
f = tf.where(x_fidelity > 0, fh, f)
noise = tf.random.normal(f.shape, stddev=1e-2, dtype=f.dtype)
f = tf.where(x_fidelity > 0, f + noise, f)
return f
def test_multifidelity_autoregressive_results_close() -> None:
input_dim = 1
lb = np.zeros(input_dim)
ub = np.ones(input_dim)
n_fidelities = 4
input_search_space = trieste.space.Box(lb, ub)
n_samples_per_fidelity = [
2 ** ((n_fidelities - fidelity) + 1) + 3 for fidelity in range(n_fidelities)
]
xs = [tf.linspace(0, 1, samples)[:, None] for samples in n_samples_per_fidelity]
initial_samples_list = [tf.concat([x, tf.ones_like(x) * i], 1) for i, x in enumerate(xs)]
initial_sample = tf.concat(initial_samples_list, 0)
observer = mk_observer(noisy_linear_multifidelity)
initial_data = observer(initial_sample)
data = split_dataset_by_fidelity(initial_data, n_fidelities)
gprs = [
GaussianProcessRegression(
build_gpr(
data[fidelity], input_search_space, likelihood_variance=1e-6, kernel_priors=False
)
)
for fidelity in range(n_fidelities)
]
model = MultifidelityAutoregressive(gprs)
model.update(initial_data)
model.optimize(initial_data)
test_xs = tf.linspace(0, 1, 11)[:, None]
test_xs_w_fid = add_fidelity_column(test_xs, fidelity=3)
predictions = model.predict(test_xs_w_fid)[0]
gt_obs = observer(test_xs_w_fid).observations
npt.assert_allclose(predictions, gt_obs, rtol=0.20)
def test_multifidelity_nonlinear_autoregressive_results_better_than_linear() -> None:
input_dim = 1
lb = np.zeros(input_dim)
ub = np.ones(input_dim)
n_fidelities = 2
input_search_space = trieste.space.Box(lb, ub)
n_samples_per_fidelity = [
2 ** ((n_fidelities - fidelity) + 1) + 10 for fidelity in range(n_fidelities)
]
xs = [tf.linspace(0, 1, samples)[:, None] for samples in n_samples_per_fidelity]
initial_samples_list = [tf.concat([x, tf.ones_like(x) * i], 1) for i, x in enumerate(xs)]
initial_sample = tf.concat(initial_samples_list, 0)
observer = mk_observer(noisy_nonlinear_multifidelity)
initial_data = observer(initial_sample)
nonlinear_model = MultifidelityNonlinearAutoregressive(
build_multifidelity_nonlinear_autoregressive_models(
initial_data, n_fidelities, input_search_space
)
)
linear_model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(initial_data, n_fidelities, input_search_space)
)
mses = list()
for model in [nonlinear_model, linear_model]:
model.update(initial_data)
model.optimize(initial_data)
test_xs = tf.linspace(0, 1, 111)[:, None]
test_xs_w_fid = add_fidelity_column(test_xs, fidelity=1)
predictions = model.predict(test_xs_w_fid)[0]
gt_obs = observer(test_xs_w_fid).observations
mses.append(tf.reduce_sum(mean_squared_error(gt_obs, predictions)))
assert mses[0] < mses[1]
def test_multifidelity_autoregressive_gets_expected_rhos() -> None:
input_dim = 1
lb = np.zeros(input_dim)
ub = np.ones(input_dim)
n_fidelities = 4
input_search_space = trieste.space.Box(lb, ub)
n_samples_per_fidelity = [
2 ** ((n_fidelities - fidelity) + 1) + 3 for fidelity in range(n_fidelities)
]
xs = [tf.linspace(0, 1, samples)[:, None] for samples in n_samples_per_fidelity]
initial_samples_list = [tf.concat([x, tf.ones_like(x) * i], 1) for i, x in enumerate(xs)]
initial_sample = tf.concat(initial_samples_list, 0)
observer = mk_observer(noisy_linear_multifidelity)
initial_data = observer(initial_sample)
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(initial_data, n_fidelities, input_search_space)
)
model.update(initial_data)
model.optimize(initial_data)
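    # with the linear simulator, f_fid = (1 + fid) * f_0 - 20 * fid * (x - 1),
    # so the scaling between consecutive fidelities is (fid + 1) / fid: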
expected_rho = [1.0] + [(fidelity + 1) / fidelity for fidelity in range(1, n_fidelities)]
rhos = [float(rho.numpy()) for rho in model.rho]
npt.assert_allclose(np.array(expected_rho), np.array(rhos), rtol=0.30)
def test_multifidelity_autoregressive_predict_lf_are_consistent_with_multiple_fidelities() -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.update(dataset)
# Add some high fidelity points to check that predict on different fids works
    test_locations_60 = tf.Variable(np.linspace(0, 10, 60), dtype=tf.float64)[:, None]
    lf_test_locations = add_fidelity_column(test_locations_60, 0)
test_locations_32 = tf.Variable(np.linspace(0, 10, 32), dtype=tf.float64)[:, None]
hf_test_locations = add_fidelity_column(test_locations_32, 1)
second_batch = tf.Variable(np.linspace(0.5, 10.5, 92), dtype=tf.float64)[:, None]
second_batch_test_locations = add_fidelity_column(second_batch, 1)
concat_test_locations = tf.concat([lf_test_locations, hf_test_locations], axis=0)
concat_multibatch_test_locations = tf.concat(
[concat_test_locations[None, ...], second_batch_test_locations[None, ...]], axis=0
)
prediction_mean, prediction_var = model.predict(concat_multibatch_test_locations)
lf_prediction_mean, lf_prediction_var = (
prediction_mean[0, :60],
prediction_var[0, :60],
)
(
lf_prediction_direct_mean,
lf_prediction_direct_var,
    ) = model.lowest_fidelity_signal_model.predict(test_locations_60)
npt.assert_allclose(lf_prediction_mean, lf_prediction_direct_mean, rtol=1e-7)
npt.assert_allclose(lf_prediction_var, lf_prediction_direct_var, rtol=1e-7)
def test_multifidelity_autoregressive_predict_hf_is_consistent_when_rho_zero() -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.update(dataset)
model.rho[1] = 0.0 # type: ignore
test_locations = tf.Variable(np.linspace(0, 10, 32), dtype=tf.float64)[:, None]
hf_test_locations = add_fidelity_column(test_locations, 1)
hf_prediction = model.predict(hf_test_locations)
hf_prediction_direct = model.fidelity_residual_models[1].predict(test_locations)
npt.assert_array_equal(hf_prediction, hf_prediction_direct)
def test_multifidelity_autoregressive_predict_hf_is_consistent_when_lf_is_flat() -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.update(dataset)
flat_dataset_qps = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
flat_dataset_obs = tf.zeros_like(flat_dataset_qps)
flat_dataset = Dataset(flat_dataset_qps, flat_dataset_obs)
kernel = gpflow.kernels.Matern52()
gpr = gpflow.models.GPR(flat_dataset.astuple(), kernel, noise_variance=1e-5)
model.lowest_fidelity_signal_model = GaussianProcessRegression(gpr)
# Add some low fidelity points to check that predict on different fids works
test_locations_30 = tf.Variable(np.linspace(0, 10, 30), dtype=tf.float64)[:, None]
lf_test_locations = add_fidelity_column(test_locations_30, 0)
test_locations_32 = tf.Variable(np.linspace(0, 10, 32), dtype=tf.float64)[:, None]
hf_test_locations = add_fidelity_column(test_locations_32, 1)
concatenated_test_locations = tf.concat([lf_test_locations, hf_test_locations], axis=0)
concat_prediction, _ = model.predict(concatenated_test_locations)
hf_prediction = concat_prediction[30:]
hf_prediction_direct, _ = model.fidelity_residual_models[1].predict(test_locations_32)
npt.assert_allclose(hf_prediction, hf_prediction_direct)
def test_multifidelity_autoregressive_predict_hf_is_consistent_when_hf_residual_is_flat() -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.update(dataset)
flat_dataset_qps = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
flat_dataset_obs = tf.zeros_like(flat_dataset_qps)
flat_dataset = Dataset(flat_dataset_qps, flat_dataset_obs)
kernel = gpflow.kernels.Matern52()
gpr = gpflow.models.GPR(flat_dataset.astuple(), kernel, noise_variance=1e-5)
model.fidelity_residual_models[1] = GaussianProcessRegression(gpr) # type: ignore
test_locations = tf.Variable(np.linspace(0, 10, 32), dtype=tf.float64)[:, None]
hf_test_locations = add_fidelity_column(test_locations, 1)
hf_prediction, _ = model.predict(hf_test_locations)
hf_prediction_direct = (
model.rho[1] * model.lowest_fidelity_signal_model.predict(test_locations)[0]
)
npt.assert_allclose(hf_prediction, hf_prediction_direct)
def test_multifidelity_autoregressive_sample_lf_are_consistent_with_multiple_fidelities() -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.update(dataset)
# Add some high fidelity points to check that predict on different fids works
test_locations_31 = tf.Variable(np.linspace(0, 10, 31), dtype=tf.float64)[:, None]
lf_test_locations = add_fidelity_column(test_locations_31, 0)
test_locations_32 = tf.Variable(np.linspace(0, 10, 32), dtype=tf.float64)[:, None]
hf_test_locations = add_fidelity_column(test_locations_32, 1)
second_batch = tf.Variable(np.linspace(0.5, 10.5, 63), dtype=tf.float64)[:, None]
second_batch_test_locations = add_fidelity_column(second_batch, 1)
concat_test_locations = tf.concat([lf_test_locations, hf_test_locations], axis=0)
concat_multibatch_test_locations = tf.concat(
[concat_test_locations[None, ...], second_batch_test_locations[None, ...]], axis=0
)
concat_samples = model.sample(concat_multibatch_test_locations, 100_000)
lf_samples = concat_samples[0, :, :31]
lf_samples_direct = model.lowest_fidelity_signal_model.sample(test_locations_31, 100_000)
lf_samples_mean = tf.reduce_mean(lf_samples, axis=0)
lf_samples_var = tf.math.reduce_variance(lf_samples, axis=0)
lf_samples_direct_mean = tf.reduce_mean(lf_samples_direct, axis=0)
lf_samples_direct_var = tf.math.reduce_variance(lf_samples_direct, axis=0)
npt.assert_allclose(lf_samples_mean, lf_samples_direct_mean, atol=1e-4)
npt.assert_allclose(lf_samples_var, lf_samples_direct_var, atol=1e-4)
def test_multifidelity_autoregressive_sample_hf_is_consistent_when_rho_zero() -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.update(dataset)
model.rho[1] = 0.0 # type: ignore
test_locations = tf.Variable(np.linspace(0, 10, 32), dtype=tf.float64)[:, None]
hf_test_locations = add_fidelity_column(test_locations, 1)
hf_samples = model.sample(hf_test_locations, 100_000)
hf_samples_direct = model.fidelity_residual_models[1].sample(test_locations, 100_000)
hf_samples_mean = tf.reduce_mean(hf_samples, axis=0)
hf_samples_var = tf.math.reduce_variance(hf_samples, axis=0)
hf_samples_direct_mean = tf.reduce_mean(hf_samples_direct, axis=0)
hf_samples_direct_var = tf.math.reduce_variance(hf_samples_direct, axis=0)
npt.assert_allclose(hf_samples_mean, hf_samples_direct_mean, atol=1e-2)
npt.assert_allclose(hf_samples_var, hf_samples_direct_var, atol=1e-2)
def test_multifidelity_autoregressive_sample_hf_is_consistent_when_lf_is_flat() -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.update(dataset)
flat_dataset_qps = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
flat_dataset_obs = tf.zeros_like(flat_dataset_qps)
flat_dataset = Dataset(flat_dataset_qps, flat_dataset_obs)
kernel = gpflow.kernels.Matern52()
gpr = gpflow.models.GPR(flat_dataset.astuple(), kernel, noise_variance=1e-5)
model.lowest_fidelity_signal_model = GaussianProcessRegression(gpr)
# Add some low fidelity points to check that predict on different fids works
test_locations_30 = tf.Variable(np.linspace(0, 10, 30), dtype=tf.float64)[:, None]
lf_test_locations = add_fidelity_column(test_locations_30, 0)
test_locations_32 = tf.Variable(np.linspace(0, 10, 32), dtype=tf.float64)[:, None]
hf_test_locations = add_fidelity_column(test_locations_32, 1)
concatenated_test_locations = tf.concat([lf_test_locations, hf_test_locations], axis=0)
concat_samples = model.sample(concatenated_test_locations, 100_000)
hf_samples = concat_samples[:, 30:]
hf_samples_direct = model.fidelity_residual_models[1].sample(test_locations_32, 100_000)
hf_samples_mean = tf.reduce_mean(hf_samples, axis=0)
hf_samples_var = tf.math.reduce_variance(hf_samples, axis=0)
hf_samples_direct_mean = tf.reduce_mean(hf_samples_direct, axis=0)
hf_samples_direct_var = tf.math.reduce_variance(hf_samples_direct, axis=0)
npt.assert_allclose(hf_samples_mean, hf_samples_direct_mean, atol=1e-2)
npt.assert_allclose(hf_samples_var, hf_samples_direct_var, atol=1e-2)
def test_multifidelity_autoregressive_sample_hf_is_consistent_when_hf_residual_is_flat() -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.update(dataset)
flat_dataset_qps = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
flat_dataset_obs = tf.zeros_like(flat_dataset_qps)
flat_dataset = Dataset(flat_dataset_qps, flat_dataset_obs)
kernel = gpflow.kernels.Matern52()
gpr = gpflow.models.GPR(flat_dataset.astuple(), kernel, noise_variance=1e-5)
model.fidelity_residual_models[1] = GaussianProcessRegression(gpr) # type: ignore
test_locations = tf.Variable(np.linspace(0, 10, 32), dtype=tf.float64)[:, None]
hf_test_locations = add_fidelity_column(test_locations, 1)
hf_samples = model.sample(hf_test_locations, 100_000)
hf_samples_direct = model.rho[1] * model.lowest_fidelity_signal_model.sample(
test_locations, 100_000
)
hf_samples_mean = tf.reduce_mean(hf_samples, axis=0)
hf_samples_var = tf.math.reduce_variance(hf_samples, axis=0)
hf_samples_direct_mean = tf.reduce_mean(hf_samples_direct, axis=0)
hf_samples_direct_var = tf.math.reduce_variance(hf_samples_direct, axis=0)
npt.assert_allclose(hf_samples_mean, hf_samples_direct_mean, atol=1e-4)
npt.assert_allclose(hf_samples_var, hf_samples_direct_var, atol=1e-4)
| 21,553 | 37.148673 | 99 | py |
trieste-develop | trieste-develop/tests/integration/models/keras/test_predictions.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import numpy as np
import pytest
import tensorflow as tf
from tests.util.misc import hartmann_6_dataset, random_seed
from trieste.models.keras import DeepEnsemble, build_keras_ensemble
from trieste.models.optimizer import KerasOptimizer
@pytest.mark.slow
@random_seed
def test_neural_network_ensemble_predictions_close_to_actuals() -> None:
dataset_size = 2000
example_data = hartmann_6_dataset(dataset_size)
keras_ensemble = build_keras_ensemble(example_data, 5, 3, 250)
fit_args = {
"batch_size": 128,
"epochs": 1500,
"callbacks": [
tf.keras.callbacks.EarlyStopping(
monitor="loss", patience=100, restore_best_weights=True
)
],
"verbose": 0,
}
model = DeepEnsemble(
keras_ensemble,
KerasOptimizer(tf.keras.optimizers.Adam(), fit_args),
)
model.optimize(example_data)
predicted_means, _ = model.predict(example_data.query_points)
np.testing.assert_allclose(predicted_means, example_data.observations, atol=0.2, rtol=0.2)
| 1,688 | 32.117647 | 94 | py |
trieste-develop | trieste-develop/docs/notebooks/deep_ensembles.pct.py | # %% [markdown]
# # Bayesian optimization with deep ensembles
#
# Gaussian processes as surrogate models are hard to beat on smaller datasets and optimization budgets. However, they scale poorly with the amount of data, cannot easily capture non-stationarities and are rather slow at prediction time. Here we show how uncertainty-aware neural networks can be an effective alternative to Gaussian processes in Bayesian optimisation, in particular for large budgets, non-stationary objective functions, or when predictions need to be made quickly.
#
# Check out our tutorial on [Deep Gaussian Processes for Bayesian optimization](deep_gaussian_processes.ipynb) as another alternative model type supported by Trieste that can model non-stationary functions (but also deal well with small datasets).
#
# Let's start by importing some essential packages and modules.
# %%
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import numpy as np
import tensorflow as tf
import trieste
# silence TF warnings and info messages, only print errors
# https://stackoverflow.com/questions/35911252/disable-tensorflow-debugging-information
tf.get_logger().setLevel("ERROR")
np.random.seed(1794)
tf.random.set_seed(1794)
# %% [markdown]
# ## Deep ensembles
#
# Deep neural networks typically output only mean predictions, not posterior distributions as probabilistic models such as Gaussian processes do. Posterior distributions encode mean predictions, but also *epistemic* uncertainty - the type of uncertainty that stems from model misspecification, and which can be eliminated with further data. Aleatoric uncertainty, which stems from the stochasticity of the data generating process, is not contained in the posterior, but can be learned from the data. Bayesian optimization requires probabilistic models because epistemic uncertainty plays a key role in balancing between exploration and exploitation.
#
# Recently, however, there has been some development of uncertainty-aware deep neural networks. Ensembles of deep neural networks, introduced by <cite data-cite="lakshminarayanan2016simple"/>, are one type of such networks. The main ingredients are probabilistic feed-forward networks as members of the ensemble, where the final layer parametrizes a Gaussian distribution, training with maximum likelihood instead of the typical root mean square error, and different random initializations of the weights for generating diversity among the networks.
#
# Monte Carlo dropout (<cite data-cite="gal2016dropout"/>), Bayes-by-backprop (<cite data-cite="blundell2015weight"/>) and evidential deep regression (<cite data-cite="amini2019deep"/>) are some of the other types of uncertainty-aware deep neural networks. Systematic comparisons, however, show that deep ensembles represent uncertainty best and are probably the simplest of the major alternatives (see, for example, <cite data-cite="osband2021epistemic"/>). Good estimates of uncertainty make deep ensembles a potentially attractive model for Bayesian optimization.
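#
# To make the first ingredient concrete, below is a minimal sketch of a single probabilistic ensemble member (for illustration only; this is not how Trieste builds its ensembles internally, and `build_member` and `gaussian_nll` are names introduced just for this sketch): a small feed-forward network that outputs a predicted mean and log-variance, trained by minimizing the Gaussian negative log-likelihood.
# %%
def build_member(input_dim):
    """One probabilistic ensemble member: predicts a mean and a log-variance."""
    return tf.keras.Sequential(
        [
            tf.keras.layers.Dense(100, activation="relu", input_shape=(input_dim,)),
            tf.keras.layers.Dense(2),  # outputs [mean, log-variance]
        ]
    )
def gaussian_nll(y_true, y_pred):
    """Gaussian negative log-likelihood of `y_true`, up to an additive constant."""
    mean, log_var = y_pred[..., :1], y_pred[..., 1:]
    return 0.5 * (log_var + tf.square(y_true - mean) / tf.exp(log_var))
# each member gets its own random weight initialization; this diversity is
# what the ensemble uses to represent epistemic uncertainty
members = [build_member(1) for _ in range(5)]
for member in members:
    member.compile(tf.keras.optimizers.Adam(0.01), loss=gaussian_nll)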
# %% [markdown]
# ### How good is uncertainty representation of deep ensembles?
#
# We will use a simple one-dimensional toy problem introduced by <cite data-cite="hernandez2015probabilistic"/>, which was used in <cite data-cite="lakshminarayanan2016simple"/> to provide some illustrative evidence that deep ensembles do a good job of estimating uncertainty. We will replicate this exercise here.
#
# The toy problem is a simple cubic function with some Normally distributed noise around it. We will randomly sample 20 input points from the $[-4, 4]$ interval that we will use as training data later on.
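# In symbols, each observation is generated as $y = x^3 + \varepsilon$ with $\varepsilon \sim \mathcal{N}(0, 3^2)$, which is exactly what the `objective` function below implements.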
# %%
from trieste.space import Box
from trieste.data import Dataset
def objective(x, error=True):
y = tf.pow(x, 3)
if error:
y += tf.random.normal(x.shape, 0, 3, dtype=x.dtype)
return y
num_points = 20
# we define the [-4,4] interval using a `Box` search space that has convenient sampling methods
search_space = Box([-4], [4])
inputs = search_space.sample_sobol(num_points)
outputs = objective(inputs)
data = Dataset(inputs, outputs)
# %% [markdown]
# Next we define a deep ensemble model and train it. Trieste supports neural network models defined as TensorFlow's Keras models. Since creating ensemble models in Keras can be somewhat involved, Trieste provides some basic architectures. Here we use the `build_keras_ensemble` function which builds a simple ensemble of neural networks in Keras where each network has the same architecture: number of hidden layers, nodes in hidden layers and activation function. It uses sensible defaults for many parameters and finally returns a model of the `KerasEnsemble` class.
#
# As with other supported types of models (e.g. Gaussian process models from GPflow), we cannot use `KerasEnsemble` directly in Bayesian optimization routines; we need to pass it through an appropriate wrapper, the `DeepEnsemble` wrapper in this case. One difference with respect to other model types is that we need to use a Keras-specific optimizer wrapper, `KerasOptimizer`, in which we specify a stochastic optimizer (Adam is used by default, but we can use other stochastic optimizers from TensorFlow) and the objective function (here the negative log-likelihood), and in which we can provide custom arguments for the Keras `fit` method (here we modify the default arguments; check the [Keras API documentation](https://keras.io/api/models/model_training_apis/#fit-method) for a list of possible arguments).
#
# For the cubic function toy problem we use the same architecture as in <cite data-cite="lakshminarayanan2016simple"/>: an ensemble of 5 networks, where each network has one hidden layer with 100 nodes. The remaining implementation details, including those about training the network, were not reported, so we made sensible choices.
# %%
from trieste.models.keras import (
DeepEnsemble,
KerasPredictor,
build_keras_ensemble,
)
from trieste.models.optimizer import KerasOptimizer
def build_cubic_model(data: Dataset) -> DeepEnsemble:
ensemble_size = 5
num_hidden_layers = 1
num_nodes = 100
keras_ensemble = build_keras_ensemble(
data, ensemble_size, num_hidden_layers, num_nodes
)
fit_args = {
"batch_size": 10,
"epochs": 1000,
"verbose": 0,
}
optimizer = KerasOptimizer(tf.keras.optimizers.Adam(0.01), fit_args)
return DeepEnsemble(keras_ensemble, optimizer)
# building and optimizing the model
model = build_cubic_model(data)
model.optimize(data)
# %% [markdown]
# Let's illustrate the results of the model training. We create a test set that includes points outside the interval on which the model has been trained. These extrapolation points are a good test of the model's representation of uncertainty. What would we expect to see? Bayesian inference provides a reference frame. Predictive uncertainty should increase the farther we are from the training data, and the predictive mean should return to the prior mean (assuming a standard zero mean function).
#
# We can see in the figure below that the predictive distribution of deep ensembles indeed exhibits these features. The figure also replicates fairly well Figure 1 (rightmost panel) from <cite data-cite="lakshminarayanan2016simple"/> and provides a reasonable match to a Bayesian neural network trained on the same toy problem with Hamiltonian Monte Carlo (the gold standard, which is usually very expensive), as illustrated in Figure 1 (upper right panel) of <cite data-cite="hernandez2015probabilistic"/>. This gives us some assurance that deep ensembles might provide uncertainty estimates that are good enough for trading off between exploration and exploitation in Bayesian optimization.
# %%
import matplotlib.pyplot as plt
# test data that includes extrapolation points
test_points = tf.linspace(-6, 6, 1000)
# generating a plot with ground truth function, mean prediction and 3 standard
# deviations around it
plt.scatter(inputs, outputs, marker=".", alpha=0.6, color="red", label="data")
plt.plot(
test_points, objective(test_points, False), color="blue", label="function"
)
y_hat, y_var = model.predict(test_points[:, None])
y_hat_minus_3sd = y_hat - 3 * tf.math.sqrt(y_var)
y_hat_plus_3sd = y_hat + 3 * tf.math.sqrt(y_var)
plt.plot(test_points, y_hat, color="gray", label="model $\mu$")
plt.fill_between(
test_points,
tf.squeeze(y_hat_minus_3sd),
tf.squeeze(y_hat_plus_3sd),
color="gray",
alpha=0.5,
label="$\mu -/+ 3SD$",
)
plt.ylim([-100, 100])
plt.show()
# %% [markdown]
# ## Non-stationary toy problem
#
# Now we turn to a somewhat more serious synthetic optimization problem. We want to find the minimum of the two-dimensional version of the [Michalewicz function](https://www.sfu.ca/~ssurjano/michal.html). Even though we stated that deep ensembles should be used with larger budget sizes, here we will show them on a small dataset to provide a problem that is feasible for the scope of the tutorial.
# The Michalewicz function is defined on the search space of $[0, \pi]^2$. Below we plot the function over this space. The Michalewicz function is an interesting case for deep ensembles as it features sharp ridges that are difficult to capture with Gaussian processes. This occurs because lengthscale parameters in typical kernels cannot easily capture both the ridges (requiring smaller lengthscales) and the fairly flat areas everywhere else (requiring larger lengthscales).
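#
# For reference, the Michalewicz function (with the usual steepness parameter $m = 10$) is commonly defined as $f(\mathbf{x}) = -\sum_{i=1}^{d} \sin(x_i) \, \sin^{2m}\!\left(i x_i^2 / \pi\right)$; the large exponent $2m$ is what produces the sharp ridges.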
# %%
from trieste.objectives import Michalewicz2
from trieste.experimental.plotting import plot_function_plotly
search_space = Michalewicz2.search_space
function = Michalewicz2.objective
MINIMUM = Michalewicz2.minimum
MINIMIZER = Michalewicz2.minimizers
# we illustrate the 2-dimensional Michalewicz function
fig = plot_function_plotly(
function, search_space.lower, search_space.upper, grid_density=20
)
fig.show()
# %% [markdown]
# ## Initial design
#
# We set up the observer as usual, sampling the initial points at random from the search space.
# %%
from trieste.objectives.utils import mk_observer
num_initial_points = 20
initial_query_points = search_space.sample(num_initial_points)
observer = mk_observer(function)
initial_data = observer(initial_query_points)
# %% [markdown]
# ## Modelling the objective function
#
# The Bayesian optimization procedure estimates the next best points to query by using a probabilistic model of the objective. Here we use a deep ensemble instead of a typical probabilistic model. Same as above we use the `build_keras_ensemble` function to build a simple ensemble of neural networks in Keras and wrap it with a `DeepEnsemble` wrapper so it can be used in Trieste's Bayesian optimization loop.
#
# Some notes on choosing the model architecture are necessary. Unfortunately, choosing an architecture that works well for small datasets, a common setting in Bayesian optimization, is not easy. Here we do demonstrate that it can work with smaller datasets, but choosing the architecture and model optimization parameters was a lengthy process that does not necessarily generalize to other problems. Hence, we advise using deep ensembles with larger datasets, and ideally with large batches, so that the model is not retrained after adding a single point.
#
# We can offer some practical advice, however. Architecture parameters like the ensemble size, the number of hidden layers, the number of nodes in the layers and so on affect the capacity of the model. If the model is too large for the amount of data, it will be difficult to train the model and the result will be a poor model that cannot be used for optimizing the objective function. Hence, with small datasets like the one used here, we advise to always err on the smaller side: one or two hidden layers, and up to 25 nodes per layer. If we suspect the objective function is more complex, these numbers should be increased slightly. With regard to model optimization, we advise using many epochs, typically at least 1000, and potentially higher learning rates. Ideally, the capacity should be increased every once in a while, to be able to use the larger amount of data more effectively. Unfortunately, there is almost no research literature that would guide us in how to do this properly.
#
# An interesting alternative to a manual architecture search is to use a separate Bayesian optimization process to optimize the architecture and model optimizer parameters (see recent work by <cite data-cite="kadra2021well"/>). This optimization is much faster as it optimizes model performance. It would slow down the original optimization, so it's worthwhile only if evaluating the objective function is much more costly.
#
# Below we adapt the model-building function slightly for the Michalewicz function. Since it's a more complex function, we increase the number of hidden layers but keep the number of nodes per layer on the lower side. Note the large number of epochs.
# %%
def build_model(data: Dataset) -> DeepEnsemble:
ensemble_size = 5
num_hidden_layers = 3
num_nodes = 25
keras_ensemble = build_keras_ensemble(
data, ensemble_size, num_hidden_layers, num_nodes
)
fit_args = {
"batch_size": 10,
"epochs": 1000,
"callbacks": [
tf.keras.callbacks.EarlyStopping(monitor="loss", patience=100)
],
"verbose": 0,
}
optimizer = KerasOptimizer(tf.keras.optimizers.Adam(0.001), fit_args)
return DeepEnsemble(keras_ensemble, optimizer)
# building and optimizing the model
model = build_model(initial_data)
# %% [markdown]
# ## Run the optimization loop
#
# In Bayesian optimization we use an acquisition function to choose where in the search space to evaluate the objective function in each optimization step. The deep ensemble model uses probabilistic neural networks whose outputs are in the end approximated with a single Gaussian distribution, which acts as a predictive posterior distribution. This means that any acquisition function can be used that requires only the predictive mean and variance. For example, the predictive mean and variance are sufficient for standard acquisition functions such as Expected improvement (see `ExpectedImprovement`), Lower confidence bound (see `NegativeLowerConfidenceBound`) or Thompson sampling (see `ExactThompsonSampling`). Some acquisition functions have additional requirements and these cannot be used (e.g. covariance between sets of query points, as in the entropy-based acquisition function `GIBBON`).
#
# Here we will illustrate Deep ensembles with a Thompson sampling acquisition function. We use a discrete Thompson sampling strategy that samples a fixed number of points (`grid_size`) from the search space and takes a certain number of samples at each point based on the model posterior (`num_samples`, if more than 1 then this is a batch strategy).
# %%
from trieste.acquisition.rule import DiscreteThompsonSampling
grid_size = 2000
num_samples = 4
# note that `DiscreteThompsonSampling` by default uses `ExactThompsonSampler`
acquisition_rule = DiscreteThompsonSampling(grid_size, num_samples)
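# %% [markdown]
# As a side note, since the deep ensemble exposes a predictive mean and variance, any rule built on those quantities would work here too. A minimal sketch (not executed in this tutorial) that swaps in Expected improvement instead would be:
#
# ```python
# from trieste.acquisition.function import ExpectedImprovement
# from trieste.acquisition.rule import EfficientGlobalOptimization
# acquisition_rule = EfficientGlobalOptimization(ExpectedImprovement())
# ```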
# %% [markdown]
# We can now run the Bayesian optimization loop by defining a `BayesianOptimizer` and calling its `optimize` method.
#
# Note that the optimization might take a while!
# %%
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
num_steps = 25
# The Keras interface does not currently support using `track_state=True` which saves the model
# in each iteration. This will be addressed in a future update.
result = bo.optimize(
num_steps,
initial_data,
model,
acquisition_rule=acquisition_rule,
track_state=False,
)
dataset = result.try_get_final_dataset()
# %% [markdown]
# ## Explore the results
#
# We can now get the best point found by the optimizer. Note this isn't necessarily the point that was last evaluated.
# %%
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
arg_min_idx = tf.squeeze(tf.argmin(observations, axis=0))
print(f"Minimizer query point: {query_points[arg_min_idx, :]}")
print(f"Minimum observation: {observations[arg_min_idx, :]}")
print(f"True minimum: {MINIMUM}")
# %% [markdown]
# We can visualise how the optimizer performed as a three-dimensional plot. Crosses mark the initial data points while dots mark the points chosen during the Bayesian optimization run. You can see that there are some samples on the flat regions of the space, while most of the points are exploring the ridges, in particular in the vicinity of the minimum point.
# %%
from trieste.experimental.plotting import add_bo_points_plotly
fig = plot_function_plotly(
function,
search_space.lower,
search_space.upper,
alpha=0.7,
)
fig = add_bo_points_plotly(
x=query_points[:, 0],
y=query_points[:, 1],
z=observations[:, 0],
num_init=num_initial_points,
idx_best=arg_min_idx,
fig=fig,
)
fig.show()
# %% [markdown]
# We can visualise the model over the objective function by plotting the mean and 95% confidence intervals of its predictive distribution. Since it is not easy to choose the architecture of the deep ensemble, we advise always checking with these types of plots whether the model seems to be doing a good job at modelling the objective function. In this case we can see that the model was able to capture the relevant parts of the objective function.
# %%
import matplotlib.pyplot as plt
from trieste.experimental.plotting import plot_model_predictions_plotly
fig = plot_model_predictions_plotly(
result.try_get_final_model(),
search_space.lower,
search_space.upper,
)
fig = add_bo_points_plotly(
x=query_points[:, 0],
y=query_points[:, 1],
z=observations[:, 0],
num_init=num_initial_points,
idx_best=arg_min_idx,
fig=fig,
figrow=1,
figcol=1,
)
fig.show()
# %% [markdown]
# Finally, let's plot the regret over time, i.e. the difference between the minimum of the objective function and the lowest observation found by the Bayesian optimization over time. Below you can see two plots. The left hand plot shows the regret over time: the observations (crosses and dots), the current best (orange line), and the start of the optimization loop (blue line). The right hand plot is a two-dimensional search space that shows where in the search space the initial points were located (crosses again) and where Bayesian optimization allocated samples (dots). The best point is shown in each (purple dot), and on the left plot you can see that the regret comes very close to 0, meaning that we almost exactly reach the minimum of the objective function.
# %%
from trieste.experimental.plotting import plot_regret, plot_bo_points
suboptimality = observations - MINIMUM.numpy()
fig, ax = plt.subplots(1, 2)
plot_regret(
suboptimality,
ax[0],
num_init=num_initial_points,
idx_best=arg_min_idx,
)
plot_bo_points(
query_points, ax[1], num_init=num_initial_points, idx_best=arg_min_idx
)
ax[0].set_title("Minimum achieved")
ax[0].set_ylabel("Regret")
ax[0].set_xlabel("# evaluations")
ax[1].set_ylabel("$x_2$")
ax[1].set_xlabel("$x_1$")
ax[1].set_title("Points in the search space")
fig.show()
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
| 19,205 | 52.798319 | 984 | py |
tensiometer | tensiometer-master/tensiometer/mcmc_tension/flow.py | """
"""
###############################################################################
# initial imports and set-up:
import os
import time
import gc
from numba import jit
import numpy as np
import getdist.chains as gchains
gchains.print_load_details = False
from getdist import MCSamples, WeightedSamples
import scipy
from scipy.linalg import sqrtm
from scipy.integrate import simps
from scipy.spatial import cKDTree
import scipy.stats
import pickle
from collections.abc import Iterable
from matplotlib import pyplot as plt
from .. import utilities as utils
from .. import gaussian_tension
try:
import tensorflow as tf
import tensorflow_probability as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.callbacks import Callback
HAS_FLOW = True
except Exception as e:
print("Could not import tensorflow or tensorflow_probability: ", e)
Callback = object
HAS_FLOW = False
try:
from IPython.display import clear_output, set_matplotlib_formats
except ModuleNotFoundError:
pass
###############################################################################
# helper class to build a masked-autoregressive flow:
class SimpleMAF(object):
"""
    A class to implement a simple Masked AutoRegressive Flow (MAF) using the implementation :class:`tfp.bijectors.AutoregressiveNetwork` from `Tensorflow Probability <https://www.tensorflow.org/probability/>`_. Additionally, this class provides utilities to load/save models, including random permutations.
:param num_params: number of parameters, ie the dimension of the space of which the bijector is defined.
:type num_params: int
:param n_maf: number of MAFs to stack. Defaults to None, in which case it is set to `2*num_params`.
:type n_maf: int, optional
:param hidden_units: a list of the number of nodes per hidden layers. Defaults to None, in which case it is set to `[num_params*2]*2`.
:type hidden_units: list, optional
    :param permutations: whether to shuffle dimensions between stacked MAFs; alternatively, an explicit list of permutations (one per MAF) can be passed. Defaults to True.
:type permutations: bool, optional
:param activation: activation function to use in all layers, defaults to :func:`tf.math.asinh`.
:type activation: optional
:param kernel_initializer: kernel initializer, defaults to 'glorot_uniform'.
:type kernel_initializer: str, optional
:param feedback: print the model architecture, defaults to 0.
:type feedback: int, optional
:reference: George Papamakarios, Theo Pavlakou, Iain Murray (2017). Masked Autoregressive Flow for Density Estimation. `arXiv:1705.07057 <https://arxiv.org/abs/1705.07057>`_
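    Here is a short usage sketch (the two-dimensional space and the input samples `y_samples` are illustrative assumptions, not part of the class):
    .. code-block:: python
        # build a MAF bijector for a 2-dimensional parameter space:
        maf = SimpleMAF(num_params=2, feedback=1)
        # the trainable mapping is exposed as a tfp bijector:
        z_samples = maf.bijector.inverse(y_samples)
        # save to disk and restore, including the random permutations:
        maf.save('./maf_checkpoint')
        maf_restored = SimpleMAF.load(2, './maf_checkpoint')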
"""
def __init__(self, num_params, n_maf=None, hidden_units=None, permutations=True, activation=tf.math.asinh, kernel_initializer='glorot_uniform', feedback=0, **kwargs):
if n_maf is None:
n_maf = 2*num_params
event_shape = (num_params,)
if hidden_units is None:
hidden_units = [num_params*2]*2
if permutations is None:
_permutations = False
elif isinstance(permutations, Iterable):
assert len(permutations) == n_maf
_permutations = permutations
elif isinstance(permutations, bool):
if permutations:
_permutations = [np.random.permutation(num_params) for _ in range(n_maf)]
else:
_permutations = False
self.permutations = _permutations
# Build transformed distribution
bijectors = []
for i in range(n_maf):
if _permutations:
bijectors.append(tfb.Permute(_permutations[i].astype(np.int32)))
made = tfb.AutoregressiveNetwork(params=2, event_shape=event_shape, hidden_units=hidden_units, activation=activation, kernel_initializer=kernel_initializer, **kwargs)
bijectors.append(tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=made))
self.bijector = tfb.Chain(bijectors)
if feedback > 0:
print("Building MAF")
print(" - number of MAFs:", n_maf)
print(" - activation:", activation)
print(" - hidden_units:", hidden_units)
def save(self, path):
"""
Save a `SimpleMAF` object.
:param path: path of the directory where to save.
:type path: str
"""
checkpoint = tf.train.Checkpoint(bijector=self.bijector)
checkpoint.write(path)
pickle.dump(self.permutations, open(path+'_permutations.pickle', 'wb'))
@classmethod
def load(cls, num_params, path, **kwargs):
"""
Load a saved `SimpleMAF` object. The number of parameters and all other keyword arguments (except for `permutations`) must be included as the MAF is first created with random weights and then these weights are restored.
:param num_params: number of parameters, ie the dimension of the space of which the bijector is defined.
:type num_params: int
:param path: path of the directory from which to load.
:type path: str
:return: a :class:`~.SimpleMAF`.
"""
permutations = pickle.load(open(path+'_permutations.pickle', 'rb'))
maf = SimpleMAF(num_params=num_params, permutations=permutations, **kwargs)
checkpoint = tf.train.Checkpoint(bijector=maf.bijector)
checkpoint.read(path)
return maf
###############################################################################
# main class to compute NF-based tension:
class DiffFlowCallback(Callback):
"""
A class to compute the normalizing flow estimate of the probability of a parameter shift given an input parameter difference chain.
    A normalizing flow is trained to approximate the difference distribution and then used to numerically evaluate the probability of a parameter shift (see REF). To do so, it defines a bijective mapping that is optimized to gaussianize the difference chain samples. This mapping is performed in two steps, using the gaussian approximation as pre-whitening. The notations used in the code are:
* `X` designates samples in the original parameter difference space;
* `Y` designates samples in the gaussian approximation space, `Y` is obtained by shifting and scaling `X` by its mean and covariance (like a PCA);
* `Z` designates samples in the gaussianized space, connected to `Y` with a normalizing flow denoted `Z2Y_bijector`.
The user may provide the `Z2Y_bijector` as a :class:`~tfp.bijectors.Bijector` object from `Tensorflow Probability <https://www.tensorflow.org/probability/>`_ or make use of the utility class :class:`~.SimpleMAF` to instantiate a Masked Autoregressive Flow (with `Z2Y_bijector='MAF'`).
This class derives from :class:`~tf.keras.callbacks.Callback` from Keras, which allows for visualization during training. The normalizing flows (X->Y->Z) are implemented as :class:`~tfp.bijectors.Bijector` objects and encapsulated in a Keras :class:`~tf.keras.Model`.
Here is an example:
.. code-block:: python
# Initialize the flow and model
diff_flow_callback = DiffFlowCallback(diff_chain, Z2Y_bijector='MAF')
# Train the model
diff_flow_callback.train()
# Compute the shift probability and confidence interval
        p, p_low, p_high = diff_flow_callback.estimate_shift()
:param diff_chain: input parameter difference chain.
:type diff_chain: :class:`~getdist.mcsamples.MCSamples`
:param param_names: parameter names of the parameters to be used
in the calculation. By default all running parameters.
:type param_names: list, optional
:param Z2Y_bijector: either a :class:`~tfp.bijectors.Bijector` object
representing the mapping from `Z` to `Y`, or 'MAF' to instantiate a :class:`~.SimpleMAF`, defaults to 'MAF'.
:type Z2Y_bijector: optional
:param pregauss_bijector: not implemented yet, defaults to None.
:type pregauss_bijector: optional
:param learning_rate: initial learning rate, defaults to 1e-3.
:type learning_rate: float, optional
:param feedback: feedback level, defaults to 1.
:type feedback: int, optional
:param validation_split: fraction of samples to use for the validation sample, defaults to 0.1
:type validation_split: float, optional
:param early_stop_nsigma: absolute error on the tension at the zero-shift point to be used
as an approximate convergence criterion for early stopping, defaults to 0.
:type early_stop_nsigma: float, optional
:param early_stop_patience: minimum number of epochs to use when `early_stop_nsigma` is non-zero, defaults to 10.
:type early_stop_patience: int, optional
:raises NotImplementedError: if `pregauss_bijector` is not None.
:reference: George Papamakarios, Theo Pavlakou, Iain Murray (2017). Masked Autoregressive Flow for Density Estimation. `arXiv:1705.07057 <https://arxiv.org/abs/1705.07057>`_
"""
def __init__(self, diff_chain, param_names=None, Z2Y_bijector='MAF', pregauss_bijector=None, learning_rate=1e-3, feedback=1, validation_split=0.1, early_stop_nsigma=0., early_stop_patience=10, **kwargs):
self.feedback = feedback
# Chain
self._init_diff_chain(diff_chain, param_names=param_names, validation_split=validation_split)
# Transformed distribution
self._init_transf_dist(Z2Y_bijector, learning_rate=learning_rate, **kwargs)
if feedback > 0:
print("Building flow")
print(" - trainable parameters:", self.model.count_params())
# Metrics
keys = ["loss", "val_loss", "shift0_chi2", "shift0_pval", "shift0_nsigma", "chi2Z_ks", "chi2Z_ks_p"]
self.log = {_k: [] for _k in keys}
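        # baseline: squared norms of the pre-whitened test samples and their KS
        # distance from a chi2 distribution, used for comparison during training: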
self.chi2Y = np.sum(self.Y_test**2, axis=1)
self.chi2Y_ks, self.chi2Y_ks_p = scipy.stats.kstest(self.chi2Y, 'chi2', args=(self.num_params,))
# Options
self.early_stop_nsigma = early_stop_nsigma
self.early_stop_patience = early_stop_patience
# Pre-gaussianization
if pregauss_bijector is not None:
# The idea is to introduce yet another step of deterministic gaussianization, eg using the prior CDF
# or double prior (convolved with itself, eg a triangular distribution)
raise NotImplementedError
def _init_diff_chain(self, diff_chain, param_names=None, validation_split=0.1):
# initialize param names:
if param_names is None:
param_names = diff_chain.getParamNames().getRunningNames()
else:
chain_params = diff_chain.getParamNames().list()
if not np.all([name in chain_params for name in param_names]):
raise ValueError('Input parameter is not in the diff chain.\n',
'Input parameters ', param_names, '\n'
'Possible parameters', chain_params)
# indexes:
ind = [diff_chain.index[name] for name in param_names]
self.num_params = len(ind)
# Gaussian approximation (full chain)
mcsamples_gaussian_approx = gaussian_tension.gaussian_approximation(diff_chain, param_names=param_names)
self.dist_gaussian_approx = tfd.MultivariateNormalTriL(loc=mcsamples_gaussian_approx.means[0].astype(np.float32), scale_tril=tf.linalg.cholesky(mcsamples_gaussian_approx.covs[0].astype(np.float32)))
self.Y2X_bijector = self.dist_gaussian_approx.bijector
# Samples
# Split training/test
n = diff_chain.samples.shape[0]
indices = np.random.permutation(n)
n_split = int(validation_split*n)
test_idx, training_idx = indices[:n_split], indices[n_split:]
# Training
self.X = diff_chain.samples[training_idx, :][:, ind]
self.weights = diff_chain.weights[training_idx]
self.weights *= len(self.weights) / np.sum(self.weights) # weights normalized to number of samples
self.has_weights = np.any(self.weights != self.weights[0])
self.Y = np.array(self.Y2X_bijector.inverse(self.X.astype(np.float32)))
assert not np.any(np.isnan(self.Y))
self.num_samples = len(self.X)
# Test
self.X_test = diff_chain.samples[test_idx, :][:, ind]
self.Y_test = np.array(self.Y2X_bijector.inverse(self.X_test.astype(np.float32)))
self.weights_test = diff_chain.weights[test_idx]
self.weights_test *= len(self.weights_test) / np.sum(self.weights_test) # weights normalized to number of samples
# Training sample generator
Y_ds = tf.data.Dataset.from_tensor_slices((self.Y.astype(np.float32), # input
np.zeros(self.num_samples, dtype=np.float32), # output (dummy zero)
self.weights.astype(np.float32),)) # weights
Y_ds = Y_ds.prefetch(tf.data.experimental.AUTOTUNE).cache()
self.Y_ds = Y_ds.shuffle(self.num_samples, reshuffle_each_iteration=True).repeat()
if self.feedback:
print("Building training/test samples")
if self.has_weights:
print(" - {}/{} training/test samples and non-uniform weights.".format(self.num_samples, self.X_test.shape[0]))
else:
print(" - {}/{} training/test samples and uniform weights.".format(self.num_samples, self.X_test.shape[0]))
def _init_transf_dist(self, Z2Y_bijector, learning_rate=1e-4, **kwargs):
# Model
if Z2Y_bijector == 'MAF':
self.MAF = SimpleMAF(self.num_params, feedback=self.feedback, **kwargs)
Z2Y_bijector = self.MAF.bijector
assert isinstance(Z2Y_bijector, tfp.bijectors.Bijector)
# Bijector and transformed distribution
self.Z2Y_bijector = Z2Y_bijector
self.dist_transformed = tfd.TransformedDistribution(distribution=tfd.MultivariateNormalDiag(np.zeros(self.num_params, dtype=np.float32), np.ones(self.num_params, dtype=np.float32)), bijector=Z2Y_bijector)
# Full bijector
self.Z2X_bijector = tfb.Chain([self.Y2X_bijector, self.Z2Y_bijector])
# Full distribution
self.dist_learned = tfd.TransformedDistribution(distribution=tfd.MultivariateNormalDiag(np.zeros(self.num_params, dtype=np.float32), np.ones(self.num_params, dtype=np.float32)), bijector=self.Z2X_bijector) # samples from std gaussian mapped to X
# Construct model
x_ = Input(shape=(self.num_params,), dtype=tf.float32)
log_prob_ = self.dist_transformed.log_prob(x_)
self.model = Model(x_, log_prob_)
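        # the model output is already the log-probability of the samples, so
        # maximum-likelihood training amounts to minimizing its negative: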
loss = lambda _, log_prob: -log_prob
self.model.compile(optimizer=tf.optimizers.Adam(learning_rate=learning_rate), loss=loss)
def train(self, epochs=100, batch_size=None, steps_per_epoch=None, callbacks=[], verbose=1, **kwargs):
"""
Train the normalizing flow model. Internallay, this runs the fit method of the Keras :class:`~tf.keras.Model`, to which `**kwargs are passed`.
:param epochs: number of training epochs, defaults to 100.
:type epochs: int, optional
:param batch_size: number of samples per batch, defaults to None. If None, the training sample is divided into `steps_per_epoch` batches.
:type batch_size: int, optional
:param steps_per_epoch: number of steps per epoch, defaults to None. If None and `batch_size` is also None, then `steps_per_epoch` is set to 100.
:type steps_per_epoch: int, optional
:param callbacks: a list of additional Keras callbacks, such as :class:`~tf.keras.callbacks.ReduceLROnPlateau`, defaults to [].
:type callbacks: list, optional
:param verbose: verbosity level, defaults to 1.
:type verbose: int, optional
:return: A :class:`~tf.keras.callbacks.History` object. Its `history` attribute is a dictionary of training and validation loss values and metrics values at successive epochs: `"shift0_chi2"` is the squared norm of the zero-shift point in the gaussianized space, with the probability-to-exceed and corresponding tension in `"shift0_pval"` and `"shift0_nsigma"`; `"chi2Z_ks"` and `"chi2Z_ks_p"` contain the :math:`D_n` statistic and probability-to-exceed of the Kolmogorov-Smironov test that squared norms of the transformed samples `Z` are :math:`\\chi^2` distributed (with a number of degrees of freedom equal to the number of parameters).
"""
# We're trying to loop through the full sample each epoch
if batch_size is None:
if steps_per_epoch is None:
steps_per_epoch = 100
batch_size = int(self.num_samples/steps_per_epoch)
else:
if steps_per_epoch is None:
steps_per_epoch = int(self.num_samples/batch_size)
# Run !
hist = self.model.fit(x=self.Y_ds.batch(batch_size),
batch_size=batch_size,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
validation_data=(self.Y_test, np.zeros(len(self.Y_test), dtype=np.float32), self.weights_test),
verbose=verbose,
callbacks=[tf.keras.callbacks.TerminateOnNaN(), self]+callbacks,
**kwargs)
return hist
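    # Minimal usage sketch (assumes a constructed DiffFlowCallback instance `flow`):
    #   hist = flow.train(epochs=200, batch_size=8192)
    #   final_val_loss = hist.history['val_loss'][-1]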
def estimate_shift(self, tol=0.05, max_iter=1000, step=100000):
"""
Compute the normalizing flow estimate of the probability of a parameter shift given the input parameter difference chain. This is done with a Monte Carlo estimate by comparing the probability density at the zero-shift point to that at samples drawn from the normalizing flow approximation of the distribution.
:param tol: absolute tolerance on the shift significance, defaults to 0.05.
:type tol: float, optional
:param max_iter: maximum number of sampling steps, defaults to 1000.
:type max_iter: int, optional
:param step: number of samples per step, defaults to 100000.
:type step: int, optional
        :return: the estimated shift probability together with the lower and upper bounds of its 68% Clopper-Pearson confidence interval.
"""
err = np.inf
counter = max_iter
_thres = self.dist_learned.log_prob(np.zeros(self.num_params, dtype=np.float32))
_num_filtered = 0
_num_samples = 0
while err > tol and counter >= 0:
counter -= 1
_s = self.dist_learned.sample(step)
_s_prob = self.dist_learned.log_prob(_s)
_t = np.array(_s_prob > _thres)
_num_filtered += np.sum(_t)
_num_samples += step
_P = float(_num_filtered)/float(_num_samples)
_low, _upper = utils.clopper_pearson_binomial_trial(float(_num_filtered),
float(_num_samples),
alpha=0.32)
err = np.abs(utils.from_confidence_to_sigma(_upper)-utils.from_confidence_to_sigma(_low))
return _P, _low, _upper
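    # Sketch of converting the estimate into a tension, using this module's utils:
    #   P, P_low, P_high = flow.estimate_shift(tol=0.05)
    #   nsigma = utils.from_confidence_to_sigma(P)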
def _compute_shift_proba(self):
zero = np.array(self.Z2X_bijector.inverse(np.zeros(self.num_params, dtype=np.float32)))
chi2Z0 = np.sum(zero**2)
pval = scipy.stats.chi2.cdf(chi2Z0, df=self.num_params)
nsigma = utils.from_confidence_to_sigma(pval)
return zero, chi2Z0, pval, nsigma
def _plot_loss(self, ax, logs={}):
self.log["loss"].append(logs.get('loss'))
self.log["val_loss"].append(logs.get('val_loss'))
if ax is not None:
ax.plot(self.log["loss"], label='Training')
ax.plot(self.log["val_loss"], label='Testing')
ax.set_title("Training Loss")
ax.set_xlabel("Epoch #")
ax.set_ylabel("Loss")
ax.legend()
def _plot_shift_proba(self, ax, logs={}):
# Compute chi2 at zero shift
zero, chi2Z0, pval, nsigma = self._compute_shift_proba()
self.log["shift0_chi2"].append(chi2Z0)
self.log["shift0_pval"].append(pval)
self.log["shift0_nsigma"].append(nsigma)
# Plot
if ax is not None:
ax.plot(self.log["shift0_chi2"])
ax.set_title(r"$\chi^2$ at zero-shift")
ax.set_xlabel("Epoch #")
ax.set_ylabel(r"$\chi^2$")
def _plot_chi2_dist(self, ax, logs={}):
# Compute chi2 and make sure some are finite
chi2Z = np.sum(np.array(self.Z2Y_bijector.inverse(self.Y_test))**2, axis=1)
_s = np.isfinite(chi2Z)
assert np.any(_s)
chi2Z = chi2Z[_s]
# Run KS test
try:
# Note that scipy.stats.kstest does not handle weights yet so we need to resample.
if self.has_weights:
chi2Z = np.random.choice(chi2Z, size=len(chi2Z), replace=True, p=self.weights_test[_s]/np.sum(self.weights_test[_s]))
chi2Z_ks, chi2Z_ks_p = scipy.stats.kstest(chi2Z, 'chi2', args=(self.num_params,))
        except Exception:
chi2Z_ks, chi2Z_ks_p = 0., 0.
self.log["chi2Z_ks"].append(chi2Z_ks)
self.log["chi2Z_ks_p"].append(chi2Z_ks_p)
xx = np.linspace(0, self.num_params*4, 1000)
bins = np.linspace(0, self.num_params*4, 100)
# Plot
if ax is not None:
ax.plot(xx, scipy.stats.chi2.pdf(xx, df=self.num_params), label='$\\chi^2_{{{}}}$ PDF'.format(self.num_params), c='k', lw=1)
ax.hist(self.chi2Y, bins=bins, density=True, histtype='step', weights=self.weights_test, label='Pre-gauss ($D_n$={:.3f})'.format(self.chi2Y_ks))
ax.hist(chi2Z, bins=bins, density=True, histtype='step', weights=self.weights_test[_s], label='Post-gauss ($D_n$={:.3f})'.format(chi2Z_ks))
ax.set_title(r'$\chi^2_{{{}}}$ PDF'.format(self.num_params))
ax.set_xlabel(r'$\chi^2$')
ax.legend(fontsize=8)
def _plot_chi2_ks_p(self, ax, logs={}):
# Plot
if ax is not None:
ln1 = ax.plot(self.log["chi2Z_ks_p"], label='$p$')
ax.set_title(r"KS test ($\chi^2$)")
ax.set_xlabel("Epoch #")
ax.set_ylabel(r"$p$-value")
ax2 = ax.twinx()
ln2 = ax2.plot(self.log["chi2Z_ks"], ls='--', label='$D_n$')
            ax2.set_ylabel(r'$D_n$')
lns = ln1+ln2
labs = [l.get_label() for l in lns]
ax2.legend(lns, labs, loc=1)
def on_epoch_end(self, epoch, logs={}):
"""
This method is used by Keras to show progress during training if `feedback` is True.
"""
if self.feedback:
if isinstance(self.feedback, int):
if epoch % self.feedback:
return
clear_output(wait=True)
fig, axes = plt.subplots(1, 4, figsize=(16, 3))
else:
axes = [None]*4
self._plot_loss(axes[0], logs=logs)
self._plot_shift_proba(axes[1], logs=logs)
self._plot_chi2_dist(axes[2], logs=logs)
self._plot_chi2_ks_p(axes[3], logs=logs)
for k in self.log.keys():
logs[k] = self.log[k][-1]
if self.early_stop_nsigma > 0.:
if len(self.log["shift0_nsigma"]) > self.early_stop_patience and \
np.std(self.log["shift0_nsigma"][-self.early_stop_patience:]) < self.early_stop_nsigma and \
self.log["chi2Z_ks_p"][-1] > 1e-6:
self.model.stop_training = True
if self.feedback:
plt.tight_layout()
plt.show()
return fig
###############################################################################
# helper function to compute tension with default MAF:
def flow_parameter_shift(diff_chain, param_names=None, epochs=100, batch_size=None, steps_per_epoch=None, callbacks=[], verbose=1, tol=0.05, max_iter=1000, step=100000, **kwargs):
"""
Wrapper function to compute a normalizing flow estimate of the probability of a parameter shift given the input parameter difference chain with a standard MAF. It creates a :class:`~.DiffFlowCallback` object with a :class:`~.SimpleMAF` model (to which kwargs are passed), trains the model and returns the estimated shift probability.
:param diff_chain: input parameter difference chain.
:type diff_chain: :class:`~getdist.mcsamples.MCSamples`
:param param_names: parameter names of the parameters to be used
in the calculation. By default all running parameters.
:type param_names: list, optional
:param epochs: number of training epochs, defaults to 100.
:type epochs: int, optional
:param batch_size: number of samples per batch, defaults to None. If None, the training sample is divided into `steps_per_epoch` batches.
:type batch_size: int, optional
:param steps_per_epoch: number of steps per epoch, defaults to None. If None and `batch_size` is also None, then `steps_per_epoch` is set to 100.
:type steps_per_epoch: int, optional
:param callbacks: a list of additional Keras callbacks, such as :class:`~tf.keras.callbacks.ReduceLROnPlateau`, defaults to [].
:type callbacks: list, optional
:param verbose: verbosity level, defaults to 1.
:type verbose: int, optional
:param tol: absolute tolerance on the shift significance, defaults to 0.05.
:type tol: float, optional
:param max_iter: maximum number of sampling steps, defaults to 1000.
:type max_iter: int, optional
:param step: number of samples per step, defaults to 100000.
:type step: int, optional
    :return: the estimated shift probability together with the lower and upper bounds of its 68% Clopper-Pearson confidence interval.
"""
# Callback/model handler
diff_flow_callback = DiffFlowCallback(diff_chain, param_names=param_names, **kwargs)
# Train model
diff_flow_callback.train(epochs=epochs, batch_size=batch_size, steps_per_epoch=steps_per_epoch, callbacks=callbacks, verbose=verbose)
# Compute tension
return diff_flow_callback.estimate_shift(tol=tol, max_iter=max_iter, step=step)
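# Minimal usage sketch (assumes `diff_chain` is a getdist MCSamples parameter-difference chain):
#   P, P_low, P_high = flow_parameter_shift(diff_chain, epochs=200)
#   print('shift probability: {:.4f} (68% CL: [{:.4f}, {:.4f}])'.format(P, P_low, P_high))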
| 26,170 | 49.040153 | 648 | py |
white_box_rarl | white_box_rarl-main/wbrarl.py | import sys
import os
import time
import random
import argparse
import multiprocessing
import pickle
import copy
from multiprocessing import freeze_support
import numpy as np
import torch
import gym
from stable_baselines3.ppo import PPO
from stable_baselines3.sac import SAC
from stable_baselines3.common.vec_env import SubprocVecEnv, VecNormalize
from stable_baselines3.common.utils import set_random_seed
import warnings
LAST_LAYER_DIM = 256
HYPERS_SAC = {'Hopper-v3': {'learning_starts': 4000, 'learning_rate': 0.0002}}
HYPERS_PPO = {'HalfCheetah-v3': {'batch_size': 64,
'ent_coef': 0.0025,
'n_steps': 128, # orig was 512, made smaller because n_envs is high
'gamma': 0.98,
'learning_rate': 2.0633e-05,
'gae_lambda': 0.92,
'n_epochs': 12, # orig was 20
'max_grad_norm': 0.5,
'vf_coef': 0.58096,
'clip_range': 0.06,
'policy_kwargs': {'log_std_init': -2.0, 'ortho_init': False,
'activation_fn': torch.nn.ReLU,
'net_arch': dict(pi=[256, 256], vf=[256, 256])}}}
ADV_HYPERS_SAC = {'Hopper-v3': {'ent_coef': 0.15, 'learning_starts': 4000}}
ADV_HYPERS_PPO = {'HalfCheetah-v3': {'ent_coef': 0.0075}}
COEF_DICT = {'HalfCheetah-v3': {'mass': [0.2, 0.3, 0.4, 0.5, 1.5, 2.0, 2.5, 3.0],
'friction': [0.05, 0.1, 0.2, 0.3, 1.3, 1.5, 1.7, 1.9]},
'Hopper-v3': {'mass': [0.2, 0.3, 0.4, 0.5, 1.05, 1.1, 1.15, 1.2],
'friction': [0.2, 0.3, 0.4, 0.5, 1.4, 1.6, 1.8, 2.0]},
}
np.set_printoptions(suppress=True)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--experiment_type', type=str, default='') # 'ctrl', 'rarl', or 'rarl' with 'act', 'val', and/or 'lat' as prefixes
parser.add_argument('--agent_ckpt', type=str, default='')
parser.add_argument('--env_ckpt', type=str, default='')
parser.add_argument('--env', type=str, default='HalfCheetah-v3')
parser.add_argument('--id', type=int, default=0)
parser.add_argument('--model_dir', type=str, default='./models/')
parser.add_argument('--results_dir', type=str, default='./results/')
parser.add_argument('--n_test_episodes', type=int, default=10)
parser.add_argument('--n_envs', type=int, default=16) # epoch size is n_steps * n_envs
parser.add_argument('--n_train', type=int, default=int(2e6))
parser.add_argument('--n_train_per_iter', type=int, default=10000) # how often to switch advs and report results
parser.add_argument('--test_each', type=int, default=2) # how often to test
parser.add_argument('--start_adv_training', type=int, default=200000) # when to start the adv
parser.add_argument('--n_advs', type=int, default=1) # how many adversaries to train in an ensemble
parser.add_argument('--delta_action', type=float, default=0.075) # how much to let the adv maximally perturb
parser.add_argument('--lam', type=float, default=0.05) # how much to penalize the adversary's action L1 norm
parser.add_argument('--device', type=str, default='cuda')
parser.add_argument('--mode', type=str, default='train')
parser.add_argument('--perturb_style', type=str, default='action') # 'body' or 'action' depending on what the adversary perturbs
parser.add_argument('--n_report', type=int, default=2)
args = parser.parse_args()
return args
def get_seed():
# gets a random seed from the current time
return int(str(time.time()).replace('.', '')[-5:])
class DummyAdvEnv(gym.Wrapper):
# this is used for initializing adversarial policies
def __init__(self, env, lat, act, val, act_space):
self.env = env
obs_dict = {'ob': self.env.observation_space}
if lat:
lat_size = LAST_LAYER_DIM
obs_dict['lat'] = gym.spaces.Box(np.float32(-np.inf * np.ones(lat_size)),
np.float32(np.inf * np.ones(lat_size)))
if act:
obs_dict['act'] = self.env.action_space
if val:
obs_dict['val'] = gym.spaces.Box(np.float32(np.array([-np.inf])), np.float32(np.array([np.inf])))
self.observation_space = gym.spaces.Dict(obs_dict)
self.action_space = act_space
class RARLEnv(gym.Wrapper):
# this can be an env for either the protagonist or adversary depending on whether agent_mode or adv_mode is called
def __init__(self, env, args, agent_ckpt, adv_ckpts, mode, obs_mean=0, obs_var=1):
super().__init__(env)
self.env = env
self.args = copy.deepcopy(args)
self.sd = get_seed()
self.lat = 'lat' in self.args.experiment_type
self.act = 'act' in self.args.experiment_type
self.val = 'val' in self.args.experiment_type
self.observation = None
self.agent_action = None
self.agent_ckpt = agent_ckpt
if isinstance(adv_ckpts, str):
adv_ckpts = [adv_ckpts]
self.adv_ckpts = adv_ckpts
self.obs_mean = obs_mean
self.obs_var = obs_var
if mode == 'agent':
self.agent_mode()
elif mode == 'adv':
self.adv_mode()
def agent_mode(self):
# get observation space, action space, agents, step, and reset
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
if self.adv_ckpts[0]:
self.advs = [args.alg.load(self.args.model_dir + self.adv_ckpts[i], device='cpu')
for i in range(args.n_advs)]
else:
dummy_adv_env = DummyAdvEnv(copy.deepcopy(self.env), self.lat, self.act, self.val, self.get_adv_action_space())
self.advs = [self.args.alg('MultiInputPolicy', dummy_adv_env, seed=self.sd, device='cpu', **self.args.adv_hypers[self.args.env])
for _ in range(args.n_advs)]
if self.agent_ckpt:
self.agent = self.args.alg.load(self.args.model_dir + self.agent_ckpt, device='cpu')
else:
self.agent = self.args.alg('MlpPolicy', self, device='cpu', seed=self.sd, **self.args.hypers[self.args.env])
self.step = self.step_agent
self.reset = self.reset_agent
self.adv_i = 0
def adv_mode(self):
# get observation space, action space, agents, step, and reset
obs_dict = {'ob': self.env.observation_space}
if self.lat:
lat_size = LAST_LAYER_DIM
obs_dict['lat'] = gym.spaces.Box(np.float32(-np.inf * np.ones(lat_size)),
np.float32(np.inf * np.ones(lat_size)))
if self.act:
obs_dict['act'] = self.env.action_space
if self.val:
obs_dict['val'] = gym.spaces.Box(np.float32(np.array([-np.inf])), np.float32(np.array([np.inf])))
self.observation_space = gym.spaces.Dict(obs_dict)
self.action_space = self.get_adv_action_space()
if self.agent_ckpt:
self.agent = self.args.alg.load(self.args.model_dir + self.agent_ckpt, device='cpu')
else:
self.agent = self.args.alg('MlpPolicy', self.env, device='cpu', seed=self.sd, **self.args.hypers[self.args.env])
self.step = self.step_adv
self.reset = self.reset_adv
def reset_agent(self):
self.observation = self.env.reset()
self.adv_i = random.randint(0, len(self.advs)-1)
return self.observation
def reset_adv(self):
self.observation = self.env.reset()
self.agent_action = self.agent.predict(self.observation, deterministic=True)[0]
return self.get_adv_obs(self.agent_action)
def get_adv_obs(self, agent_action):
obs = {'ob': self.observation}
if self.lat:
tens_ob = torch.unsqueeze(torch.from_numpy(self.observation), dim=0).float()
if self.args.alg == SAC:
latent_pi_val = self.agent.policy.actor.latent_pi(tens_ob)
else:
features = self.agent.policy.extract_features(tens_ob)
latent_pi_val, _ = self.agent.policy.mlp_extractor(features)
self.agent_latent = latent_pi_val.detach().numpy()
obs['lat'] = np.squeeze(self.agent_latent)
if self.act:
obs['act'] = agent_action
if self.val:
raise NotImplementedError
return obs
def step_agent(self, agent_action):
adv_obs = self.get_adv_obs(agent_action)
adv_action = self.advs[self.adv_i].predict(adv_obs, deterministic=False)[0]
if self.args.perturb_style == 'body':
self.adv_to_xfrc(adv_action)
if self.args.perturb_style == 'action':
agent_action += adv_action
agent_action = np.clip(agent_action, self.env.action_space.low, self.env.action_space.high)
obs, reward, done, info = self.env.step(agent_action)
return obs, reward, done, info
def step_adv(self, adv_action):
if self.args.perturb_style == 'body':
self.adv_to_xfrc(adv_action)
self.observation, reward, done, infos = self.env.step(self.agent_action)
norm_penalty = self.args.lam * np.mean(np.abs(adv_action))
adv_reward = -1 * reward - norm_penalty
norm_obs = np.clip((self.observation - self.obs_mean) / np.sqrt(self.obs_var + 1e-8), -10, 10)
self.agent_action = self.agent.predict(norm_obs, deterministic=False)[0]
if self.args.perturb_style == 'action':
self.agent_action += adv_action
self.agent_action = np.clip(self.agent_action, self.env.action_space.low, self.env.action_space.high)
obs = self.get_adv_obs(self.agent_action)
return obs, adv_reward, done, infos
def get_adv_action_space(self):
if self.args.perturb_style == 'body':
high_adv = np.float32(np.ones(self.n_dim * len(self.body_idx)) * self.args.delta_body)
return gym.spaces.Box(-high_adv, high_adv)
elif self.args.perturb_style == 'action':
high_adv = self.env.action_space.high * self.args.delta_action
return gym.spaces.Box(-high_adv, high_adv)
else:
raise NotImplementedError
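# RARLEnv usage sketch: 'agent' mode trains the protagonist against frozen adversaries
# (the sampled adversary perturbation is added to the agent action and clipped), while
# 'adv' mode trains the adversary against a frozen protagonist on dict observations, e.g.
#   env = RARLEnv(gym.make(args.env), args, agent_ckpt='', adv_ckpts='', mode='agent')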
def make_rarl_env(wrapper, args, agent_ckpt, adv_ckpts, mode, obs_mean, obs_var, rank):
def _init():
gym_env = gym.make(args.env)
env = wrapper(gym_env, args, agent_ckpt, adv_ckpts, mode, obs_mean, obs_var)
env.seed(rank)
return env
set_random_seed(rank)
return _init
def make_env(args, rank, mc=1.0, fc=1.0):
def _init():
env = gym.make(args.env)
env.seed(rank)
body_mass = env.model.body_mass * mc
env.model.body_mass[:] = body_mass
geom_friction = env.model.geom_friction * fc
env.model.geom_friction[:] = geom_friction
return env
set_random_seed(rank)
return _init
def get_save_suff(args, iter):
savename = f'rarl_{args.env}_{iter * args.n_train_per_iter}_id={args.id}'
if 'act' in args.experiment_type:
savename = 'act_' + savename
if 'val' in args.experiment_type:
savename = 'val_' + savename
if 'lat' in args.experiment_type:
savename = 'lat_' + savename
return savename
def simple_eval(policy, eval_env, n_episodes):
all_rewards = []
observation = eval_env.reset()
for _ in range(n_episodes):
done = False
ep_reward = 0.0
while not done:
action = policy.predict(observation=observation, deterministic=False)[0]
observation, reward, done, infos = eval_env.step(action)
done = done[0]
ep_reward += reward[0]
all_rewards.append(ep_reward)
observation = eval_env.reset()
return sum(all_rewards) / n_episodes
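# Evaluation sketch (hypothetical checkpoint names; mirrors eval_agent_grid below):
#   policy = PPO.load('models/best_agent_control_HalfCheetah-v3_2000000_id=0.zip')
#   eval_env = VecNormalize.load('models/agent_control_HalfCheetah-v3_2000000_id=0_eval_env',
#                                SubprocVecEnv([make_env(args, 42)]))
#   mean_reward = simple_eval(policy, eval_env, n_episodes=10)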
def train_rarl(args):
env_wrapper = RARLEnv
n_iters = (args.n_train // args.n_train_per_iter)
sd = get_seed()
agent_rewards = []
adv_improvements = []
last_saved_agent = ''
last_saved_adv = ''
best_mean_reward = -np.inf
obs_mean = 0
obs_var = 1
adv_envs_raw = [SubprocVecEnv([make_rarl_env(env_wrapper, args, last_saved_agent, last_saved_adv,
'adv', obs_mean, obs_var, sd + i)
for i in range(args.n_envs)]) for _ in range(args.n_advs)]
adv_envs = [VecNormalize(adv_envs_raw[j], norm_reward=False) for j in range(args.n_advs)]
adv_policies = [args.alg('MultiInputPolicy', adv_envs[j], device=args.device, seed=sd, **args.adv_hypers[args.env]) for j in range(args.n_advs)]
agent_env_raw = SubprocVecEnv([make_rarl_env(env_wrapper, args, last_saved_agent, last_saved_adv,
'agent', obs_mean, obs_var, sd + i)
for i in range(args.n_envs)])
agent_env = VecNormalize(agent_env_raw, norm_reward=False)
agent_policy = args.alg('MlpPolicy', agent_env, device=args.device, seed=sd, **args.hypers[args.env])
last_saved_agent = 'agent_' + get_save_suff(args, 0)
agent_policy.save(args.model_dir + last_saved_agent + '.zip')
adv_eval_envs_raw = [SubprocVecEnv([make_rarl_env(env_wrapper, args, last_saved_agent, last_saved_adv,
'adv', obs_mean, obs_var, 42)])
for _ in range(args.n_advs)]
adv_eval_envs = [VecNormalize(adv_eval_envs_raw[j], norm_reward=False) for j in range(args.n_advs)]
agent_eval_env_raw = SubprocVecEnv([make_env(args, 42)])
agent_eval_env = VecNormalize(agent_eval_env_raw, norm_reward=False)
last_saved_advs = [] # for deleting files no longer needed
for i in range(1, n_iters + 1):
save_suff = get_save_suff(args, i)
n_train_this_iter = args.n_train_per_iter + args.hypers[args.env].get('learning_starts', 0)
# train adv
if ((args.perturb_style == 'body' and args.delta_body > 0.0) or
(args.perturb_style == 'action' and args.delta_action > 0.0)) and \
args.n_train_per_iter * i > args.start_adv_training:
obs_mean = agent_env.obs_rms.mean
obs_var = agent_env.obs_rms.var
for adv_policy, adv_env, adv_eval_env in zip(adv_policies, adv_envs, adv_eval_envs):
adv_env_raw = SubprocVecEnv([make_rarl_env(env_wrapper, args, last_saved_agent, last_saved_adv,
'adv', obs_mean, obs_var, sd + i)
for i in range(args.n_envs)])
adv_env_state = adv_env.__getstate__()
adv_env.__setstate__(adv_env_state)
adv_env.set_venv(adv_env_raw)
adv_policy.env = adv_env
adv_eval_env_raw = SubprocVecEnv([make_rarl_env(env_wrapper, args, last_saved_agent, adv_policy,
'adv', obs_mean, obs_var, 42)])
adv_eval_env.__setstate__(adv_env_state)
adv_eval_env.set_venv(adv_eval_env_raw)
if (i - 1) % args.test_each == 0:
mean_rewards_pre = [simple_eval(adv_policy, adv_eval_envs[j], args.n_test_episodes) for j, adv_policy in enumerate(adv_policies)]
else:
mean_rewards_pre = 0
for adv_policy in adv_policies:
adv_policy.learn(n_train_this_iter)
for adv_policy, adv_env, adv_eval_env in zip(adv_policies, adv_envs, adv_eval_envs):
adv_env_state = adv_env.__getstate__()
adv_eval_env_raw = SubprocVecEnv([make_rarl_env(env_wrapper, args, last_saved_agent, adv_policy,
'adv', obs_mean, obs_var, 42)])
adv_eval_env.__setstate__(adv_env_state)
adv_eval_env.set_venv(adv_eval_env_raw)
if (i - 1) % args.test_each == 0:
mean_rewards_post = [simple_eval(adv_policy, adv_eval_envs[j], args.n_test_episodes) for j, adv_policy in enumerate(adv_policies)]
adv_improvements.append(round((sum(mean_rewards_post) - sum(mean_rewards_pre)) / args.n_advs))
if i % args.n_report == 0:
print(f'{args.experiment_type} id={args.id} adv_improvements:', adv_improvements, sum(adv_improvements))
sys.stdout.flush()
for lsa in last_saved_advs:
os.remove(args.model_dir + lsa + '.zip')
last_saved_advs = [f'adv{j}_' + save_suff for j in range(args.n_advs)]
for i_policy, adv_policy in enumerate(adv_policies):
adv_policy.save(args.model_dir + last_saved_advs[i_policy] + '.zip')
# train agent
agent_env_raw = SubprocVecEnv([make_rarl_env(env_wrapper, args, last_saved_agent, last_saved_advs,
'agent', obs_mean, obs_var, sd + j)
for j in range(args.n_envs)])
agent_env_state = agent_env.__getstate__()
agent_env.__setstate__(agent_env_state)
agent_env.set_venv(agent_env_raw)
agent_policy.env = agent_env
agent_policy.learn(n_train_this_iter)
agent_env_state = agent_env.__getstate__()
agent_eval_env_raw = SubprocVecEnv([make_env(args, 42)])
agent_eval_env.__setstate__(agent_env_state)
agent_eval_env.set_venv(agent_eval_env_raw)
if (i - 1) % args.test_each == 0:
mean_reward = simple_eval(agent_policy, agent_eval_env, args.n_test_episodes)
if mean_reward >= best_mean_reward:
best_mean_reward = mean_reward
best_save_suff = get_save_suff(args, n_iters)
agent_savename = 'best_agent_' + best_save_suff
agent_policy.save(args.model_dir + agent_savename + '.zip')
agent_rewards.append(round(mean_reward))
if i % args.n_report == 0:
print(f'{args.env} {args.experiment_type} id={args.id} timestep: {i * args.n_train_per_iter}, mean agent rewards: {agent_rewards}')
sys.stdout.flush()
os.remove(args.model_dir + last_saved_agent + '.zip')
last_saved_agent = 'agent_' + save_suff
agent_policy.save(args.model_dir + last_saved_agent + '.zip')
savename = 'agent_' + get_save_suff(args, n_iters)
agent_policy.save(args.model_dir + savename + '.zip')
with open(args.results_dir + savename + '_rewards.pkl', 'wb') as f:
pickle.dump(agent_rewards, f)
agent_eval_env.save(args.model_dir + savename + '_eval_env')
def train_control(args):
n_iters = (args.n_train // args.n_train_per_iter)
sd = get_seed()
env = VecNormalize(SubprocVecEnv([make_env(args, sd + i) for i in range(args.n_envs)]), norm_reward=False)
eval_env = VecNormalize(SubprocVecEnv([make_env(args, 42)]), norm_reward=False)
policy = args.alg('MlpPolicy', env, device=args.device, seed=sd, **args.hypers[args.env])
best_mean_reward = -np.inf
savename = f'best_agent_control_{args.env}_{args.n_train}_id={args.id}'
rewards = []
for i in range(1, n_iters + 1):
n_train_this_iter = args.n_train_per_iter + args.hypers[args.env].get('learning_starts', 0)
policy.learn(n_train_this_iter)
# update the state of the eval env to be the same as the regular env
env_state = env.__getstate__()
eval_env_raw = SubprocVecEnv([make_env(args, 42)])
eval_env.__setstate__(env_state)
eval_env.set_venv(eval_env_raw)
if i % args.n_report == 0:
mean_reward = simple_eval(policy, eval_env, args.n_test_episodes)
rewards.append(round(mean_reward))
if mean_reward >= best_mean_reward:
best_mean_reward = mean_reward
policy.save(args.model_dir + savename + '.zip')
if i % args.n_report == 0:
print(f'{args.env} {args.experiment_type} id={args.id} timestep: {i * args.n_train_per_iter}, mean agent rewards: {rewards}')
sys.stdout.flush()
with open(args.results_dir + f'agent_control_{args.env}_{args.n_train}_id={args.id}' + '_rewards.pkl', 'wb') as f:
pickle.dump(rewards, f)
eval_env.save(args.model_dir + f'agent_control_{args.env}_{args.n_train}_id={args.id}_eval_env')
env.close()
eval_env.close()
def eval_agent_grid(args):
mass_coeffs = COEF_DICT[args.env]['mass']
friction_coeffs = COEF_DICT[args.env]['friction']
assert args.agent_ckpt, 'Must give --agent_ckpt to test an agent'
assert args.env_ckpt, 'Must give --env_ckpt to test an agent'
all_mean_rewards = []
for mc in mass_coeffs:
all_mean_rewards.append([])
for fc in friction_coeffs:
eval_env = SubprocVecEnv([make_env(args, 42, mc, fc)])
eval_env = VecNormalize.load(args.model_dir + args.env_ckpt, eval_env)
agent_policy = args.alg.load(args.model_dir + args.agent_ckpt, device=args.device)
mean_reward = simple_eval(agent_policy, eval_env, 16)
print(f'{args.agent_ckpt} mass={mc} friction={fc} mean eval reward: {mean_reward}')
all_mean_rewards[-1].append(mean_reward)
with open(args.results_dir + args.agent_ckpt + f'_eval.pkl', 'wb') as f:
pickle.dump(all_mean_rewards, f)
def eval_adv(args):
args.lam = 0
env_wrapper = RARLEnv
n_iters = (args.n_train // args.n_train_per_iter)
sd = get_seed()
assert args.agent_ckpt, 'Must give --agent_ckpt to test an agent'
assert args.env_ckpt, 'Must give --env_ckpt to test an agent'
agent_env = SubprocVecEnv([make_env(args, 42)])
agent_env = VecNormalize.load(args.model_dir + args.env_ckpt, agent_env)
obs_mean = agent_env.obs_rms.mean
obs_var = agent_env.obs_rms.var
adv_env_raw = SubprocVecEnv([make_rarl_env(env_wrapper, args, args.agent_ckpt, '',
'adv', obs_mean, obs_var, sd + i)
for i in range(args.n_envs)])
adv_eval_env_raw = SubprocVecEnv([make_rarl_env(env_wrapper, args, args.agent_ckpt, '',
'adv', obs_mean, obs_var, 42)])
adv_env = VecNormalize(adv_env_raw, norm_reward=False)
adv_eval_env = VecNormalize(adv_eval_env_raw, norm_reward=False)
adv_env_state = adv_env.__getstate__()
agent_env_state = agent_env.__getstate__()
adv_env_state['obs_rms']['ob'] = agent_env_state['obs_rms']
adv_env_state['ret_rms'] = agent_env_state['ret_rms']
adv_env.__setstate__(adv_env_state)
adv_env_raw = SubprocVecEnv([make_rarl_env(env_wrapper, args, args.agent_ckpt, '',
'adv', obs_mean, obs_var, sd + i)
for i in range(args.n_envs)])
adv_env.set_venv(adv_env_raw)
adv_policy = args.alg('MultiInputPolicy', adv_env, device=args.device, seed=sd, **args.adv_hypers[args.env])
n_train_per_iter = args.n_train_per_iter + args.hypers[args.env].get('learning_starts', 0)
for i in range(1, n_iters + 1):
adv_policy.learn(n_train_per_iter)
if (i - 1) % args.test_each == 0:
adv_eval_env_raw = SubprocVecEnv([make_rarl_env(env_wrapper, args, args.agent_ckpt, '',
'adv', obs_mean, obs_var, 42)])
adv_env_state = adv_env.__getstate__()
adv_eval_env.__setstate__(adv_env_state)
adv_eval_env.set_venv(adv_eval_env_raw)
mean_adv_reward = simple_eval(adv_policy, adv_eval_env, args.n_test_episodes)
print(f'adv eval id={args.id} mean_adv_reward:', mean_adv_reward)
sys.stdout.flush()
# TODO save
if __name__ == '__main__':
warnings.filterwarnings("ignore")
freeze_support()
multiprocessing.set_start_method('spawn')
args = parse_args()
if 'HalfCheetah' in args.env:
args.alg = PPO
args.hypers = HYPERS_PPO
args.adv_hypers = ADV_HYPERS_PPO
else:
args.alg = SAC
args.hypers = HYPERS_SAC
args.adv_hypers = ADV_HYPERS_SAC
if args.mode == 'eval':
eval_agent_grid(args)
elif args.mode == 'eval_adv':
eval_adv(args)
elif 'rarl' in args.experiment_type:
train_rarl(args)
elif args.experiment_type == 'ctrl':
train_control(args)
else:
raise NotImplementedError()
print('Done :)')
| 24,786 | 43.341682 | 148 | py |
neurotron_experiments | neurotron_experiments-main/neurotron_torch.py | # %% [markdown]
# # Settings
# %%
import torch
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from torch.utils.data import DataLoader, Dataset
# %% [markdown]
# # The NeuroTron class
# %%
class NeuroTron(nn.Module):
def __init__(self, n, r, h, activation=nn.functional.relu, w_init='const', dtype=torch.float32):
"""
Arguments:
n: number of input features
r: number of parameters
h: hidden layer width
activation: activation function
"""
super().__init__()
self.w = nn.Parameter(torch.empty(r, dtype=dtype), requires_grad=False)
self.M = torch.randn(r, n, dtype=dtype)
self.set_A(n, r, h, dtype=dtype)
self.set_w(w_init)
self.activation = activation
def set_A(self, n, r, h, dtype=torch.float32):
self.A = torch.empty(h, r, n, dtype=dtype)
C = torch.randn(r, n, dtype=dtype)
k = h // 2
i = 0
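        # Populate A with the h matrices M + factor * C for factor in {-k, ..., -1, 1, ..., k}
        # (this assumes h is even, so that exactly h non-zero factors are produced).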
for factor in range(-k, k+1):
if factor != 0:
Z = self.M + factor * C
self.A[i, :, :] = Z
i += 1
def set_w(self, init):
if init == 'const':
nn.init.constant_(self.w, 1.)
elif init == 'unif':
nn.init.uniform_(self.w)
def num_A(self):
return self.A.shape[0]
def forward(self, x):
postactivation = 0.
for i in range(self.num_A()):
preactivation = torch.matmul(torch.matmul(self.w, self.A[i, :, :]), x.t())
postactivation += self.activation(preactivation)
return postactivation / self.num_A()
def gradient(self, x, output, y):
return torch.matmul(self.M, torch.matmul(y - output, x) / x.shape[0])
def update_parameters(self, x, output, y, stepsize):
self.w.data.add_(stepsize * self.gradient(x, output, y))
def train(self, train_loader, stepsize, loss, log_step=200, test_loader=None):
train_losses, test_losses = [], []
for train_batch_idx, (train_data, train_targets) in enumerate(train_loader):
train_output = self.forward(train_data)
self.update_parameters(train_data, train_output, train_targets, stepsize)
if (train_batch_idx % log_step == 0):
train_losses.append(loss(train_targets, train_output))
if (test_loader is not None):
test_data, test_targets = next(iter(test_loader))
test_output = self.forward(test_data)
test_losses.append(loss(test_targets, self.forward(test_data)))
if (test_loader is not None):
test_losses = torch.stack(test_losses)
return torch.stack(train_losses), test_losses
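# %% [markdown]
# A single NeuroTron step in isolation, as a sanity-check sketch on a toy batch:
# %%
toy_net = NeuroTron(n=4, r=3, h=2)
toy_x = torch.randn(8, 4)
toy_y = torch.randn(8)
toy_out = toy_net.forward(toy_x)
toy_net.update_parameters(toy_x, toy_out, toy_y, stepsize=1e-4)
print(toy_net.w)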
# %% [markdown]
# # The PoisonedDataset class
# %%
class PoisonedDataset(Dataset):
def __init__(self, x, y, beta, theta):
self.x = x
self.y = y
self.beta = beta
self.theta = theta
def attack(self, y):
a = torch.bernoulli(torch.full_like(y, self.beta))
xi = torch.distributions.uniform.Uniform(torch.full_like(y, -self.theta), torch.full_like(y, self.theta)).sample()
return y + a * xi
def __repr__(self):
return f'PoisonedDataset'
def __len__(self):
return len(self.x)
def __getitem__(self, i):
return self.x[i], self.attack(self.y[i])
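# %% [markdown]
# A quick check of the poisoning model (sketch): with zero targets, a returned target is
# non-zero exactly when the Bernoulli(beta) attack fired, so the non-zero fraction should
# be close to beta.
# %%
check_dataset = PoisonedDataset(torch.zeros(1000, 2), torch.zeros(1000), beta=0.5, theta=0.125)
check_targets = torch.stack([check_dataset[i][1] for i in range(1000)])
print('poisoned fraction:', (check_targets != 0).float().mean().item())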
# %% [markdown]
# # Standard normal example
# %% [markdown]
# ## Prepare the data
# %%
num_samples = 125000
num_features = 100
sampling_distribution = torch.distributions.multivariate_normal.MultivariateNormal(
torch.zeros(num_features, dtype=torch.float32), torch.eye(num_features, dtype=torch.float32)
)
normal_data = sampling_distribution.sample([num_samples])
normal_targets = torch.stack([sampling_distribution.log_prob(normal_data[i, :]).exp() for i in range(num_samples)], dim=0)
# normal_targets = normal_data.norm(p=2, dim=1)
print(normal_data.shape, normal_targets.shape)
# %%
x_train, x_test, y_train, y_test = train_test_split(normal_data, normal_targets, test_size=0.2)
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
# %%
beta = 0.5
theta = 0.125
train_dataset = PoisonedDataset(x_train, y_train, beta=beta, theta=theta)
test_dataset = PoisonedDataset(x_test, y_test, beta=beta, theta=theta)
# %%
train_batch_size = 16
test_batch_size = 3 * train_batch_size
train_loader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=test_batch_size, shuffle=True)
# %% [markdown]
# ## Instantiate NeuroTron class
# %%
neurotron = NeuroTron(n=num_features, r=25, h=10, dtype=torch.float32)
# %% [markdown]
# ## Training
# %%
num_epochs = 2
train_losses = []
test_losses = []
verbose_msg = 'Train epoch {:' + str(len(str(num_epochs))) + '} of {:' + str(len(str(num_epochs))) +'}'
for epoch in range(num_epochs):
print(verbose_msg.format(epoch+1, num_epochs))
train_losses_in_epoch, test_losses_in_epoch = neurotron.train(
train_loader, stepsize=0.0001, loss=nn.MSELoss(reduction='mean'), log_step=10, test_loader=test_loader
)
train_losses.append(train_losses_in_epoch)
test_losses.append(test_losses_in_epoch)
train_losses = torch.stack(train_losses, dim=0)
test_losses = torch.stack(test_losses, dim=0)
# %% [markdown]
# ## Plotting training and test loss
# %%
plt.plot(torch.flatten(train_losses), label='Train loss')
plt.plot(torch.flatten(test_losses), label='Test loss')
plt.yscale('log')
plt.legend(loc='upper right')
# %% [markdown]
# # California housing example
# %% [markdown]
# ## Prepare the data
# %%
california_housing = fetch_california_housing(as_frame=True)
# california_housing.frame
# california_housing.data
# california_housing.target
# %%
x_train, x_test, y_train, y_test = train_test_split(california_housing.data, california_housing.target, test_size=0.25)
# %%
x_train = StandardScaler().fit_transform(x_train.to_numpy(dtype=np.float32))
x_test = StandardScaler().fit_transform(x_test.to_numpy(dtype=np.float32))
y_train = y_train.to_numpy(dtype=np.float32)
y_test = y_test.to_numpy(dtype=np.float32)
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
# %%
beta = 0.
theta = 0.01
train_dataset = PoisonedDataset(torch.from_numpy(x_train), torch.from_numpy(y_train), beta=beta, theta=theta)
# test_dataset = PoisonedDataset(torch.from_numpy(x_test), torch.from_numpy(y_test), beta=beta, theta=theta)
# %%
train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
# test_loader = DataLoader(test_dataset, batch_size=len(test_dataset), shuffle=False)
# %% [markdown]
# ## Instantiate NeuroTron class
# %%
neurotron = NeuroTron(n=8, r=6, h=10, dtype=torch.float32)
# %% [markdown]
# ## Training
# %%
num_epochs = 10
train_losses = []
verbose_msg = 'Train epoch {:' + str(len(str(num_epochs))) + '} of {:' + str(len(str(num_epochs))) +'}'
for epoch in range(num_epochs):
print(verbose_msg.format(epoch+1, num_epochs))
train_losses_in_epoch, _ = neurotron.train(
train_loader, stepsize=0.00001, loss=nn.MSELoss(reduction='mean'), log_step=10, test_loader=None
)
train_losses.append(train_losses_in_epoch)
train_losses = torch.stack(train_losses, dim=0)
# %% [markdown]
# ## Plotting training and test loss
# %%
plt.plot(torch.flatten(train_losses), label="Train loss")
plt.yscale('log')
plt.legend(loc='upper right')
# %% [markdown]
# ## Printing dimensions of various tensors
# %%
x, y = next(iter(train_loader))
# %%
x.shape, x.shape[0], x.shape[1], y.shape
# %%
neurotron.w.shape, neurotron.A.shape, neurotron.M.shape
# %%
output = neurotron.forward(x)
# %%
output.shape
# %%
neurotron.w.shape, neurotron.A[0, :, :].shape, x.t().shape, x.shape
# %%
torch.matmul(neurotron.w, neurotron.A[0, :, :]).shape
# %%
torch.matmul(torch.matmul(neurotron.w, neurotron.A[0, :, :]), x.t()).shape
# %%
| 8,128 | 25.478827 | 122 | py |
CTAB-GAN-Plus | CTAB-GAN-Plus-main/model/synthesizer/ctabgan_synthesizer.py | import numpy as np
import pandas as pd
import torch
import torch.utils.data
import torch.optim as optim
from torch.optim import Adam
from torch.nn import functional as F
from torch.nn import (Dropout, LeakyReLU, Linear, Module, ReLU, Sequential,
Conv2d, ConvTranspose2d, Sigmoid, init, BCELoss, CrossEntropyLoss,SmoothL1Loss,LayerNorm)
from model.synthesizer.transformer import ImageTransformer,DataTransformer
from model.privacy_utils.rdp_accountant import compute_rdp, get_privacy_spent
from tqdm import tqdm
class Classifier(Module):
def __init__(self,input_dim, dis_dims,st_ed):
super(Classifier,self).__init__()
dim = input_dim-(st_ed[1]-st_ed[0])
seq = []
self.str_end = st_ed
for item in list(dis_dims):
seq += [
Linear(dim, item),
LeakyReLU(0.2),
Dropout(0.5)
]
dim = item
if (st_ed[1]-st_ed[0])==1:
seq += [Linear(dim, 1)]
elif (st_ed[1]-st_ed[0])==2:
seq += [Linear(dim, 1),Sigmoid()]
else:
seq += [Linear(dim,(st_ed[1]-st_ed[0]))]
self.seq = Sequential(*seq)
def forward(self, input):
label=None
if (self.str_end[1]-self.str_end[0])==1:
label = input[:, self.str_end[0]:self.str_end[1]]
else:
label = torch.argmax(input[:, self.str_end[0]:self.str_end[1]], axis=-1)
new_imp = torch.cat((input[:,:self.str_end[0]],input[:,self.str_end[1]:]),1)
if ((self.str_end[1]-self.str_end[0])==2) | ((self.str_end[1]-self.str_end[0])==1):
return self.seq(new_imp).view(-1), label
else:
return self.seq(new_imp), label
# Map raw generator outputs to the data encoding: tanh for the continuous scalar
# parts and Gumbel-softmax (tau=0.2) for the one-hot categorical/mode spans.
def apply_activate(data, output_info):
data_t = []
st = 0
for item in output_info:
if item[1] == 'tanh':
ed = st + item[0]
data_t.append(torch.tanh(data[:, st:ed]))
st = ed
elif item[1] == 'softmax':
ed = st + item[0]
data_t.append(F.gumbel_softmax(data[:, st:ed], tau=0.2))
st = ed
return torch.cat(data_t, dim=1)
def get_st_ed(target_col_index,output_info):
st = 0
c= 0
tc= 0
for item in output_info:
if c==target_col_index:
break
if item[1]=='tanh':
st += item[0]
if item[2] == 'yes_g':
c+=1
elif item[1] == 'softmax':
st += item[0]
c+=1
tc+=1
ed= st+output_info[tc][0]
return (st,ed)
def random_choice_prob_index_sampling(probs,col_idx):
option_list = []
for i in col_idx:
pp = probs[i]
option_list.append(np.random.choice(np.arange(len(probs[i])), p=pp))
return np.array(option_list).reshape(col_idx.shape)
def random_choice_prob_index(a, axis=1):
r = np.expand_dims(np.random.rand(a.shape[1 - axis]), axis=axis)
return (a.cumsum(axis=axis) > r).argmax(axis=axis)
def maximum_interval(output_info):
max_interval = 0
for item in output_info:
max_interval = max(max_interval, item[0])
return max_interval
class Cond(object):
def __init__(self, data, output_info):
self.model = []
st = 0
counter = 0
for item in output_info:
if item[1] == 'tanh':
st += item[0]
continue
elif item[1] == 'softmax':
ed = st + item[0]
counter += 1
self.model.append(np.argmax(data[:, st:ed], axis=-1))
st = ed
self.interval = []
self.n_col = 0
self.n_opt = 0
st = 0
self.p = np.zeros((counter, maximum_interval(output_info)))
self.p_sampling = []
for item in output_info:
if item[1] == 'tanh':
st += item[0]
continue
elif item[1] == 'softmax':
ed = st + item[0]
tmp = np.sum(data[:, st:ed], axis=0)
tmp_sampling = np.sum(data[:, st:ed], axis=0)
tmp = np.log(tmp + 1)
tmp = tmp / np.sum(tmp)
tmp_sampling = tmp_sampling / np.sum(tmp_sampling)
self.p_sampling.append(tmp_sampling)
self.p[self.n_col, :item[0]] = tmp
self.interval.append((self.n_opt, item[0]))
self.n_opt += item[0]
self.n_col += 1
st = ed
self.interval = np.asarray(self.interval)
def sample_train(self, batch):
if self.n_col == 0:
return None
batch = batch
idx = np.random.choice(np.arange(self.n_col), batch)
vec = np.zeros((batch, self.n_opt), dtype='float32')
mask = np.zeros((batch, self.n_col), dtype='float32')
mask[np.arange(batch), idx] = 1
opt1prime = random_choice_prob_index(self.p[idx])
for i in np.arange(batch):
vec[i, self.interval[idx[i], 0] + opt1prime[i]] = 1
return vec, mask, idx, opt1prime
def sample(self, batch):
if self.n_col == 0:
return None
batch = batch
idx = np.random.choice(np.arange(self.n_col), batch)
vec = np.zeros((batch, self.n_opt), dtype='float32')
opt1prime = random_choice_prob_index_sampling(self.p_sampling,idx)
for i in np.arange(batch):
vec[i, self.interval[idx[i], 0] + opt1prime[i]] = 1
return vec
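# Cond.sample_train draws conditioning modes from log-smoothed frequencies (self.p)
# to up-weight rare categories during training, while Cond.sample uses the raw
# frequencies (self.p_sampling) so that generation matches the real marginals.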
def cond_loss(data, output_info, c, m):
loss = []
st = 0
st_c = 0
for item in output_info:
if item[1] == 'tanh':
st += item[0]
continue
elif item[1] == 'softmax':
ed = st + item[0]
ed_c = st_c + item[0]
tmp = F.cross_entropy(
data[:, st:ed],
torch.argmax(c[:, st_c:ed_c], dim=1),
reduction='none')
loss.append(tmp)
st = ed
st_c = ed_c
loss = torch.stack(loss, dim=1)
return (loss * m).sum() / data.size()[0]
class Sampler(object):
def __init__(self, data, output_info):
super(Sampler, self).__init__()
self.data = data
self.model = []
self.n = len(data)
st = 0
for item in output_info:
if item[1] == 'tanh':
st += item[0]
continue
elif item[1] == 'softmax':
ed = st + item[0]
tmp = []
for j in range(item[0]):
tmp.append(np.nonzero(data[:, st + j])[0])
self.model.append(tmp)
st = ed
def sample(self, n, col, opt):
if col is None:
idx = np.random.choice(np.arange(self.n), n)
return self.data[idx]
idx = []
for c, o in zip(col, opt):
idx.append(np.random.choice(self.model[c][o]))
return self.data[idx]
class Discriminator(Module):
def __init__(self, side, layers):
super(Discriminator, self).__init__()
self.side = side
info = len(layers)-2
self.seq = Sequential(*layers)
self.seq_info = Sequential(*layers[:info])
def forward(self, input):
return (self.seq(input)), self.seq_info(input)
class Generator(Module):
def __init__(self, side, layers):
super(Generator, self).__init__()
self.side = side
self.seq = Sequential(*layers)
def forward(self, input_):
return self.seq(input_)
def determine_layers_disc(side, num_channels):
assert side >= 4 and side <= 64
layer_dims = [(1, side), (num_channels, side // 2)]
while layer_dims[-1][1] > 3 and len(layer_dims) < 4:
layer_dims.append((layer_dims[-1][0] * 2, layer_dims[-1][1] // 2))
layerNorms = []
num_c = num_channels
num_s = side / 2
for l in range(len(layer_dims) - 1):
layerNorms.append([int(num_c), int(num_s), int(num_s)])
num_c = num_c * 2
num_s = num_s / 2
layers_D = []
for prev, curr, ln in zip(layer_dims, layer_dims[1:], layerNorms):
layers_D += [
Conv2d(prev[0], curr[0], 4, 2, 1, bias=False),
LayerNorm(ln),
LeakyReLU(0.2, inplace=True),
]
layers_D += [Conv2d(layer_dims[-1][0], 1, layer_dims[-1][1], 1, 0), ReLU(True)]
return layers_D
def determine_layers_gen(side, random_dim, num_channels):
assert side >= 4 and side <= 64
layer_dims = [(1, side), (num_channels, side // 2)]
while layer_dims[-1][1] > 3 and len(layer_dims) < 4:
layer_dims.append((layer_dims[-1][0] * 2, layer_dims[-1][1] // 2))
layerNorms = []
num_c = num_channels * (2 ** (len(layer_dims) - 2))
num_s = int(side / (2 ** (len(layer_dims) - 1)))
for l in range(len(layer_dims) - 1):
layerNorms.append([int(num_c), int(num_s), int(num_s)])
num_c = num_c / 2
num_s = num_s * 2
layers_G = [ConvTranspose2d(random_dim, layer_dims[-1][0], layer_dims[-1][1], 1, 0, output_padding=0, bias=False)]
for prev, curr, ln in zip(reversed(layer_dims), reversed(layer_dims[:-1]), layerNorms):
layers_G += [LayerNorm(ln), ReLU(True), ConvTranspose2d(prev[0], curr[0], 4, 2, 1, output_padding=0, bias=True)]
return layers_G
# Spherical linear interpolation between corresponding rows of low and high.
def slerp(val, low, high):
low_norm = low/torch.norm(low, dim=1, keepdim=True)
high_norm = high/torch.norm(high, dim=1, keepdim=True)
omega = torch.acos((low_norm*high_norm).sum(1)).view(val.size(0), 1)
so = torch.sin(omega)
res = (torch.sin((1.0-val)*omega)/so)*low + (torch.sin(val*omega)/so) * high
return res
# WGAN-GP style gradient penalty, evaluated at slerp interpolates between real and
# fake batches (after mapping them to the image layout); lambda_ weights the penalty.
def calc_gradient_penalty_slerp(netD, real_data, fake_data, transformer, device='cpu', lambda_=10):
batchsize = real_data.shape[0]
alpha = torch.rand(batchsize, 1, device=device)
interpolates = slerp(alpha, real_data, fake_data)
interpolates = interpolates.to(device)
interpolates = transformer.transform(interpolates)
interpolates = torch.autograd.Variable(interpolates, requires_grad=True)
disc_interpolates,_ = netD(interpolates)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradients_norm = gradients.norm(2, dim=1)
gradient_penalty = ((gradients_norm - 1) ** 2).mean() * lambda_
return gradient_penalty
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0)
class CTABGANSynthesizer:
def __init__(self,
class_dim=(256, 256, 256, 256),
random_dim=100,
num_channels=64,
l2scale=1e-5,
batch_size=500,
epochs=150):
self.random_dim = random_dim
self.class_dim = class_dim
self.num_channels = num_channels
self.dside = None
self.gside = None
self.l2scale = l2scale
self.batch_size = batch_size
self.epochs = epochs
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def fit(self, train_data=pd.DataFrame, categorical=[], mixed={}, general=[], non_categorical=[], type={}):
problem_type = None
target_index=None
if type:
problem_type = list(type.keys())[0]
if problem_type:
target_index = train_data.columns.get_loc(type[problem_type])
self.transformer = DataTransformer(train_data=train_data, categorical_list=categorical, mixed_dict=mixed, general_list=general, non_categorical_list=non_categorical)
self.transformer.fit()
train_data = self.transformer.transform(train_data.values)
data_sampler = Sampler(train_data, self.transformer.output_info)
data_dim = self.transformer.output_dim
self.cond_generator = Cond(train_data, self.transformer.output_info)
sides = [4, 8, 16, 24, 32, 64]
col_size_d = data_dim + self.cond_generator.n_opt
for i in sides:
if i * i >= col_size_d:
self.dside = i
break
sides = [4, 8, 16, 24, 32, 64]
col_size_g = data_dim
for i in sides:
if i * i >= col_size_g:
self.gside = i
break
layers_G = determine_layers_gen(self.gside, self.random_dim+self.cond_generator.n_opt, self.num_channels)
layers_D = determine_layers_disc(self.dside, self.num_channels)
self.generator = Generator(self.gside, layers_G).to(self.device)
discriminator = Discriminator(self.dside, layers_D).to(self.device)
optimizer_params = dict(lr=2e-4, betas=(0.5, 0.9), eps=1e-3, weight_decay=self.l2scale)
optimizerG = Adam(self.generator.parameters(), **optimizer_params)
optimizerD = Adam(discriminator.parameters(), **optimizer_params)
st_ed = None
classifier=None
optimizerC= None
if target_index != None:
st_ed= get_st_ed(target_index,self.transformer.output_info)
classifier = Classifier(data_dim,self.class_dim,st_ed).to(self.device)
optimizerC = optim.Adam(classifier.parameters(),**optimizer_params)
self.generator.apply(weights_init)
discriminator.apply(weights_init)
self.Gtransformer = ImageTransformer(self.gside)
self.Dtransformer = ImageTransformer(self.dside)
epsilon = 0
epoch = 0
steps = 0
ci = 1
steps_per_epoch = max(1, len(train_data) // self.batch_size)
for i in tqdm(range(self.epochs)):
for id_ in range(steps_per_epoch):
for _ in range(ci):
noisez = torch.randn(self.batch_size, self.random_dim, device=self.device)
condvec = self.cond_generator.sample_train(self.batch_size)
c, m, col, opt = condvec
c = torch.from_numpy(c).to(self.device)
m = torch.from_numpy(m).to(self.device)
noisez = torch.cat([noisez, c], dim=1)
noisez = noisez.view(self.batch_size,self.random_dim+self.cond_generator.n_opt,1,1)
perm = np.arange(self.batch_size)
np.random.shuffle(perm)
real = data_sampler.sample(self.batch_size, col[perm], opt[perm])
c_perm = c[perm]
real = torch.from_numpy(real.astype('float32')).to(self.device)
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket, self.transformer.output_info)
fake_cat = torch.cat([fakeact, c], dim=1)
real_cat = torch.cat([real, c_perm], dim=1)
real_cat_d = self.Dtransformer.transform(real_cat)
fake_cat_d = self.Dtransformer.transform(fake_cat)
optimizerD.zero_grad()
d_real,_ = discriminator(real_cat_d)
d_real = -torch.mean(d_real)
d_real.backward()
d_fake,_ = discriminator(fake_cat_d)
d_fake = torch.mean(d_fake)
d_fake.backward()
pen = calc_gradient_penalty_slerp(discriminator, real_cat, fake_cat, self.Dtransformer , self.device)
pen.backward()
optimizerD.step()
noisez = torch.randn(self.batch_size, self.random_dim, device=self.device)
condvec = self.cond_generator.sample_train(self.batch_size)
c, m, col, opt = condvec
c = torch.from_numpy(c).to(self.device)
m = torch.from_numpy(m).to(self.device)
noisez = torch.cat([noisez, c], dim=1)
noisez = noisez.view(self.batch_size,self.random_dim+self.cond_generator.n_opt,1,1)
optimizerG.zero_grad()
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket, self.transformer.output_info)
fake_cat = torch.cat([fakeact, c], dim=1)
fake_cat = self.Dtransformer.transform(fake_cat)
y_fake,info_fake = discriminator(fake_cat)
cross_entropy = cond_loss(faket, self.transformer.output_info, c, m)
_,info_real = discriminator(real_cat_d)
g = -torch.mean(y_fake) + cross_entropy
g.backward(retain_graph=True)
loss_mean = torch.norm(torch.mean(info_fake.view(self.batch_size,-1), dim=0) - torch.mean(info_real.view(self.batch_size,-1), dim=0), 1)
loss_std = torch.norm(torch.std(info_fake.view(self.batch_size,-1), dim=0) - torch.std(info_real.view(self.batch_size,-1), dim=0), 1)
loss_info = loss_mean + loss_std
loss_info.backward()
optimizerG.step()
if problem_type:
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket, self.transformer.output_info)
real_pre, real_label = classifier(real)
fake_pre, fake_label = classifier(fakeact)
c_loss = CrossEntropyLoss()
if (st_ed[1] - st_ed[0])==1:
c_loss= SmoothL1Loss()
real_label = real_label.type_as(real_pre)
fake_label = fake_label.type_as(fake_pre)
real_label = torch.reshape(real_label,real_pre.size())
fake_label = torch.reshape(fake_label,fake_pre.size())
elif (st_ed[1] - st_ed[0])==2:
c_loss = BCELoss()
real_label = real_label.type_as(real_pre)
fake_label = fake_label.type_as(fake_pre)
loss_cc = c_loss(real_pre, real_label)
loss_cg = c_loss(fake_pre, fake_label)
optimizerG.zero_grad()
loss_cg.backward()
optimizerG.step()
optimizerC.zero_grad()
loss_cc.backward()
optimizerC.step()
epoch += 1
def sample(self, n):
self.generator.eval()
output_info = self.transformer.output_info
steps = n // self.batch_size + 1
data = []
for i in range(steps):
noisez = torch.randn(self.batch_size, self.random_dim, device=self.device)
condvec = self.cond_generator.sample(self.batch_size)
c = condvec
c = torch.from_numpy(c).to(self.device)
noisez = torch.cat([noisez, c], dim=1)
noisez = noisez.view(self.batch_size,self.random_dim+self.cond_generator.n_opt,1,1)
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket,output_info)
data.append(fakeact.detach().cpu().numpy())
data = np.concatenate(data, axis=0)
result,resample = self.transformer.inverse_transform(data)
while len(result) < n:
data_resample = []
steps_left = resample// self.batch_size + 1
for i in range(steps_left):
noisez = torch.randn(self.batch_size, self.random_dim, device=self.device)
condvec = self.cond_generator.sample(self.batch_size)
c = condvec
c = torch.from_numpy(c).to(self.device)
noisez = torch.cat([noisez, c], dim=1)
noisez = noisez.view(self.batch_size,self.random_dim+self.cond_generator.n_opt,1,1)
fake = self.generator(noisez)
faket = self.Gtransformer.inverse_transform(fake)
fakeact = apply_activate(faket, output_info)
data_resample.append(fakeact.detach().cpu().numpy())
data_resample = np.concatenate(data_resample, axis=0)
res,resample = self.transformer.inverse_transform(data_resample)
result = np.concatenate([result,res],axis=0)
return result[0:n]
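# Usage sketch (hypothetical column names; mirrors the CTAB-GAN+ training pipeline):
#   synthesizer = CTABGANSynthesizer(epochs=150)
#   synthesizer.fit(train_data=df, categorical=['workclass'], mixed={'capital-gain': [0.0]},
#                   type={'Classification': 'income'})
#   synthetic = synthesizer.sample(len(df))  # numpy array in the original column order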
| 21,711 | 35.186667 | 173 | py |
CTAB-GAN-Plus | CTAB-GAN-Plus-main/model/synthesizer/transformer.py | import numpy as np
import pandas as pd
import torch
from sklearn.mixture import BayesianGaussianMixture
class DataTransformer():
def __init__(self, train_data=pd.DataFrame, categorical_list=[], mixed_dict={}, general_list=[], non_categorical_list=[], n_clusters=10, eps=0.005):
self.meta = None
self.n_clusters = n_clusters
self.eps = eps
self.train_data = train_data
self.categorical_columns= categorical_list
self.mixed_columns= mixed_dict
self.general_columns = general_list
self.non_categorical_columns= non_categorical_list
def get_metadata(self):
meta = []
for index in range(self.train_data.shape[1]):
column = self.train_data.iloc[:,index]
if index in self.categorical_columns:
if index in self.non_categorical_columns:
meta.append({
"name": index,
"type": "continuous",
"min": column.min(),
"max": column.max(),
})
else:
mapper = column.value_counts().index.tolist()
meta.append({
"name": index,
"type": "categorical",
"size": len(mapper),
"i2s": mapper
})
elif index in self.mixed_columns.keys():
meta.append({
"name": index,
"type": "mixed",
"min": column.min(),
"max": column.max(),
"modal": self.mixed_columns[index]
})
else:
meta.append({
"name": index,
"type": "continuous",
"min": column.min(),
"max": column.max(),
})
return meta
def fit(self):
data = self.train_data.values
self.meta = self.get_metadata()
model = []
self.ordering = []
self.output_info = []
self.output_dim = 0
self.components = []
self.filter_arr = []
for id_, info in enumerate(self.meta):
if info['type'] == "continuous":
if id_ not in self.general_columns:
gm = BayesianGaussianMixture(
n_components = self.n_clusters,
weight_concentration_prior_type='dirichlet_process',
weight_concentration_prior=0.001,
max_iter=100,n_init=1, random_state=42)
gm.fit(data[:, id_].reshape([-1, 1]))
mode_freq = (pd.Series(gm.predict(data[:, id_].reshape([-1, 1]))).value_counts().keys())
model.append(gm)
old_comp = gm.weights_ > self.eps
comp = []
for i in range(self.n_clusters):
if (i in (mode_freq)) & old_comp[i]:
comp.append(True)
else:
comp.append(False)
self.components.append(comp)
self.output_info += [(1, 'tanh','no_g'), (np.sum(comp), 'softmax')]
self.output_dim += 1 + np.sum(comp)
else:
model.append(None)
self.components.append(None)
self.output_info += [(1, 'tanh','yes_g')]
self.output_dim += 1
elif info['type'] == "mixed":
gm1 = BayesianGaussianMixture(
n_components = self.n_clusters,
weight_concentration_prior_type='dirichlet_process',
weight_concentration_prior=0.001, max_iter=100,
n_init=1,random_state=42)
gm2 = BayesianGaussianMixture(
n_components = self.n_clusters,
weight_concentration_prior_type='dirichlet_process',
weight_concentration_prior=0.001, max_iter=100,
n_init=1,random_state=42)
gm1.fit(data[:, id_].reshape([-1, 1]))
filter_arr = []
for element in data[:, id_]:
if element not in info['modal']:
filter_arr.append(True)
else:
filter_arr.append(False)
gm2.fit(data[:, id_][filter_arr].reshape([-1, 1]))
mode_freq = (pd.Series(gm2.predict(data[:, id_][filter_arr].reshape([-1, 1]))).value_counts().keys())
self.filter_arr.append(filter_arr)
model.append((gm1,gm2))
old_comp = gm2.weights_ > self.eps
comp = []
for i in range(self.n_clusters):
if (i in (mode_freq)) & old_comp[i]:
comp.append(True)
else:
comp.append(False)
self.components.append(comp)
self.output_info += [(1, 'tanh',"no_g"), (np.sum(comp) + len(info['modal']), 'softmax')]
self.output_dim += 1 + np.sum(comp) + len(info['modal'])
else:
model.append(None)
self.components.append(None)
self.output_info += [(info['size'], 'softmax')]
self.output_dim += info['size']
self.model = model
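        # After fitting, each continuous column (unless in general_columns) is encoded by
        # mode-specific normalization: a scalar (x - mu_k) / (4 * sigma_k) for the sampled
        # Bayesian-GMM mode k plus a one-hot mode indicator; mixed columns get additional
        # one-hot slots for their modal values, matching the output_dim bookkeeping above.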
def transform(self, data, ispositive = False, positive_list = None):
values = []
mixed_counter = 0
for id_, info in enumerate(self.meta):
current = data[:, id_]
if info['type'] == "continuous":
if id_ not in self.general_columns:
current = current.reshape([-1, 1])
means = self.model[id_].means_.reshape((1, self.n_clusters))
stds = np.sqrt(self.model[id_].covariances_).reshape((1, self.n_clusters))
features = np.empty(shape=(len(current),self.n_clusters))
if ispositive == True:
if id_ in positive_list:
features = np.abs(current - means) / (4 * stds)
else:
features = (current - means) / (4 * stds)
probs = self.model[id_].predict_proba(current.reshape([-1, 1]))
n_opts = sum(self.components[id_])
features = features[:, self.components[id_]]
probs = probs[:, self.components[id_]]
opt_sel = np.zeros(len(data), dtype='int')
for i in range(len(data)):
pp = probs[i] + 1e-6
pp = pp / sum(pp)
opt_sel[i] = np.random.choice(np.arange(n_opts), p=pp)
idx = np.arange((len(features)))
features = features[idx, opt_sel].reshape([-1, 1])
features = np.clip(features, -.99, .99)
probs_onehot = np.zeros_like(probs)
probs_onehot[np.arange(len(probs)), opt_sel] = 1
re_ordered_phot = np.zeros_like(probs_onehot)
col_sums = probs_onehot.sum(axis=0)
n = probs_onehot.shape[1]
largest_indices = np.argsort(-1*col_sums)[:n]
self.ordering.append(largest_indices)
for id,val in enumerate(largest_indices):
re_ordered_phot[:,id] = probs_onehot[:,val]
values += [features, re_ordered_phot]
else:
self.ordering.append(None)
if id_ in self.non_categorical_columns:
info['min'] = -1e-3
info['max'] = info['max'] + 1e-3
current = (current - (info['min'])) / (info['max'] - info['min'])
current = current * 2 - 1
current = current.reshape([-1, 1])
values.append(current)
elif info['type'] == "mixed":
means_0 = self.model[id_][0].means_.reshape([-1])
stds_0 = np.sqrt(self.model[id_][0].covariances_).reshape([-1])
zero_std_list = []
means_needed = []
stds_needed = []
for mode in info['modal']:
if mode!=-9999999:
dist = []
for idx,val in enumerate(list(means_0.flatten())):
dist.append(abs(mode-val))
index_min = np.argmin(np.array(dist))
zero_std_list.append(index_min)
else: continue
for idx in zero_std_list:
means_needed.append(means_0[idx])
stds_needed.append(stds_0[idx])
mode_vals = []
for i,j,k in zip(info['modal'],means_needed,stds_needed):
this_val = np.abs(i - j) / (4*k)
mode_vals.append(this_val)
if -9999999 in info["modal"]:
mode_vals.append(0)
current = current.reshape([-1, 1])
filter_arr = self.filter_arr[mixed_counter]
current = current[filter_arr]
means = self.model[id_][1].means_.reshape((1, self.n_clusters))
stds = np.sqrt(self.model[id_][1].covariances_).reshape((1, self.n_clusters))
                features = np.empty(shape=(len(current), self.n_clusters))
                # same positive-column guard as the continuous branch above
                if ispositive and id_ in positive_list:
                    features = np.abs(current - means) / (4 * stds)
                else:
                    features = (current - means) / (4 * stds)
probs = self.model[id_][1].predict_proba(current.reshape([-1, 1]))
n_opts = sum(self.components[id_]) # 8
features = features[:, self.components[id_]]
probs = probs[:, self.components[id_]]
opt_sel = np.zeros(len(current), dtype='int')
for i in range(len(current)):
pp = probs[i] + 1e-6
pp = pp / sum(pp)
opt_sel[i] = np.random.choice(np.arange(n_opts), p=pp)
idx = np.arange((len(features)))
features = features[idx, opt_sel].reshape([-1, 1])
features = np.clip(features, -.99, .99)
probs_onehot = np.zeros_like(probs)
probs_onehot[np.arange(len(probs)), opt_sel] = 1
extra_bits = np.zeros([len(current), len(info['modal'])])
temp_probs_onehot = np.concatenate([extra_bits,probs_onehot], axis = 1)
final = np.zeros([len(data), 1 + probs_onehot.shape[1] + len(info['modal'])])
features_curser = 0
for idx, val in enumerate(data[:, id_]):
if val in info['modal']:
category_ = list(map(info['modal'].index, [val]))[0]
final[idx, 0] = mode_vals[category_]
final[idx, (category_+1)] = 1
else:
final[idx, 0] = features[features_curser]
final[idx, (1+len(info['modal'])):] = temp_probs_onehot[features_curser][len(info['modal']):]
features_curser = features_curser + 1
just_onehot = final[:,1:]
re_ordered_jhot= np.zeros_like(just_onehot)
n = just_onehot.shape[1]
col_sums = just_onehot.sum(axis=0)
largest_indices = np.argsort(-1*col_sums)[:n]
self.ordering.append(largest_indices)
for id,val in enumerate(largest_indices):
re_ordered_jhot[:,id] = just_onehot[:,val]
final_features = final[:,0].reshape([-1, 1])
values += [final_features, re_ordered_jhot]
mixed_counter = mixed_counter + 1
else:
self.ordering.append(None)
col_t = np.zeros([len(data), info['size']])
idx = list(map(info['i2s'].index, current))
col_t[np.arange(len(data)), idx] = 1
values.append(col_t)
return np.concatenate(values, axis=1)
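    # `inverse_transform` decodes generated rows back to the original space and
    # drops any row whose decoded value falls outside the observed [min, max]
    # of its column, returning (valid_rows, number_of_invalid_rows).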
def inverse_transform(self, data):
data_t = np.zeros([len(data), len(self.meta)])
invalid_ids = []
st = 0
for id_, info in enumerate(self.meta):
if info['type'] == "continuous":
if id_ not in self.general_columns:
u = data[:, st]
v = data[:, st + 1:st + 1 + np.sum(self.components[id_])]
order = self.ordering[id_]
v_re_ordered = np.zeros_like(v)
for id,val in enumerate(order):
v_re_ordered[:,val] = v[:,id]
v = v_re_ordered
u = np.clip(u, -1, 1)
v_t = np.ones((data.shape[0], self.n_clusters)) * -100
v_t[:, self.components[id_]] = v
v = v_t
st += 1 + np.sum(self.components[id_])
means = self.model[id_].means_.reshape([-1])
stds = np.sqrt(self.model[id_].covariances_).reshape([-1])
p_argmax = np.argmax(v, axis=1)
std_t = stds[p_argmax]
mean_t = means[p_argmax]
tmp = u * 4 * std_t + mean_t
for idx,val in enumerate(tmp):
if (val < info["min"]) | (val > info['max']):
invalid_ids.append(idx)
if id_ in self.non_categorical_columns:
tmp = np.round(tmp)
data_t[:, id_] = tmp
else:
u = data[:, st]
u = (u + 1) / 2
u = np.clip(u, 0, 1)
u = u * (info['max'] - info['min']) + info['min']
if id_ in self.non_categorical_columns:
data_t[:, id_] = np.round(u)
else: data_t[:, id_] = u
st += 1
elif info['type'] == "mixed":
u = data[:, st]
full_v = data[:,(st+1):(st+1)+len(info['modal'])+np.sum(self.components[id_])]
order = self.ordering[id_]
full_v_re_ordered = np.zeros_like(full_v)
for id,val in enumerate(order):
full_v_re_ordered[:,val] = full_v[:,id]
full_v = full_v_re_ordered
mixed_v = full_v[:,:len(info['modal'])]
v = full_v[:,-np.sum(self.components[id_]):]
u = np.clip(u, -1, 1)
v_t = np.ones((data.shape[0], self.n_clusters)) * -100
v_t[:, self.components[id_]] = v
v = np.concatenate([mixed_v,v_t], axis=1)
st += 1 + np.sum(self.components[id_]) + len(info['modal'])
means = self.model[id_][1].means_.reshape([-1])
stds = np.sqrt(self.model[id_][1].covariances_).reshape([-1])
p_argmax = np.argmax(v, axis=1)
result = np.zeros_like(u)
for idx in range(len(data)):
if p_argmax[idx] < len(info['modal']):
argmax_value = p_argmax[idx]
result[idx] = float(list(map(info['modal'].__getitem__, [argmax_value]))[0])
else:
std_t = stds[(p_argmax[idx]-len(info['modal']))]
mean_t = means[(p_argmax[idx]-len(info['modal']))]
result[idx] = u[idx] * 4 * std_t + mean_t
for idx,val in enumerate(result):
if (val < info["min"]) | (val > info['max']):
invalid_ids.append(idx)
data_t[:, id_] = result
else:
current = data[:, st:st + info['size']]
st += info['size']
idx = np.argmax(current, axis=1)
data_t[:, id_] = list(map(info['i2s'].__getitem__, idx))
invalid_ids = np.unique(np.array(invalid_ids))
all_ids = np.arange(0,len(data))
valid_ids = list(set(all_ids) - set(invalid_ids))
return data_t[valid_ids],len(invalid_ids)
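# ImageTransformer packs each flat encoded row into a 1 x side x side square
# (zero-padding up to side**2) so convolutional layers can process tabular
# data, and flattens the square back in inverse_transform.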
class ImageTransformer():
def __init__(self, side):
self.height = side
def transform(self, data):
if self.height * self.height > len(data[0]):
padding = torch.zeros((len(data), self.height * self.height - len(data[0]))).to(data.device)
data = torch.cat([data, padding], axis=1)
return data.view(-1, 1, self.height, self.height)
def inverse_transform(self, data):
data = data.view(-1, self.height * self.height)
return data
| 17,809 | 40.418605 | 152 | py |
AutoCO | AutoCO-main/exp_simulate/fm_nas/simulate.py | import io
import os
import time
import pickle
import random
import logging
import datetime
import argparse
import coloredlogs
import numpy as np
import pandas as pd
from torch.utils.data import WeightedRandomSampler
from policy import FmEGreedy, Random, Greedy, FmGreedy, LinUCB, FmThompson, TS
logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG')
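# Simulator reward: with simulate_type == "click" each impression draws a
# Bernoulli sample from the ground-truth CTR item_ctr[x][k]; any other value
# returns the expected CTR directly (a noise-free reward signal).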
def click(x, k, item_ctr, r_t):
"return the reward for impression"
if r_t == "click":
t = random.uniform(0,1)
if item_ctr[x][k] > t:
return 1
return 0
else:
return item_ctr[x][k]
def process_batch(features_list, memory, policy, item_ctr):
"recommend items under current policy"
# data: the impression items in current batch
data = []
for x in memory:
t = [x]
t.extend(features_list[x])
data.append(t)
# policy recommend creative
if policy.name == 'Greedy':
res = policy.recommend_batch(data, item_ctr)
else:
res = policy.recommend_batch(data)
return res
def evaluate(policy, params):
"process of simulation"
# initial reward recorder var
record_arr = [0.5,1,2,3,4,5,6,7,8,9,10,12,14,16,18,20,22,24,26,28,30,32,36,40,45,50,60,70,80,90,
100,110,120,130,140,150,160,170,180,190,200,210,220,230,240,250,
260,280,300,330,360,400,500]
score, impressions = 0.0, 1.0
ctr_reward, auc_reward = [], []
# initial data recorder var
memory, record, r_list, eg_ind = [], [], [], []
cnt = 0
initial = 0
# initial background information
f = open(params["feature_list"], 'rb')
f_list = pickle.load(f)
f = open(params["pro_list"], 'rb')
pro_list = pickle.load(f)
f = open(params['creative_list'], 'rb')
c_list = pickle.load(f)
f = open(params['item_candidates'], 'rb')
item_candi = pickle.load(f)
f = open(params['item_ctr'], 'rb')
item_ctr = pickle.load(f)
f_len = len(f_list[0])
    leng = f_len + len(c_list[0])
item_cnt = params['batch']
df = pd.read_pickle(params["random_file"])
warm_start = list(df.to_numpy())
record = warm_start[200000:202000]
# the main process of simulation
while impressions <= params["iter"]:
cnt += 1
item_cnt += 1
# decide which item to display
if item_cnt >= params['batch']:
item_list = list(WeightedRandomSampler(pro_list, params['batch']))
item_cnt = 0
x = item_list[item_cnt]
# if cnt < params["batch"]:
# line = f_list[x].copy()
# k = np.random.randint(0, len(item_candi[x]))
# line.extend(c_list[item_candi[x][k]])
# line.append(click(x, k, item_ctr, params["simulate_type"]))
# record.append(line)
# eg_ind.append(x)
# continue
# update policy with batch data
if len(record) >= params['batch']-3 or initial == 0:
initial = 1
auc_reward.append(policy.update(record, eg_ind))
record = []
eg_ind = []
# collect reward in current batch
memory.append(x)
        if len(memory) % params['s_batch'] == 0 and len(memory) > 0:
r_list = process_batch(f_list, memory, policy, item_ctr)
for i in range(len(r_list)):
line = f_list[memory[i]].copy()
t= item_candi[memory[i]][r_list[i]]
line.extend(c_list[t])
reward = click(memory[i], r_list[i], item_ctr, params["simulate_type"])
line.append(reward)
record.append(line)
eg_ind.append(memory[i])
score += reward
impressions += 1
if impressions%10000 == 0:
logger.debug('{} behaviour has been generated, Ctr is {}!!!'.format(impressions, score/(impressions)))
print(ctr_reward)
if impressions/10000 in record_arr:
ctr_reward.append(score/impressions)
# if impressions%1000000 == 0:
# policy.update_in_log()
memory.clear()
# policy.print()
score /= impressions
print("CTR achieved by the policy: %.5f" % score)
return ctr_reward, auc_reward
def run(params):
model = params['model_ee']
if model == 'fmeg':
policy = FmEGreedy(params)
elif model == 'fmgreedy':
policy = FmGreedy(params)
elif model == 'random':
policy = Random(params)
elif model == 'greedy':
policy = Greedy(params)
elif model == 'ts':
policy = TS(params)
elif model == 'linucb':
policy = LinUCB(params)
elif model == "fmts":
policy = FmThompson(params)
else:
print("No model named ", model, " !!!")
return
res, auc_res = evaluate(policy, params)
return res, auc_res
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--info_path', type=str, default='dataset/data_nas')
parser.add_argument('--data_name', type=str, default="rand_0.15")
parser.add_argument('--simulate_type', type=str, default="click")
parser.add_argument('--iter', type=int, default=200000)
parser.add_argument('--dim', type=int, default=8)
parser.add_argument('--model_ee', type=str, default='random')
parser.add_argument('--model_nas', type=str, default="fm")
parser.add_argument('--oper', type=str, default='multiply')
parser.add_argument('--model_struct', type=int, default=0)
parser.add_argument('--epoch', type=int, default=1)
parser.add_argument('--decay', type=float, default=0.0001)
parser.add_argument('--learning', type=float, default=0.001)
parser.add_argument('--batch', type=int, default=5000)
parser.add_argument('--s_batch', type=int, default=5000)
parser.add_argument('--alpha', type=float, default=0.01)
parser.add_argument('--device', type=str, default='cuda')
parser.add_argument('--times', type=int, default=1)
parser.add_argument('--update', type=int, default=0)
parser.add_argument('--data_size', type=int, default=-1)
parser.add_argument('--sample', type=int, default=0)
parser.add_argument('--record', type=int, default=0)
parser.add_argument('--optimizer', type=str, default='adam')
parser.add_argument('--trick', type=int, default=0)
parser.add_argument('--first_order', type=int, default=0)
parser.add_argument('--calcu_dense', type=int, default=0)
parser.add_argument('--ts_trick', type=int, default=0)
parser.add_argument('--auc_record', type=int, default=0)
args = parser.parse_args()
params = {"warm_file": os.path.join(args.info_path, args.data_name,"warm_start.pkl"),
"creative_list": os.path.join(args.info_path, "creative_list.pkl"),
"item_candidates": os.path.join(args.info_path, "item_candidates.pkl"),
"pro_list": os.path.join(args.info_path, "pro_list.pkl"),
"feature_list": os.path.join(args.info_path, "feature_list.pkl"),
"feature_size": os.path.join(args.info_path, "feature_size.pkl"),
"item_ctr": os.path.join(args.info_path, args.data_name,"item_ctr.pkl"),
"random_file": os.path.join(args.info_path, args.data_name,"random_log.pkl"),
"model_ee": args.model_ee,
'learning': args.learning,
"batch": args.batch,
"dim": args.dim,
"simulate_type": args.simulate_type,
"ts_trick": args.ts_trick,
"arch": [0,1,0,3,0,3,0,1,4,2,0,4,3,2,0],
"trick": args.trick,
"model_struct": args.model_struct,
"model_nas": args.model_nas,
"operator": args.oper,
"s_batch": args.s_batch,
"epoch": args.epoch,
"decay": args.decay,
"device": args.device,
"times": args.times,
"iter": args.iter,
"first_order": args.first_order,
"update": args.update,
"auc_record": args.auc_record,
"data_size": args.data_size,
"sample": args.sample,
"alpha": args.alpha,
"record": args.record,
'optimizer': args.optimizer,
'calcu_dense': args.calcu_dense,
'dense': [0,6,0],
"early_fix_arch": False,
}
ISOTIMEFORMAT = '%m%d-%H%M%S'
timestamp = str(datetime.datetime.now().strftime(ISOTIMEFORMAT))
params["model_path"] = os.path.join(args.info_path, args.data_name,"model",timestamp+'.pt')
import json
params_str = json.dumps(params)
score, auc_score = [], []
for i in range(params['times']):
res, auc_res = run(params)
score.append(res)
filename = os.path.join(args.info_path, args.data_name,"res",params['model_ee']+"-"+timestamp+'.txt')
# filename = 'data/res/'+params['model']+"-"+timestamp+'.txt'
if params["record"] == 0:
with open(filename, 'w') as f:
f.write(params_str)
f.write('\n')
for i in range(len(score)):
s = [str(reward) for reward in score[i]]
f.write("time"+str(i)+": "+" ".join(s)+"\n")
if params["auc_record"] == 1:
auc_score.append(auc_res)
filename = os.path.join(args.info_path, args.data_name,"auc_res",params['model_ee']+"-"+timestamp+'.txt')
# filename = 'data/res/'+params['model']+"-"+timestamp+'.txt'
with open(filename, 'w') as f:
f.write(params_str)
f.write('\n')
for i in range(len(auc_score)):
s = [str(reward) for reward in auc_score[i]]
f.write("time"+str(i)+": "+" ".join(s)+"\n") | 9,990 | 37.724806 | 122 | py |
AutoCO | AutoCO-main/exp_simulate/fm_nas/utils.py | from collections import defaultdict

import numpy as np
import torch
from sklearn.metrics import roc_auc_score
from torch.optim.optimizer import Optimizer
class FTRL(Optimizer):
""" Implements FTRL online learning algorithm.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
alpha (float, optional): alpha parameter (default: 1.0)
beta (float, optional): beta parameter (default: 1.0)
l1 (float, optional): L1 regularization parameter (default: 1.0)
l2 (float, optional): L2 regularization parameter (default: 1.0)
.. _Ad Click Prediction: a View from the Trenches:
https://www.eecs.tufts.edu/%7Edsculley/papers/ad-click-prediction.pdf
"""
def __init__(self, params, alpha=1.0, beta=1.0, l1=1.0, l2=1.0):
if not 0.0 < alpha:
raise ValueError("Invalid alpha parameter: {}".format(alpha))
if not 0.0 < beta:
raise ValueError("Invalid beta parameter: {}".format(beta))
if not 0.0 <= l1:
raise ValueError("Invalid l1 parameter: {}".format(l1))
if not 0.0 <= l2:
raise ValueError("Invalid l2 parameter: {}".format(l2))
defaults = dict(alpha=alpha, beta=beta, l1=l1, l2=l2)
super(FTRL, self).__init__(params, defaults)
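    # Per-coordinate FTRL-Proximal update (see step below): accumulate
    #   z += g - ((n + g^2)^0.5 - n^0.5) / alpha * w,   n += g^2
    # then set the weight in closed form,
    #   w = -(z - l1 * sign(z)) / (l2 + (beta + sqrt(n)) / alpha)
    # with w = 0 wherever |z| < l1, which is what makes the solution sparse.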
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
if len(state) == 0:
state["z"] = torch.zeros_like(p.data)
state["n"] = torch.zeros_like(p.data)
z, n = state["z"], state["n"]
theta = (n + grad ** 2).sqrt() / group["alpha"] - n.sqrt()
z.add_(grad - theta * p.data)
n.add_(grad ** 2)
p.data = (
-1
/ (group["l2"] + (group["beta"] + n.sqrt()) / group["alpha"])
* (z - group["l1"] * z.sign())
)
p.data[z.abs() < group["l1"]] = 0
return loss
class DataPrefetcher():
def __init__(self, loader, device):
self.loader = iter(loader)
self.device = device
self.stream = torch.cuda.Stream()
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.mean = self.mean.half()
# self.std = self.std.half()
self.preload()
def preload(self):
try:
self.batch = next(self.loader)
except StopIteration:
self.batch = None
return
        with torch.cuda.stream(self.stream):
            # `k` is an integer index here, so the `k != 'meta'` guard carried
            # over from a dict-based loader was always true; copy each tensor
            # to the device asynchronously on the side stream.
            for k in range(len(self.batch)):
                self.batch[k] = self.batch[k].to(device=self.device, non_blocking=True)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.next_input = self.next_input.half()
# else:
# self.next_input = self.next_input.float()
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
batch = self.batch
self.preload()
return batch
def cal_group_auc(labels, preds, user_id_list):
if len(user_id_list) != len(labels):
raise ValueError(
"impression id num should equal to the sample num," \
"impression id num is {0}".format(len(user_id_list)))
group_score = defaultdict(lambda: [])
group_truth = defaultdict(lambda: [])
for idx, truth in enumerate(labels):
user_id = user_id_list[idx]
score = preds[idx]
truth = labels[idx]
group_score[user_id].append(score)
group_truth[user_id].append(truth)
group_flag = defaultdict(lambda: False)
for user_id in set(user_id_list):
truths = group_truth[user_id]
flag = False
for i in range(len(truths) - 1):
if truths[i] != truths[i + 1]:
flag = True
break
group_flag[user_id] = flag
impression_total = 0
total_auc = 0
#
for user_id in group_flag:
if group_flag[user_id]:
auc = roc_auc_score(np.asarray(group_truth[user_id]), np.asarray(group_score[user_id]))
total_auc += auc * len(group_truth[user_id])
impression_total += len(group_truth[user_id])
group_auc = float(total_auc) / impression_total
group_auc = round(group_auc, 4)
return group_auc
| 4,886 | 33.907143 | 99 | py |
AutoCO | AutoCO-main/exp_simulate/fm_nas/model.py | import torch
import torch.nn as nn
import torch.distributions.normal as normal
import torch.nn.functional as F
import math
import time
import numpy as np
import random
from torch.autograd import Variable
PRIMITIVES = ['concat', 'multiply', 'max', 'min', 'plus']
# PRIMITIVES = ['zero', 'max', 'min', 'multiply', 'plus', 'minus']
OPS = {
'zero': lambda p,q: torch.zeros_like(p).sum(2),
'plus': lambda p,q: (p + q).sum(2),
'minus': lambda p,q: (p - q).sum(2),
'multiply': lambda p, q: (p * q).sum(2),
'max': lambda p, q: torch.max(torch.stack((p, q)), dim=0)[0].sum(2),
'min': lambda p, q: torch.min(torch.stack((p, q)), dim=0)[0].sum(2),
'concat': lambda p, q: torch.cat([p, q], dim=-1).sum(2)
}
OPS_V = {
'zero': lambda p,q: torch.zeros_like(p),
'plus': lambda p,q: (p + q),
'minus': lambda p,q: (p - q),
'multiply': lambda p, q: (p * q),
'max': lambda p, q: torch.max(torch.stack((p, q)), dim=0)[0],
'min': lambda p, q: torch.min(torch.stack((p, q)), dim=0)[0],
'concat': lambda p, q: torch.cat([p, q], dim=-1)
}
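# MixedBinary evaluates one feature-pair interaction. `flag == 0` keeps only
# the argmax operation (discrete architecture); otherwise the result is the
# weighted mixture over all PRIMITIVES. `o_type` selects the block structure:
# 0 = shared embedding, 1 = a separate dim-sized embedding slice per op,
# 2 = op output fed through a per-op linear layer, 3 = slice + linear layer.
# `others == 1` rescales the chosen op by its weight (used by DSNAS).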
def MixedBinary(embedding_p, embedding_q, weights, flag, dim=1, FC=None, o_type=0, others=0):
# print(weights)
# embedding_p = MLP[0](embedding_p.view(-1,1)).view(embedding_p.size())
# embedding_q = MLP[1](embedding_q.view(-1,1)).view(embedding_q.size())
if flag == 0:
max_ind = weights.argmax().item()
if o_type==0:
t = OPS[PRIMITIVES[max_ind]](embedding_p, embedding_q)
elif o_type==1:
begin, end = max_ind*dim, max_ind*dim+dim
t = OPS[PRIMITIVES[max_ind]](embedding_p[:,:,begin:end], embedding_q[:,:,begin:end])
elif o_type==2:
t = (FC[max_ind](OPS_V[PRIMITIVES[max_ind]](embedding_p, embedding_q))).squeeze(1)
elif o_type==3:
begin, end = max_ind*dim, max_ind*dim+dim
t = (FC[max_ind](OPS_V[PRIMITIVES[max_ind]](embedding_p[:,:,begin:end], embedding_q[:,:,begin:end]))).squeeze(1)
if others == 1:
t = t*weights[max_ind]
else:
if o_type==0:
t = torch.sum(torch.stack([w * (OPS[primitive](embedding_p, embedding_q)) for w,primitive in zip(weights, PRIMITIVES)]), 0)
elif o_type==1:
t = 0
for i in range(len(PRIMITIVES)):
begin, end = i*dim, i*dim+dim
t += weights[i] * (OPS[PRIMITIVES[i]](embedding_p[:,:,begin:end], embedding_q[:,:,begin:end]))
elif o_type==2:
t = 0
for i in range(len(PRIMITIVES)):
t += weights[i] * (FC[i](OPS_V[PRIMITIVES[i]](embedding_p, embedding_q))).squeeze(1)
elif o_type==3:
t = 0
for i in range(len(PRIMITIVES)):
begin, end = i*dim, i*dim+dim
t += weights[i] * (FC[i](OPS_V[PRIMITIVES[i]](embedding_p[:,:,begin:end], embedding_q[:,:,begin:end]))).squeeze(1)
return t
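# OFM is the shared base model: an FM-style network with per-field embeddings
# (dense, one-hot and multi-hot fields handled separately) whose pairwise
# interactions are combined through MixedBinary under the architecture
# parameters in self._arch_parameters['binary']. Subclasses only change how
# those parameters are relaxed and optimized (NAS, SNAS, DSNAS, *_TS variants).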
class OFM(nn.Module):
def __init__(self, feature_size=None, k=None, params=None, m_type=0):
super().__init__()
        print(feature_size)
        self.name = "OFM"
        self.field_size = len(feature_size)
self.feature_sizes = feature_size
self.dim = k
self.device = params["device"]
self.dense = params["dense"]
self.first_order = params["first_order"]
self.type = m_type
self.f_len = params["f_len"]
self.alpha = params["alpha"]
# self.nas_flag = nas_flag
# init bias
self.bias = nn.Parameter(torch.normal(torch.ones(1), 1),requires_grad=True)
self.params = params
# init first order
if self.first_order == 1:
fm_first_order_Linears = nn.ModuleList(
[nn.Linear(feature_size, 1, bias=False) for feature_size in self.feature_sizes[:self.dense[0]]])
fm_first_order_embeddings = nn.ModuleList(
[nn.Embedding(feature_size, 1) for feature_size in self.feature_sizes[self.dense[0]:self.dense[0]+self.dense[1]]])
fm_first_order_multi = nn.Embedding(self.dense[2], 1)
self.fm_first_order_models = fm_first_order_Linears.extend(fm_first_order_embeddings).append(fm_first_order_multi)
# self.bias = 0
# init second order
self._FC = None
if self.type == 0:
leng = self.dim
elif self.type == 1:
leng = self.dim*len(PRIMITIVES)
elif self.type == 2:
leng = self.dim
# leng = self.dim
self._FC = nn.ModuleList()
for primitive in PRIMITIVES:
if primitive == "concat":
self._FC.append(nn.Linear(2*self.dim, 1, bias=False))
else:
self._FC.append(nn.Linear(self.dim, 1, bias=False))
elif self.type == 3:
leng = self.dim*len(PRIMITIVES)
# leng = self.dim
self._FC = nn.ModuleList()
for primitive in PRIMITIVES:
if primitive == "concat":
self._FC.append(nn.Linear(2*self.dim, 1, bias=False))
else:
self._FC.append(nn.Linear(self.dim, 1, bias=False))
fm_second_order_Linears = nn.ModuleList(
[nn.Linear(feature_size, leng, bias=False) for feature_size in self.feature_sizes[:self.dense[0]]])
fm_second_order_embeddings = nn.ModuleList(
[nn.Embedding(feature_size, leng) for feature_size in self.feature_sizes[self.dense[0]:self.dense[0]+self.dense[1]]])
fm_second_order_multi = nn.Embedding(self.dense[2], leng)
self.fm_second_order_models = fm_second_order_Linears.extend(fm_second_order_embeddings).append(fm_second_order_multi)
# calcu the num of operation
if self.dense[2] != 0:
self.multi_len = int(self.dense[1]*(self.dense[1]+1)/2)+1
else:
self.multi_len = int(self.dense[1]*(self.dense[1]-1)/2)
# init arch parameters
self.inter_arr = [i for i in range(self.multi_len)]
self._arch_parameters = {}
self._arch_parameters['binary'] = Variable(torch.ones((self.multi_len, len(PRIMITIVES)), dtype=torch.float, device=params['device']) / 2, requires_grad=True)
self._arch_parameters['binary'].data.add_(
torch.randn_like(self._arch_parameters['binary'])*1e-3)
self.cost = [0 for _ in range(6)]
def arch_parameters(self):
return [self._arch_parameters['binary']]
def set_rand(self, num):
self.inter_arr = random.sample(self.inter_arr, num)
def forward(self, x, flag, weights=None):
if weights == None:
weights = self._arch_parameters['binary']
X = x.reshape(x.shape[0], x.shape[1], 1)
self.cost[0] -= time.time()
# calcu first order
out_first_order = 0
if self.first_order == 1:
for i, emb in enumerate(self.fm_first_order_models):
if i < self.dense[0]:
Xi_tem = X[:, i, :].to(device=self.device, dtype=torch.float)
out_first_order += torch.sum(emb(Xi_tem).unsqueeze(1), 1)
elif i < self.dense[0]+self.dense[1]:
self.cost[1] -= time.time()
Xi_tem = X[:, i, :].to(device=self.device, dtype=torch.long)
# print(i, Xi_tem, emb, self.feature_sizes)
out_first_order += torch.sum(emb(Xi_tem), 1)
self.cost[1] += time.time()
else:
self.cost[2] -= time.time()
for j in range(self.dense[2]):
Xi_tem = X[:, i+j, :].to(device=self.device, dtype=torch.long)
out_first_order += Xi_tem*emb(torch.Tensor([j]).to(device=self.device, dtype=torch.long))
self.cost[2] += time.time()
# record second order embedding
X_vector = []
for i, emb in enumerate(self.fm_second_order_models):
if i < self.dense[0]:
if self.params["calcu_dense"]==0:
continue
else:
Xi_tem = X[:, i, :].to(device=self.device, dtype=torch.float)
X_vector.append(emb(Xi_tem).unsqueeze(1))
elif i < self.dense[0]+self.dense[1]:
self.cost[3] -= time.time()
Xi_tem = X[:, i, :].to(device=self.device, dtype=torch.long)
X_vector.append(emb(Xi_tem))
# print(X_vector[-1].shape)
self.cost[3] += time.time()
else:
self.cost[4] -= time.time()
for j in range(self.dense[2]):
Xi_tem = X[:, i+j, :].to(device=self.device, dtype=torch.long)
X_vector.append((Xi_tem*emb(torch.Tensor([j]).to(device=self.device, dtype=torch.long))).unsqueeze(1))
# print(X_vector[-1].shape)
self.cost[4] += time.time()
# calcu second order
out_second_order = 0
self.cost[5] -= time.time()
cnt = 0
multi_hot_len = 0
if self.dense[2] != 0:
multi_hot_len = 1
for i in range(len(X_vector)):
for j in range(i):
if i < len(self.feature_sizes)-multi_hot_len:
tmp = cnt
elif j < len(self.feature_sizes)-multi_hot_len:
tmp = self.multi_len-len(self.feature_sizes)+j
else:
tmp = self.multi_len-1
cnt += 1
if tmp not in self.inter_arr:
continue
# if self.name != "SNAS":
# out_second_order += MixedBinary(X_vector[i], X_vector[j], weights[tmp,:], flag, dim=self.dim, FC=self._FC, o_type=self.type)
# else:
# out_second_order += MixedBinary(X_vector[i], X_vector[j], weights[tmp], flag, dim=self.dim, FC=self._FC, o_type=self.type)
if self.name == "SNAS":
out_second_order += MixedBinary(X_vector[i], X_vector[j], weights[tmp], flag, dim=self.dim, FC=self._FC, o_type=self.type)
elif self.name == "DSNAS":
out_second_order += MixedBinary(X_vector[i], X_vector[j], weights[tmp, :], flag, dim=self.dim, FC=self._FC, o_type=self.type, others=1)
else:
out_second_order += MixedBinary(X_vector[i], X_vector[j], weights[tmp, :], flag, dim=self.dim, FC=self._FC, o_type=self.type)
# print(out_second_order)
self.cost[5] += time.time()
self.cost[0] += time.time()
out = out_second_order+out_first_order+self.bias
return torch.sigmoid(out.squeeze(1))
def genotype(self):
genotype = [PRIMITIVES[self._arch_parameters['binary'][i, :].argmax().item()]
for i in range(self.multi_len)]
for i in range(self.multi_len):
if i not in self.inter_arr:
genotype[i] = "None"
print(genotype)
return genotype
# return genotype, genotype_p.cpu().detach()
def setotype(self, ops):
self.ops = ops
leng = len(self._arch_parameters['binary'][0,:])
for i in range(self.multi_len):
for j in range(leng):
if PRIMITIVES[j] == self.ops:
self._arch_parameters['binary'].data[i,j] = 1.0
else:
self._arch_parameters['binary'].data[i,j] = 0.0
def TS_initial(self):
self.rand_array = torch.randn(10000000)
self.ts_trick = self.params["ts_trick"]
if self.type == 0 or self.type == 2:
leng = self.dim
elif self.type == 1 or self.type == 3:
leng = self.dim*len(PRIMITIVES)
if self.first_order == 1:
fm_first_std_Linears = nn.ModuleList(
[nn.Linear(feature_size, 1, bias=False) for feature_size in self.feature_sizes[:self.dense[0]]])
fm_first_std_embeddings = nn.ModuleList(
[nn.Embedding(feature_size, 1) for feature_size in self.feature_sizes[self.dense[0]:self.dense[0]+self.dense[1]]])
fm_first_std_multi = nn.Embedding(self.dense[2], 1)
self.fm_first_std_models = fm_first_std_Linears.extend(fm_first_std_embeddings).append(fm_first_std_multi)
fm_second_std_Linears = nn.ModuleList(
[nn.Linear(feature_size, leng, bias=False) for feature_size in self.feature_sizes[:self.dense[0]]])
fm_second_std_embeddings = nn.ModuleList(
[nn.Embedding(feature_size, leng) for feature_size in self.feature_sizes[self.dense[0]:self.dense[0]+self.dense[1]]])
fm_second_std_multi = nn.Embedding(self.dense[2], leng)
self.fm_second_std_models = fm_second_std_Linears.extend(fm_second_std_embeddings).append(fm_second_std_multi)
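    # Bayesian/Thompson-sampling machinery: every embedding gets a parallel
    # "std" table, and reparameterize draws w = mu + alpha * softplus(std) * eps
    # using slices of a fixed pre-generated noise buffer (self.rand_array).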
def reparameterize(self, mu, std, alpha):
std = torch.log(1 + torch.exp(std)).to(self.device)
# v = torch.randn(batch, mu.shape[0], mu.shape[1]).to(self.device)
v = self.rand_array[:std.numel()].reshape(std.shape).to(self.device)
return (mu + alpha * std * v)
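    # Closed-form KL divergence between diagonal Gaussians, summed elementwise:
    # KL(N(m1, s1) || N(m2, s2)) = log(s2/s1) + (s1^2 + (m1-m2)^2) / (2 s2^2) - 1/2.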
def KL_distance(self, mean1, mean2, std1, std2):
a = torch.log(std2/std1) + (std1*std1+(mean1-mean2)*(mean1-mean2))/2/std2/std2 - 1.0/2.0
return torch.sum(a)
def forward_ts(self, x, flag, weights=None, cal_kl=1):
if weights == None:
weights = self._arch_parameters['binary']
X = x.reshape(x.shape[0], x.shape[1], 1)
# if cal_kl==1:
# alpha = 1
# else:
# alpha = self.alpha
alpha = self.alpha
out_first_order = 0
if self.first_order == 1:
for i, emb in enumerate(self.fm_first_order_models):
if i < self.dense[0]:
Xi_tem = X[:, i, :].to(device=self.device, dtype=torch.float)
X_mean = torch.sum(emb(Xi_tem).unsqueeze(1), 1)
if i < self.f_len and self.ts_trick==0:
out_first_order += X_mean
else:
X_std = torch.sum(self.fm_first_std_models[i](Xi_tem).unsqueeze(1), 1)
out_first_order += self.reparameterize(X_mean, X_std, alpha)
elif i < self.dense[0]+self.dense[1]:
Xi_tem = X[:, i, :].to(device=self.device, dtype=torch.long)
# print(Xi_tem.shape)
X_mean = torch.sum(emb(Xi_tem), 1)
if i < self.f_len and self.ts_trick==0:
out_first_order += X_mean
else:
X_std = torch.sum(self.fm_first_std_models[i](Xi_tem), 1)
out_first_order += self.reparameterize(X_mean, X_std, alpha)
else:
for j in range(self.dense[2]):
Xi_tem = X[:, i+j, :].to(device=self.device, dtype=torch.long)
X_mean = Xi_tem*emb(torch.Tensor([j]).to(device=self.device, dtype=torch.long))
if i < self.f_len and self.ts_trick==0:
out_first_order += X_mean
else:
X_std = Xi_tem*self.fm_first_std_models[i](torch.Tensor([j]).to(device=self.device, dtype=torch.long))
out_first_order += self.reparameterize(X_mean, X_std, alpha)
X_vector = []
for i, emb in enumerate(self.fm_second_order_models):
if i < self.dense[0]:
if self.params["calcu_dense"]==0:
continue
else:
Xi_tem = X[:, i, :].to(device=self.device, dtype=torch.float)
X_mean = emb(Xi_tem).unsqueeze(1)
if i < self.f_len and self.ts_trick==0:
X_vector.append(X_mean)
else:
X_std = self.fm_second_std_models[i](Xi_tem).unsqueeze(1)
X_vector.append(self.reparameterize(X_mean, X_std, alpha))
elif i < self.dense[0]+self.dense[1]:
Xi_tem = X[:, i, :].to(device=self.device, dtype=torch.long)
X_mean = emb(Xi_tem)
if i < self.f_len and self.ts_trick==0:
X_vector.append(X_mean)
else:
X_std = self.fm_second_std_models[i](Xi_tem)
X_vector.append(self.reparameterize(X_mean, X_std, alpha))
else:
for j in range(self.dense[2]):
Xi_tem = X[:, i+j, :].to(device=self.device, dtype=torch.long)
X_mean = (Xi_tem*emb(torch.Tensor([j]).to(device=self.device, dtype=torch.long))).unsqueeze(1)
if i < self.f_len and self.ts_trick==0:
X_vector.append(X_mean)
else:
X_std = (Xi_tem*self.fm_second_std_models[i](torch.Tensor([j]).to(device=self.device, dtype=torch.long))).unsqueeze(1)
X_vector.append(self.reparameterize(X_mean, X_std, alpha))
out_second_order = 0
cnt = 0
multi_hot_len = 0
if self.dense[2] != 0:
multi_hot_len = 1
for i in range(len(X_vector)):
for j in range(i):
if i < len(self.feature_sizes)-multi_hot_len:
tmp = cnt
elif j < len(self.feature_sizes)-multi_hot_len:
tmp = -len(self.feature_sizes)+j
else:
tmp = -1
if self.name != "SNAS":
out_second_order += MixedBinary(X_vector[i], X_vector[j], weights[tmp,:], flag, dim=self.dim, FC=self._FC, o_type=self.type)
else:
out_second_order += MixedBinary(X_vector[i], X_vector[j], weights[tmp], flag, dim=self.dim, FC=self._FC, o_type=self.type)
cnt += 1
out = torch.sigmoid((out_second_order+out_first_order+self.bias).squeeze(1))
if cal_kl == 0:
return 0, out
# print(out.shape,out_second_order.shape,out_first_order.shape)
k = 0
for i, emb in enumerate(self.fm_first_order_models):
if i < self.f_len:
continue
k += self.KL_distance(emb.weight, 0*torch.ones_like(emb.weight), torch.log(1 + torch.exp(self.fm_first_std_models[i].weight)), 0.1*torch.ones_like(self.fm_first_std_models[i].weight))
for i, emb in enumerate(self.fm_second_order_models):
if i < self.f_len:
continue
k += self.KL_distance(emb.weight, 0*torch.ones_like(emb.weight), torch.log(1 + torch.exp(self.fm_second_std_models[i].weight)), 0.1*torch.ones_like(self.fm_second_std_models[i].weight))
# print(k.shape)
return k, out
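# NAS: DARTS-like one-shot search with hard binarization. `binarize` caches the
# real-valued architecture weights and snaps them to one-hot before a forward
# pass; `recover` restores the cache so the arch optimizer updates the
# underlying continuous parameters.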
class NAS(OFM):
def __init__(self, feature_size=None, k=None, params=None, m_type=0):
super().__init__(feature_size, k, params, m_type)
self.name = "NAS"
def binarize(self):
self._cache = self._arch_parameters['binary'].clone()
max_index = [self._arch_parameters['binary'][i, :].argmax().item()
for i in range(self.multi_len)]
leng = len(self._arch_parameters['binary'][0,:])
for i in range(self.multi_len):
for j in range(leng):
if j == max_index[i]:
self._arch_parameters['binary'].data[i,j] = 1.0
else:
self._arch_parameters['binary'].data[i,j] = 0.0
# print(self._arch_parameters['binary'])
def recover(self):
self._arch_parameters['binary'].data = self._cache
del self._cache
def step(self, x, labels_valid, criterion, arch_optimizer, other):
# print(len(x),len(labels_valid))
self.zero_grad()
arch_optimizer.zero_grad()
# binarize before forward propagation
self.binarize()
inferences = self(x, 1)
loss = criterion(inferences, labels_valid)
# loss = F.binary_cross_entropy(input=inferences,target=labels_valid,reduction='mean')
loss.backward()
# restore weight before updating
self.recover()
arch_optimizer.step()
return loss
def print(self):
for name, i in self.named_parameters():
print(name,i)
return
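# DSNAS: samples a one-hot architecture per step from a categorical over the
# ops and updates the logits with a policy-gradient-style signal; in `step` the
# per-interaction "reward" is read off the gradient of the sampled mask.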
class DSNAS(OFM):
def __init__(self, feature_size=None, k=None, params=None, m_type=0, args=None):
super(DSNAS, self).__init__(feature_size, k, params, m_type)
self.log_alpha = self._arch_parameters['binary']
self.weights = Variable(torch.zeros_like(self.log_alpha))
self.fix_arch_index = {}
self.args = args
self.name = "DSNAS"
def binarize(self):
pass
def recover(self):
return
def fix_arch(self):
if self.params["early_fix_arch"]:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.log_alpha.data[key, :] = value_lst[1]
sort_log_alpha = torch.topk(F.softmax(self.log_alpha.data, dim=-1), 2)
argmax_index = (sort_log_alpha[0][:, 0] - sort_log_alpha[0][:, 1] >= 0.3)
for id in range(argmax_index.size(0)):
if argmax_index[id] == 1 and id not in self.fix_arch_index.keys():
self.fix_arch_index[id] = [sort_log_alpha[1][id, 0].item(),
self.log_alpha.detach().clone()[id, :]]
def forward(self, x, flag, weights=None):
if weights == None:
weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim = -1).view(-1,1), 1)
return super().forward(x, flag, weights)
#only need training data for training architecture parameter and network parameters
def step(self, x, labels, criterion, arch_optimizer, optimizer):
error_loss = 0
loss_alpha = 0
self.fix_arch()
arch_optimizer.zero_grad()
optimizer.zero_grad()
if self.params["early_fix_arch"]:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.weights[key, :].zero_()
self.weights[key, value_lst[0]] = 1
self.weights = self._get_weights(self.log_alpha)
cate_prob = F.softmax(self.log_alpha, dim=-1)
self.cate_prob = cate_prob.clone().detach()
loss_alpha = torch.log(
(self.weights * F.softmax(self.log_alpha, dim=-1)).sum(-1)).sum()
self.weights.requires_grad_()
inference = self(x, 1, self.weights)
error_loss = F.binary_cross_entropy(inference, labels.float())
self.weights.grad = torch.zeros_like(self.weights)
(error_loss + loss_alpha).backward()
self.block_reward = self.weights.grad.data.sum(-1)
self.log_alpha.grad.data.mul_(self.block_reward.view(-1, 1))
arch_optimizer.step()
optimizer.step()
return error_loss
def _get_weights(self, log_alpha):
# if self.args.random_sample:
# uni = torch.ones_like(log_alpha)
# m = torch.distributions.one_hot_categorical.OneHotCategorical(uni)
# else:
m = torch.distributions.one_hot_categorical.OneHotCategorical(probs=F.softmax(log_alpha, dim=-1))
return m.sample()
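# SNAS: relaxes the discrete op choice with Gumbel-Softmax samples
# (`g_softmax`), so gradients flow to the architecture logits through the soft
# mixture; the temperature controls how close samples are to one-hot.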
class SNAS(OFM):
def __init__(self, feature_size=None, k=None, params=None, m_type=0):
super().__init__(feature_size, k, params, m_type)
self.arch_prob = [0 for _ in range(self.multi_len)]
self.name = "SNAS"
def binarize(self, temperature=0.00001):
self.g_softmax(temperature)
def recover(self):
return
def forward(self, x, flag):
return super().forward(x, flag, self.arch_prob)
def g_softmax(self, temperature):
self.temp = temperature
for i in range(self.multi_len):
# alpha = self._arch_parameters['binary'].data[i, :]
# m = torch.nn.functional.gumbel_softmax(alpha, tau=temperature, hard=False, eps=1e-10, dim=-1)
# m = torch.distributions.relaxed_categorical.RelaxedOneHotCategorical(
# torch.tensor([temperature]).to(self.device) , alpha)
# print("sam",m.sample(),"raw",self._arch_parameters['binary'].data[i, :])
self.arch_prob[i] = torch.nn.functional.gumbel_softmax(self._arch_parameters['binary'][i, :],tau=temperature, hard=False, eps=1e-10, dim=-1)
def step(self, x, labels_valid, criterion, arch_optimizer, temperature):
self.zero_grad()
arch_optimizer.zero_grad()
self.g_softmax(temperature)
inferences = self(x, 1)
loss = criterion(inferences, labels_valid)
# loss = F.binary_cross_entropy(input=inferences,target=labels_valid,reduction='mean')
loss.backward()
arch_optimizer.step()
# for parms in self.arch_parameters():
# print(parms,'-->grad_requirs:',parms.requires_grad,' -->grad_value:',parms.grad)
return loss
class OFM_TS(OFM):
def __init__(self, feature_size=None, k=None, params=None, m_type=0):
super().__init__(feature_size, k, params, m_type)
self.TS_initial()
class NAS_TS(NAS):
def __init__(self, feature_size=None, k=None, params=None, m_type=0):
super().__init__(feature_size, k, params, m_type)
self.TS_initial()
def step_ts(self, x, labels_valid, criterion, arch_optimizer, other):
# print(len(x),len(labels_valid))
self.zero_grad()
arch_optimizer.zero_grad()
# binarize before forward propagation
self.binarize()
_, inferences = self.forward_ts(x, 1, cal_kl=0)
loss = criterion(inferences, labels_valid)
# loss = F.binary_cross_entropy(input=inferences,target=labels_valid,reduction='mean')
loss.backward()
# restore weight before updating
self.recover()
arch_optimizer.step()
return loss
class SNAS_TS(SNAS):
def __init__(self, feature_size=None, k=None, params=None, m_type=0):
super().__init__(feature_size, k, params, m_type)
self.TS_initial()
class DSNAS_TS(DSNAS):
def __init__(self, feature_size=None, k=None, params=None, m_type=0):
super().__init__(feature_size, k, params, m_type)
self.TS_initial()
class Linucb(nn.Module):
def __init__(self, n=1, cnum=1, device='cuda'):
super().__init__()
self.alpha = 0.1
self.n =n
self.theta = nn.Parameter(torch.randn(cnum, n).to(device))
self.A_inv = nn.Parameter(torch.randn(cnum, n, n).to(device))
def forward(self, x):
ind = x[:,0].cpu().numpy().astype(int)
feature = x[:,1:].reshape(len(x),self.n)
mean = torch.mul(feature, self.theta[ind]).sum(1, keepdim=True)
fe1 = feature.reshape(len(x),1,self.n)
fe2 = feature.reshape(len(x),self.n,1)
std = self.alpha*torch.sqrt(torch.bmm(torch.bmm(fe1,self.A_inv[ind]),fe2).reshape(len(x),1))
return mean + std
    def print(self):
        return
| 27,434 | 44.123355 | 197 | py |
AutoCO | AutoCO-main/exp_simulate/fm_nas/policy.py | import time
import torch
import pickle
import numpy as np
import pandas as pd
from torch.utils.data import WeightedRandomSampler
from train import Core
from train_arch import Train_Arch
from model import Linucb
def get_creative(params):
"return global creative list, and item-creatives dict"
with open(params['creative_list'], 'rb') as f:
c_list = pickle.load(f)
c_list = np.array(c_list, dtype=np.float32)
with open(params['item_candidates'], 'rb') as f:
item_candi = pickle.load(f)
with open(params["feature_list"], 'rb') as f:
f_list = pickle.load(f)
return f_list, c_list, item_candi
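# Base policy: caches the global feature/creative tables and the impression
# log, and provides get_recommend_info, which flattens every candidate
# (feature, creative) pair into one matrix plus per-item offsets for ranking.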
class Base(object):
def __init__(self, params):
self.f_list, self.c_list, self.item_candi = get_creative(params)
self.name = 'Base'
self.params = params
self.c_len = len(self.c_list[0])
self.f_len = len(self.f_list[0])
self.t_len = self.f_len+self.c_len
self.params["c_len"] = self.c_len
self.params["f_len"] = self.f_len
self.log_file = None
self.log_data = []
self.batch_data = []
self.fea_index = {}
self.c_index = {}
self.cnt = 0
cnt= 0
for cre in self.c_list:
self.c_index[','.join(str(x) for x in cre)] = cnt
cnt += 1
self.clock = [0 for _ in range(10)]
def update(self, lines, size=-1):
self.log_data.extend(lines)
self.batch_data = lines
if size!=-1 and len(self.log_data)>size:
self.log_data = self.log_data[-size:]
return 0
def update_in_log(self):
"""write log into file"""
if len(self.log_data)==0:
return
df = pd.DataFrame(self.log_data)
df.to_pickle(self.log_file)
def get_recommend_info(self, features_list, flag=0):
"""
Return several array for calculate
feas: distinct feature list which have not been displayed
offset: record the beginning of feature creatives
candidates: creatives of all features waiting for ranking
"""
feas = []
num = 0
for features in features_list:
ind = features[0]
if ind not in self.fea_index.keys():
self.fea_index[ind] = self.cnt
self.cnt += 1
feas.append(features)
num += len(self.item_candi[ind])
cnt = 0
f_len = len(features_list[0][1:])
# print(len(self.c_list[0]))
if flag == 0:
leng = f_len+len(self.c_list[0])
else:
leng = f_len+len(self.c_list[0])+1
candidates = np.zeros((num, leng),dtype=np.float32)
offset = [0]
last = 0
for features in feas:
t = np.zeros(leng)
if flag == 0:
t[0:f_len] = np.array(features[1:], dtype=np.float32)
for c_feature in self.item_candi[features[0]]:
t[f_len:] = self.c_list[c_feature]
candidates[cnt] = t
cnt+=1
else:
t[1:1+f_len] = np.array(features[1:], dtype=np.float32)
for c_feature in self.item_candi[features[0]]:
t[0] = np.float32(c_feature)
t[1+f_len:] = self.c_list[c_feature]
candidates[cnt] = t
cnt+=1
last = last+len(self.item_candi[features[0]])
offset.append(last)
return feas, offset, candidates
class Random(Base):
def __init__(self, params):
super(Random, self).__init__(params)
self.name = 'Random'
self.log_file = self.params["random_file"]
def update(self, lines, ind=None):
super(Random, self).update(lines)
return 0
def recommend_batch(self, features_list):
res = []
for features in features_list:
leng = len(self.item_candi[features[0]])
res.append(np.random.randint(0,leng))
return res
class Greedy(Base):
def __init__(self, params):
super(Greedy, self).__init__(params)
self.name = 'Greedy'
def update(self, lines, ind):
return 0
def recommend_batch(self, features_list, item_ctr):
res = []
for features in features_list:
res.append(item_ctr[features[0]].index(max(item_ctr[features[0]])))
return res
class FmEGreedy(Base):
def __init__(self, params):
super(FmEGreedy, self).__init__(params)
self.log_file = 'data/log/fm_log.pkl'
self.name = 'FmEGreedy'
self.fea_index = {}
self.res = []
self.flag = 0
self.update_cnt = 0
self.greedy_flag = 0
self.epsilon = self.params["alpha"]
self.model_nas = self.params["model_nas"]
self.model_struct = self.params["model_struct"]
        # initial model: if a nas model, use Train_Arch(), else use Core()
if self.model_nas not in ["fm", "fm_ts"]:
self.framework = Train_Arch(dim=self.params["dim"],epoch=self.params["epoch"],weight_decay=self.params["decay"],data_size=self.params["data_size"], train_scale=1, valid_scale=0, device=self.params["device"], params=self.params)
else:
self.framework = Core(dim=self.params["dim"],epoch=self.params["epoch"],weight_decay=self.params["decay"],data_size=self.params["data_size"],train_scale= 1, valid_scale=0, device=self.params["device"], params=self.params)
        # get test dataset to calculate auc
if self.params["auc_record"] == 1:
_, _, self.test_data = self.framework.set_dataloader(dataset_path=self.params["random_file"], data_size=100000,train_scale=0,valid_scale=0)
def update(self, lines, ind=None):
self.update_cnt += len(lines)
if self.name == "FmTS":
self.framework.epoch = self.framework.params["epoch"]-int((len(self.log_data)/len(lines))*3.5)
print("epoch:", self.framework.epoch)
# update == 0, the struct of model will update frequently
if self.params["update"] == 0:
super(FmEGreedy, self).update(lines)
self.framework.set_dataloader(dataset=self.log_data, dataset_path=self.log_file)
self.framework.initial_model(self.model_nas, self.model_struct, optimizer=self.params["optimizer"])
self.framework.run()
# update == 1, the struct of model will update several batch a time
elif self.params["update"] == 1:
super(FmEGreedy, self).update(lines)
if self.update_cnt > 49999 or self.flag ==0:
self.framework.params["trick"]=1
self.framework.epoch = self.framework.params["epoch"]
self.framework.set_dataloader(dataset=self.log_data, dataset_path=self.log_file)
self.framework.initial_model(self.model_nas, self.model_struct, optimizer=self.params["optimizer"])
self.framework.run()
self.framework.params["arch"] = [self.framework.model._arch_parameters['binary'][i, :].argmax().item() for i in range(len(self.framework.model._arch_parameters['binary']))]
self.flag, self.update_cnt = 1, 0
else:
self.framework.epoch = 200
self.framework.params["trick"]=0
if self.name == "FmTS":
self.framework.epoch = 180-int(len(self.log_data)/len(lines))*6
self.framework.set_dataloader(dataset=self.log_data, dataset_path=self.log_file)
self.framework.initial_model(self.model_nas, self.model_struct, optimizer=self.params["optimizer"])
# if self.flag == 0:
# self.framework.params["arch"] = [1 for i in range(len(self.framework.model._arch_parameters['binary']))]
self.framework.run()
self.fea_index = {}
self.res = []
self.cnt = 0
if self.params["auc_record"] == 1:
return(self.framework.test(self.test_data))
else:
return 0
def recommend_batch(self, features_list):
feas, offset, candidates = super().get_recommend_info(features_list)
if len(candidates) != 0:
rank = self.framework.result(candidates)
for i in range(len(feas)):
k = np.argmax(rank[offset[i]:offset[i+1]])
self.res.append(k)
final = []
for features in features_list:
if np.random.rand() > self.epsilon:
final.append(self.res[self.fea_index[features[0]]])
else:
leng = len(self.item_candi[features[0]])
final.append(np.random.randint(0,leng))
return final
class FmThompson(FmEGreedy):
def __init__(self, params):
if "_ts" not in params["model_nas"]:
params["model_nas"] = params["model_nas"]+"_ts"
super(FmThompson, self).__init__(params)
self.log_file = 'data/log/fmTS_log.pkl'
self.name = 'FmTS'
self.epsilon = 0
def update(self, lines, ind=None):
return super(FmThompson, self).update(lines)
def recommend_batch(self, features):
return super(FmThompson, self).recommend_batch(features)
class FmGreedy(FmEGreedy):
def update(self, lines, ind):
if self.greedy_flag == 1:
return 0
self.log_file = self.params["random_file"]
self.greedy_flag = 1
self.framework.set_dataloader(dataset=None, dataset_path=self.log_file)
self.framework.initial_model(self.model_nas, self.model_struct, optimizer=self.params["optimizer"])
self.framework.run()
_, _, self.test_data = self.framework.set_dataloader(dataset_path=self.params["random_file"], data_size=100000,train_scale=0,valid_scale=0, flag=1)
if self.params["auc_record"] == 1:
return(self.framework.test(self.test_data))
else:
return 0
class LinUCB(Base):
def __init__(self, params):
super(LinUCB, self).__init__(params)
self.log_file = 'data/log/linucb_log.pkl'
self.name = 'Linucb'
self.device = params['device']
self.r_index = {}
self.c_num = len(self.c_list)
self.leng = self.t_len
self.alpha = self.params["alpha"]
self.cnt = 0
self.t = 0
self.Aa = np.zeros((self.c_num, self.leng, self.leng))
self.Aa_inv = np.zeros((self.c_num, self.leng, self.leng))
self.ba = np.zeros((self.c_num, self.leng, 1))
self.theta = np.zeros((self.c_num, self.leng, 1))
for i in range(self.c_num):
self.Aa[i] = np.identity(self.leng)
self.Aa_inv[i] = np.identity(self.leng)
self.ba[i] = np.zeros((self.leng, 1))
self.theta[i] = np.zeros((self.leng, 1))
self.model = Linucb(self.leng, self.c_num, self.device)
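    # Classic per-creative LinUCB statistics: A = I + sum(x x^T), b = sum(r x),
    # theta = A^{-1} b. `update` maintains A^{-1} incrementally with the
    # Sherman-Morrison identity instead of re-inverting, then pushes theta and
    # A^{-1} into the torch Linucb module for batched mean + alpha*std scoring.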
def update(self, lines, ind=None):
# super(LinUCB, self).update(lines)
used = np.zeros(self.c_num)
# print(self.c_index)
for line in lines:
curr = self.c_index[','.join(str(float(x)) for x in line[-1-self.c_len:-1])]
used[curr] = 1
x = np.array(line[0:-1]).reshape((self.leng, 1))
# print(x)
reward = line[-1]
t = np.outer(x, x)
self.Aa[curr] += t
self.ba[curr] += reward * x
self.Aa_inv[curr] = self.Aa_inv[curr] - np.matmul(self.Aa_inv[curr].dot(x),x.T.dot(self.Aa_inv[curr]))/(1 + np.matmul(x.T, self.Aa_inv[curr].dot(x)))
for curr in range(self.c_num):
if used[curr] == 1:
# self.Aa_inv[curr] = np.linalg.inv(self.Aa[curr])
self.theta[curr] = self.Aa_inv[curr].dot(self.ba[curr])
for i, (_, param) in enumerate(self.model.named_parameters()):
if i ==0:
param.data = torch.from_numpy(self.theta.reshape(self.c_num,self.leng).astype(np.float32)).to(self.device)
if i == 1:
param.data = torch.from_numpy(self.Aa_inv.astype(np.float32)).to(self.device)
self.r_index = {}
self.fea_index = {}
return 0
def recommend_batch(self, features_list):
feas, offset, candidates = super().get_recommend_info(features_list, 1)
rank = self.model(torch.from_numpy(candidates).to(self.device))
rank = rank.detach().cpu().numpy().reshape(len(rank))
for i in range(len(feas)):
k = np.argmax(rank[offset[i]:offset[i+1]])
self.r_index[feas[i][0]] = k
final = []
for features in features_list:
final.append(self.r_index[features[0]])
return final
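# Linear Thompson sampling: reuses LinUCB's A/b statistics but, at
# recommendation time, draws theta ~ N(A^{-1} b, alpha * diag(A^{-1})) and
# ranks candidates under the sampled parameters.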
class TS(LinUCB):
def __init__(self, params):
super(TS, self).__init__(params)
self.log_file = 'data/log/ts_log.pkl'
self.name = 'ts'
self.mean = None
self.std = None
self.alpha = self.params["alpha"]
def update(self, lines, ind=None):
for line in lines:
curr = self.c_index[','.join(str(float(x)) for x in line[-1-self.c_len:-1])]
x = np.array(line[0:-1]).reshape((self.leng, 1))
reward = line[-1]
t = np.outer(x, x)
self.Aa[curr] += t
self.ba[curr] += reward * x
self.Aa_inv[curr] = self.Aa_inv[curr] - np.matmul(self.Aa_inv[curr].dot(x),x.T.dot(self.Aa_inv[curr]))/(1 + x.T.dot(self.Aa_inv[curr]).dot(x))
t =self.Aa_inv[curr].dot(self.ba[curr])
self.theta[curr] = t
self.r_index = {}
self.fea_index = {}
self.mean = torch.from_numpy(self.theta).reshape(self.c_num, self.leng)
temp = np.array([np.diag(a) for a in self.Aa_inv])
self.std = torch.from_numpy(temp.reshape((self.c_num, self.leng)))
def recommend_batch(self, features_list):
theta = torch.normal(self.mean, self.alpha*self.std).numpy().reshape((self.c_num, self.leng))
feas, offset, candidates = super().get_recommend_info(features_list)
for i in range(len(feas)):
ind = feas[i][0]
c_num = len(self.item_candi[ind])
res = np.zeros(c_num)
for j in range(c_num):
res[j] = theta[self.item_candi[ind][j]].T.dot(candidates[offset[i]+j])
k = np.argmax(res)
self.r_index[ind] = k
final = []
for features in features_list:
final.append(self.r_index[features[0]])
return final | 14,589 | 39.082418 | 239 | py |
AutoCO | AutoCO-main/exp_simulate/fm_nas/utils_gen.py | from train import Core
from train_arch import Train_Arch
import argparse
import pickle
import numpy as np
import torch
dataset_name = "data_nas"
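# Builds the simulator's ground truth: load a trained FM-NAS checkpoint, score
# every (item, creative) candidate pair, and report the resulting CTR table
# plus the relative gap between the best and the median creative per item.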
def gen_simulation_ctr():
c_list = None
with open("raw_data/"+dataset_name+"/creative_list.pkl", 'rb') as f:
c_list = pickle.load(f)
c_list = np.array(c_list, dtype=np.float32)
item_candi = None
with open("/raw_data/"+dataset_name+"/item_candidates.pkl", 'rb') as f:
item_candi = pickle.load(f)
f_list = None
with open("raw_data/"+dataset_name+"/feature_list.pkl", 'rb') as f:
f_list = pickle.load(f)
params = {"device": "cuda",
"alpha": 0.0,
"feature_size": "raw_data/"+dataset_name+"/feature_size.pkl",
"model_struct": 3,
"model_nas": "nas",
}
framework = Train_Arch(dim=8,
weight_decay=0.001,
data_size= -1,
device="cuda",
epoch=50,
params=params)
model_path = "raw_data/data_nas/rand_0.15_3/model/fm_nas.pt"
framework.load_model(model_path)
framework.model.genotype()
# print(framework.model.named_parameters)
# for i in range(len(framework.model._arch_parameters["binary"])):
# framework.model._arch_parameters["binary"][i, np.random.randint(0,5)] = 1
arch = [framework.model._arch_parameters['binary'][i, :].argmax().item()
for i in range(len(framework.model._arch_parameters['binary']))]
print(arch)
framework.model.genotype()
f_len = len(f_list[0])
leng = len(f_list[0])+len(c_list[0])
num = 0
for ind in range(len(f_list)):
num += len(item_candi[ind])
candidates = np.zeros((num, leng),dtype=np.float32)
cnt = 0
for ind in range(len(f_list)):
t = np.zeros(leng)
t[0:f_len] = np.array(f_list[ind], dtype=np.float32)
# print(item_candi[ind])
for c_feature in item_candi[ind]:
t[f_len:] = c_list[c_feature]
candidates[cnt] = t
cnt += 1
# print(candidates.shape)
rank = framework.result(candidates)
# print(rank)
# for name,par in framework.model.named_parameters():
# print(name,par)
ctr_list = []
item_ctr = {}
cnt = 0
for ind in range(len(f_list)):
item_ctr[ind] = []
for c_feature in item_candi[ind]:
item_ctr[ind].append(rank[cnt])
ctr_list.append(rank[cnt])
cnt += 1
ctr_list = sorted(ctr_list)
print('low: ',ctr_list[:10])
print('high: ',ctr_list[-10:])
radio = 0
for ind in range(len(f_list)):
# print(item_ctr[ind])
c = sorted(item_ctr[ind])
a = c[-1]/c[int(len(c)/2)] - 1
radio += a
radio /= len(f_list)
print(radio)
    k = 2
    cnt = 0
    res = []
    # in-place scaling of leaf parameters must run under no_grad (otherwise
    # autograd raises); scaling by 1.3 widens the simulated CTR gaps
    with torch.no_grad():
        for name, parameter in framework.model.named_parameters():
            parameter[:] = parameter * 1.3
            cnt += 1
            res.append(parameter)
rank = framework.result(candidates)
ctr_list = []
item_ctr = {}
cnt = 0
for ind in range(len(f_list)):
item_ctr[ind] = []
for c_feature in item_candi[ind]:
item_ctr[ind].append(rank[cnt])
ctr_list.append(rank[cnt])
cnt += 1
ctr_list = sorted(ctr_list)
print('low: ',ctr_list[0:10])
print('high: ',ctr_list[-10:])
radio = 0
for ind in range(len(f_list)):
c = sorted(item_ctr[ind])
a = c[-1]/c[int(len(c)/2)] - 1
radio += a
radio /= len(f_list)
print(radio)
if __name__ == "__main__":
gen_simulation_ctr() | 3,692 | 30.29661 | 88 | py |
AutoCO | AutoCO-main/exp_simulate/fm_nas/ctr.py | import numpy as np
import pandas as pd
import torch.utils.data
import itertools
import tqdm
import time
def get_index(dataset, dense_list=None):
emb_index = []
cnt = 0
dim = 0
for i in range(len(dataset)):
emb_index.append(dict())
if i in dense_list:
dim += 1
continue
ind = dataset[i].value_counts().index
size = len(ind)
for key in ind:
emb_index[i][key] = cnt
cnt += 1
dim += 1
return emb_index, dim
def get_field_info(dataset, dense=[0,0]):
feature_size = []
for i in range(dense[1]+dense[0]):
if i < dense[0]:
feature_size.append(1)
continue
ind = dataset[i].value_counts().index
print(ind)
size = len(ind)
feature_size.append(size)
return feature_size
class DirectDataset(torch.utils.data.Dataset):
def __init__(self, dataset_r):
dataset = np.array(dataset_r)
print(len(dataset))
self.items = dataset[:, 0:-1].astype(np.float32)
self.n = len(self.items[0])
self.targets = dataset[:, -1].astype(np.float32)
def __len__(self):
return self.targets.shape[0]
def __getitem__(self, index):
return self.items[index], self.targets[index]
class TfsDataset(torch.utils.data.Dataset):
def __init__(self, dataset_path, num=-1):
df = pd.read_pickle(dataset_path)
data = df.to_numpy()
self.items = data[:num, 0:-1].astype(np.float32)
(a, b) = np.shape(self.items)
print(a, b)
self.n = b
print(data[:,-1])
self.targets = self.__preprocess_target(data[:num, -1].astype(np.float32)).astype(np.float32)
        # self.field_dims = np.max(self.items, axis=0) + 1
# self.field_dims = np.array([2]*b)
self.user_field_idx = np.array((0,), dtype=np.float32)
self.item_field_idx = np.array((1,), dtype=np.float32)
def __len__(self):
return self.targets.shape[0]
def __getitem__(self, index):
return self.items[index], self.targets[index]
def __preprocess_target(self, target):
target[target <= 0] = 0
target[target > 0] = 1
return target
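# DenseDataset reads a pickled DataFrame of already-encoded rows; flag == 0
# takes the first `num` rows while flag == 1 takes the last `num` (used as a
# held-out slice), with the label in the final column.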
class DenseDataset(torch.utils.data.Dataset):
def __init__(self, dataset_path, num=-1, dense=[10,6,0], flag=0):
print(dataset_path)
df = pd.read_pickle(dataset_path)
data = df.to_numpy()
# np.random.shuffle(data)
if flag == 0:
self.items = data[:num, 0:-1].astype(np.float32)
self.targets = data[:num, -1].astype(np.float32)
else:
self.items = data[-1-num:-1, 0:-1].astype(np.float32)
self.targets = data[-1-num:-1, -1].astype(np.float32)
(a, b) = np.shape(self.items)
print(a, b)
print(data[:,-1])
# self.embedding_ind, dim = get_index(self.items, [0])
# self.feature_size = get_field_info(df, dense)
# get_field_info(df, dense)
# self.feature_size.append(dense[2])
# self.feature_size[4] = 3952
# import pickle
# with open(path, 'wb') as d:
# pickle.dump(self.feature_size, d)
# print(self.feature_size)
# self.n = dim
def __len__(self):
return self.targets.shape[0]
def __getitem__(self, index):
return self.items[index], self.targets[index]
if __name__=="__main__":
ds = DenseDataset('raw_data/data_movie_log/log.pkl')
| 3,515 | 29.310345 | 101 | py |
AutoCO | AutoCO-main/exp_simulate/fm_nas/train_arch.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import argparse
from model import NAS, NAS_TS, SNAS, DSNAS, SNAS_TS, DSNAS_TS
from ctr import TfsDataset, DirectDataset, DenseDataset
from torch.utils.data import DataLoader
import time
from utils import DataPrefetcher
from sklearn.metrics import roc_auc_score, log_loss
import pickle
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def get_dataset(path, name, num=-1, flag=0):
if name == 'tfs':
return TfsDataset(path, num)
if name == 'direct':
return DirectDataset(path)
if name == 'embedded':
return DenseDataset(path, num, flag=flag)
class Train_Arch():
def __init__(self,
dim=20,
weight_decay=0.001,
data_size = -1,
epoch=30,
train_scale=0.4,
valid_scale=0.4,
learning_rate=0.001,
batch_size=1024,
device='cuda',
params=None):
self.device = torch.device(device)
self.dim = dim
self.epoch = epoch
self.batch_size = batch_size
self.learning_rate = learning_rate
self.weight_decay = weight_decay
self.params = params
self.model_nas = None
self.data_size = data_size
self.train_scale = train_scale
self.valid_scale = valid_scale
with open(params['feature_size'], 'rb') as f:
self.feature_size = pickle.load(f)
print(self.feature_size)
self.clock = [0 for _ in range(10)]
self.run_first = 0
self.last_arch = None
self.model_path = self.params["model_path"]
def set_dataloader(self, dataset=None, dataset_path=None, data_size=-1,train_scale=-1, valid_scale=-1, flag=0):
if train_scale == -1:
data_size = self.data_size
train_scale = self.train_scale
valid_scale = self.valid_scale
self.log_data = dataset
self.data_path = dataset_path
        if dataset is None:
self.dataset = get_dataset(dataset_path, 'embedded', data_size, flag)
else:
self.dataset = get_dataset(dataset, 'direct')
train_length = int(len(self.dataset) * train_scale)
valid_length = int(len(self.dataset) * valid_scale)
test_length = len(self.dataset) - train_length - valid_length
train_dataset, temp_dataset = torch.utils.data.random_split(
self.dataset, (train_length, len(self.dataset) - train_length))
valid_dataset, test_dataset = torch.utils.data.random_split(
temp_dataset, (valid_length, test_length))
self.train_data_loader = DataLoader(train_dataset, batch_size=self.batch_size, num_workers=32)
# valid_size = int(len(valid_dataset)/len(train_data_loader))+1
self.valid_data_loader = DataLoader(valid_dataset, batch_size=self.batch_size, num_workers=32)
self.test_data_loader = DataLoader(test_dataset, batch_size=self.batch_size, num_workers=32)
return self.train_data_loader, self.valid_data_loader, self.test_data_loader
def get_model(self, model_nas, model_struct):
if model_struct not in [0,1,2,3]:
print("no model struct %s in model nas %s class!!!", model_struct, model_nas)
exit()
if model_nas == "nas":
return NAS(self.feature_size, self.dim, self.params, m_type=model_struct)
elif model_nas == "snas":
return SNAS(self.feature_size, self.dim, self.params, m_type=model_struct)
elif model_nas == "dsnas":
return DSNAS(self.feature_size, self.dim, self.params, m_type=model_struct)
elif model_nas == "nas_ts":
return NAS_TS(self.feature_size, self.dim, self.params, m_type=model_struct)
elif model_nas == "snas_ts":
return SNAS_TS(self.feature_size, self.dim, self.params, m_type=model_struct)
elif model_nas == "dsnas_ts":
return DSNAS_TS(self.feature_size, self.dim, self.params, m_type=model_struct)
else:
print("no model named %s in nas train class!!!", model_nas)
exit()
def initial_model(self, model_nas, model_struct, optimizer='adam'):
self.model_nas = model_nas
self.model = self.get_model(model_nas, model_struct).to(self.device)
# print(1)
self.optimizer = torch.optim.Adam(params=self.model.parameters(),
lr=self.learning_rate,
weight_decay=self.weight_decay)
# self.optimizer = torch.optim.Adagrad(params=self.model.parameters(),lr=self.learning_rate, weight_decay=self.weight_decay)
self.arch_optimizer = torch.optim.Adam(params=self.model.arch_parameters(),
lr=self.learning_rate,
weight_decay=self.weight_decay)
if self.params["simulate_type"]=="click":
self.criterion = torch.nn.BCELoss() #reduction="sum"
else:
# self.criterion = torch.nn.MSELoss()
self.criterion = F.binary_cross_entropy
def train_arch(self, train_type=0):
self.model.train()
losses = []
prefetcher = DataPrefetcher(self.train_data_loader, self.device)
total_y =[0,0]
clock = [0 for _ in range(10)]
clock[0] -= time.time()
step = 0
while 1:
clock[1] -= time.time()
train_data = prefetcher.next()
temperature = 2.5 * np.exp(-0.036 * step)
            if train_data is None:
clock[1] += time.time()
break
(fields, target) = train_data
fields, target = fields.to(self.device), target.to(self.device)
clock[1] += time.time()
if train_type == 0:
if "dsnas" in self.model_nas:
others = self.optimizer
elif "snas" in self.model_nas:
others = temperature
else:
others = 0
if "_ts" in self.model_nas:
loss = self.model.step_ts(fields, target, self.criterion, self.arch_optimizer, others)
else:
loss = self.model.step(fields, target, self.criterion, self.arch_optimizer, others)
losses.append(loss.cpu().detach().item())
else:
self.optimizer.zero_grad()
self.arch_optimizer.zero_grad()
if self.model_nas== "snas":
self.model.binarize(temperature)
y = self.model(fields, 1)
loss = self.criterion(y, target.float())
elif "_ts" in self.model_nas:
self.model.binarize()
if train_type == 1:
k, y = self.model.forward_ts(fields, 1)
elif train_type == 2:
k, y = self.model.forward_ts(fields, 0)
# loss_t = F.binary_cross_entropy(input=y,target=target,reduction='sum')
loss_t = self.criterion(y, target.float())*self.batch_size
alpha = 1/len(self.train_data_loader)
loss = alpha * k + loss_t
total_y[0]+=k
total_y[1]+=loss_t
else:
self.model.binarize()
if train_type == 1:
y = self.model(fields, 1)
elif train_type == 2:
y = self.model(fields, 0)
loss = self.criterion(y, target.float())
loss.backward()
self.model.recover()
self.optimizer.step()
                losses.append(loss.cpu().detach().item())
            step += 1  # advance the temperature annealing schedule
clock[0] += time.time()
print('cost:', [round(c, 5) for c in clock[0:7]])
# print("model cost:", [round(c, 5) for c in self.model.cost])
self.model.cost = [0 for _ in range(6)]
# g, gp = self.model.genotype()
# print('genotype: %s' % g)
# print('genotype_p: %s' % gp)
if "nas_ts" in self.model_nas:
print('kl distance ', total_y[0].item() / len(self.train_data_loader),'bce loss ', total_y[1].item() / len(self.train_data_loader))
print('---- loss:', np.mean(losses))
return np.mean(losses)
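    # Hedged summary of the train_type switch above (read from the code, not
    # documented by the authors): train_type == 0 runs one architecture update
    # through the model's own step()/step_ts() with arch_optimizer;
    # train_type == 1/2 freeze the sampled (binarized) architecture and update
    # only the network weights, differing in the flag passed to forward.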
def run(self):
print("run!!!!!!")
if self.params["trick"] == 0:
arch = self.params["arch"]
for j in range(len(self.model._arch_parameters["binary"])):
self.model._arch_parameters["binary"].data[j, arch[j]] = 1
best_loss = 1000000
stop_cnt = 0
self.model.genotype()
for epoch_i in range(self.epoch):
print(epoch_i, end=' ')
loss = self.train_arch(train_type=2)
if len(self.test_data_loader)>10:
self.test(self.test_data_loader)
if best_loss - 0.000001 < loss:
stop_cnt += 1
if stop_cnt > 6:
break
else:
best_loss = loss
self.save_model(self.model_path)
stop_cnt = 0
self.load_model(self.model_path)
if len(self.test_data_loader)>10:
self.test(self.test_data_loader)
elif self.params["trick"] == 1:
for epoch_i in range(self.epoch):
print(epoch_i, end=' ')
self.train_arch()
self.model.genotype()
self.train_arch(train_type=2)
if len(self.test_data_loader)> 10:
self.test(self.test_data_loader)
best_loss = 1000000
stop_cnt = 0
for i in range(100):
print(i, end=' ')
loss = self.train_arch(train_type=2)
if best_loss - 0.000001 < loss:
stop_cnt += 1
if stop_cnt > 6:
break
else:
best_loss = loss
self.save_model(self.model_path)
stop_cnt = 0
self.load_model(self.model_path)
def test(self, data_loader):
self.model.eval()
self.model.binarize()
targets, predicts, id_list = list(), list(), list()
loss = 0
with torch.no_grad():
for fields, target in data_loader:
fields, target = fields.to(self.device), target.to(self.device)
y = self.model(fields, 0)
loss += F.binary_cross_entropy(input=y,target=target,reduction='mean')
targets.extend(target.tolist())
predicts.extend(y.tolist())
self.model.recover()
auc = roc_auc_score(targets, predicts)
# print("loss: ", loss.item()/len(data_loader))
print("auc: ", auc)#, " g_auc:", cal_group_auc(targets, predicts, id_list)
# print("bce:", torch.nn.functional.binary_cross_entropy(input=predicts,target=target,reduction='mean'))
return auc
def result(self, candidates):
self.model.eval()
self.model.binarize()
candidates = torch.from_numpy(candidates).to(self.device)
if "_ts" in self.model_nas:
_, ranking = self.model.forward_ts(candidates, 0, cal_kl=0)
else:
ranking = self.model(candidates, 0)
self.model.recover()
return ranking.detach().cpu().numpy()
def save_model(self, path):
torch.save(self.model, path)
def load_model(self, path):
self.model_nas = self.params["model_nas"]
self.model = torch.load(path)
def print_w(self):
self.model.print()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', type=str, default='raw_data/dataset/dataset_test.pkl')
parser.add_argument('--feature_size', type=str, default='raw_data/data_online/feature_size.pkl')
parser.add_argument('--dim', type=int, default=8)
parser.add_argument('--decay', type=float, default=0.001)
parser.add_argument('--learning', type=float, default=0.001)
parser.add_argument("--device", type=str, default="cuda")
parser.add_argument("--epoch", type=int, default=50)
parser.add_argument("--dataset", type=int, default=-1)
parser.add_argument("--times",type=int,default=1)
parser.add_argument("--optimizer",type= str, default="adam")
parser.add_argument('--model_struct', type=int, default=0)
parser.add_argument('--model_nas', type=str, default="fm")
args = parser.parse_args()
params = {"device": args.device,
"alpha": 0.0,
"dense": [10,6,0],
'feature_size': args.feature_size,
"operator": "multiply",
'trick': 2,
"model_struct": args.model_struct,
"model_nas": args.model_nas,
"first_order": 1,
"calcu_dense": 0,
"f_len": 12,
"early_fix_arch": False,
"simulate_type":"click",
'arch': [4,4,1,4,4,4,2,2,4,4,1,1,1,1,1]} # [1,0,2,0,3,0,3,1,2,2,0,3,0,3,1,0,1,2,3,0,1,2]
# [1,0,2,0,3,0,3,1,2,2,0,3,0,3,1][4,4,1,4,4,4,2,2,4,4,1,1,1,1,1]
framework = Train_Arch(dim=int(args.dim),
weight_decay= float(args.decay),
data_size=int(args.dataset),
device=args.device,
train_scale=0.8,
valid_scale=0,
epoch=int(args.epoch),
params=params)
result = []
for i in range(args.times):
search_start = time.time()
framework.set_dataloader(dataset_path=args.dataset_path)
framework.initial_model(args.model_nas, args.model_struct, "adam")
# framework.params["trick"] = 0
# framework.epoch = int(args.epoch)
# framework.train_scale=0.4
# framework.valid_scale=0.4
# framework.set_dataloader(dataset_path=args.dataset_path)
# # for j in range(len(framework.model._arch_parameters["binary"])):
# # framework.model._arch_parameters["binary"].data[j, np.random.randint(0,3)] = 1
framework.params["arch"] = [1 for i in range(framework.model.multi_len)]
framework.run()
framework.model.genotype()
# framework.test(framework.train_data_loader)
# model_path = "../dataset/data_nas/raw_data/data_nas/rand_0.15_3/model/fm_nas.pt"
# framework.save_model(model_path)
print("---------------------")
print("cost: ", time.time()-search_start)
print("---------------------")
# # framework.load_model(model_path)
# arch = [framework.model._arch_parameters['binary'][i, :].argmax().item()
# for i in range(len(framework.model._arch_parameters['binary']))]
# # framework.params["arch"] = arch
# framework.params["trick"] = 2
# framework.epoch = 30
# framework.train_scale=0.8
# framework.valid_scale=0
# framework.set_dataloader(dataset_path=args.dataset_path)
# framework.run()
result.append(framework.model.genotype())
result.append(framework.test(framework.train_data_loader))
print(result)
| 15,572 | 42.744382 | 143 | py |
AutoCO | AutoCO-main/exp_simulate/fm_nas/train.py | import os
import time
import torch
import pickle
import numpy as np
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score, log_loss
from torch.utils.data import DataLoader
from ctr import TfsDataset, DirectDataset, DenseDataset
from model import OFM, OFM_TS
from utils import cal_group_auc, FTRL
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def get_dataset(path, name, num=-1, flag=0):
if name == 'tfs':
return TfsDataset(path, num)
if name == 'direct':
return DirectDataset(path)
if name == 'embedded':
return DenseDataset(path, num, flag=flag)
class Core(object):
def __init__(self,
dim=20,
weight_decay=0.001,
data_size = -1,
epoch=30,
train_scale=0.4,
valid_scale=0.4,
learning_rate=0.001,
batch_size=1024,
device='cuda',
params=None):
self.device = torch.device(device)
self.dim = dim
self.epoch = epoch
self.batch_size = batch_size
self.learning_rate = learning_rate
self.weight_decay = weight_decay
self.params = params
self.model_nas = None
self.data_size = data_size
self.train_scale = train_scale
self.valid_scale = valid_scale
with open(params['feature_size'], 'rb') as f:
self.feature_size = pickle.load(f)
self.clock = [0 for _ in range(10)]
def set_dataloader(self, dataset=None, dataset_path=None, data_size=-1,train_scale=-1, valid_scale=-1, flag=0):
"split data into 3 part, train, test, valid"
if train_scale == -1:
data_size = self.data_size
train_scale = self.train_scale
valid_scale = self.valid_scale
self.log_data = dataset
self.data_path = dataset_path
        if dataset is None:
self.dataset = get_dataset(dataset_path, 'embedded', data_size, flag)
else:
self.dataset = get_dataset(dataset, 'direct')
train_length = int(len(self.dataset) * train_scale)
valid_length = int(len(self.dataset) * valid_scale)
test_length = len(self.dataset) - train_length - valid_length
train_dataset, temp_dataset = torch.utils.data.random_split(
self.dataset, (train_length, len(self.dataset) - train_length))
valid_dataset, test_dataset = torch.utils.data.random_split(
temp_dataset, (valid_length, test_length))
self.train_data_loader = DataLoader(train_dataset, batch_size=self.batch_size, num_workers=32)
# valid_size = int(len(valid_dataset)/len(train_data_loader))+1
self.valid_data_loader = DataLoader(valid_dataset, batch_size=self.batch_size, num_workers=32)
self.test_data_loader = DataLoader(test_dataset, batch_size=self.batch_size, num_workers=32)
return self.train_data_loader, self.valid_data_loader, self.test_data_loader
def get_model(self, model_nas, model_struct):
if model_struct not in [0,2]:
print("no model struct %s in model nas %s class!!!", model_struct, model_nas)
exit()
if model_nas == "fm":
model = OFM(self.feature_size, self.dim, self.params, m_type=model_struct)
model.setotype(self.params["operator"])
return model
elif model_nas == "fm_ts":
model = OFM_TS(self.feature_size, self.dim, self.params, m_type=model_struct)
model.setotype(self.params["operator"])
return model
else:
print("no model named %s in train class!!!", model_nas)
exit()
def initial_model(self, model_nas, model_struct, optimizer='adam'):
self.model_nas = model_nas
self.model = self.get_model(model_nas, model_struct).to(self.device)
# print(1)
if self.params["simulate_type"]=="click":
self.criterion = torch.nn.BCELoss() #reduction="sum"
else:
self.criterion = F.binary_cross_entropy
# self.criterion = torch.nn.MSELoss()
if optimizer == 'adam':
self.optimizer = torch.optim.Adam(params=self.model.parameters(),
lr=self.learning_rate, weight_decay=self.weight_decay)
elif optimizer == 'ftrl':
self.optimizer = FTRL(params=self.model.parameters(),
alpha=1.0, beta=1.0, l1=0.001, l2=0.001)
def train(self, data_loader, flag=0):
self.model.train()
total_loss = 0
total_y =[0,0]
M = len(data_loader)
self.clock[0] -= time.time()
for i, (fields, target) in enumerate(data_loader):
self.clock[1] -= time.time()
fields, target = fields.to(self.device), target.to(self.device)
self.clock[1] += time.time()
self.clock[2] -= time.time()
if self.model_nas == "fm_ts":
k, y = self.model.forward_ts(fields, 0)
# loss_t = F.binary_cross_entropy(input=y,target=target,reduction='sum')
loss_t = self.criterion(y, target.float())*self.batch_size
alpha = 1/len(data_loader)
loss = alpha * k + loss_t
total_y[0]+=k
total_y[1]+=loss_t
else:
y = self.model(fields, 0)
loss = self.criterion(y, target.float())
self.clock[2] += time.time()
self.clock[3] -= time.time()
self.model.zero_grad()
loss.backward()
self.clock[3] += time.time()
self.clock[4] -= time.time()
self.optimizer.step()
self.clock[4] += time.time()
total_loss += loss.item()
self.clock[0] += time.time()
if self.model_nas == "fm_ts":
print('kl distance ', total_y[0].item() / len(data_loader),'bce loss ', total_y[1].item() / len(data_loader))
print('------ loss:', total_loss / len(data_loader))
print([round(c, 5) for c in self.clock[0:7]])
# print(self.model.cost)
self.model.cost = [0 for _ in range(6)]
self.clock = [0 for _ in range(10)]
return total_loss / len(data_loader)
def test(self, data_loader):
self.model.eval()
targets, predicts, id_list = list(), list(), list()
loss = 0
with torch.no_grad():
for fields, target in data_loader:
fields, target = fields.to(self.device), target.to(self.device)
if self.model_nas == "fm_ts":
_,y = self.model.forward_ts(fields, 0, cal_kl=1)
else:
y = self.model(fields, 0)
loss += torch.nn.functional.binary_cross_entropy(input=y,target=target,reduction='sum')
targets.extend(target.tolist())
predicts.extend(y.tolist())
for i in fields.tolist():
id_list.append(",".join([str(s) for s in i[:68]]))
# print("loss: ", loss.item()/len(data_loader))
auc = roc_auc_score(targets, predicts)
print("auc: ", auc)#, " g_auc:", cal_group_auc(targets, predicts, id_list)
# print("bce:", torch.nn.functional.binary_cross_entropy(input=predicts,target=target,reduction='mean'))
return auc
def run(self, flag=0):
epoch = self.epoch
start = time.time()
stop_cnt = 0
best_loss = 10000000
for i in range(epoch):
print(i, end=" ")
loss = self.train(self.train_data_loader)
if best_loss - 0.000001 < loss:
stop_cnt += 1
if stop_cnt > 6:
break
else:
best_loss = loss
stop_cnt = 0
print(len(self.test_data_loader))
if len(self.test_data_loader)> 10:
self.test(self.test_data_loader)
# if i%10==0 or epoch-i<4:
# self.print_w()
# if i%10 == 9:
# print('epoch:', i + 1)
print("cost time: ", time.time()-start)
def result(self, candidates):
self.model.eval()
candidates = torch.from_numpy(candidates).to(self.device)
if self.model_nas == "fm_ts":
_, ranking = self.model.forward_ts(candidates, 0, cal_kl=0)
else:
ranking = self.model(candidates, 0)
return ranking.detach().cpu().numpy()
def print_w(self):
self.model.print()
def save_model(self, path):
torch.save(self.model, path)
def load_model(self, path):
self.model = torch.load(path)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', type=str, default='raw_data/dataset/dataset_1400.pkl')
parser.add_argument('--feature_size', type=str, default='raw_data/data_1400w/feature_size_id.pkl')
parser.add_argument('--dim', type=int, default=8)
parser.add_argument('--decay', type=float, default=0.001)
parser.add_argument('--learning', type=float, default=0.001)
parser.add_argument("--device", type=str, default="cuda")
parser.add_argument("--epoch", type=int, default=15)
parser.add_argument("--dataset", type=int, default=-1)
parser.add_argument("--times",type=int,default=1)
parser.add_argument("--optimizer",type= str, default="adam")
parser.add_argument("--model", type=str, default="fm")
parser.add_argument("--oper", type=str, default="multiply")
args = parser.parse_args()
params = {"device": args.device,
"alpha": 0.0,
"dense": [0,5,18],
'feature_size': args.feature_size,
"operator": args.oper,
'trick': 2,
"first_order": 1,
"calcu_dense": 0,
'arch':[1,0,2,0,3,0,3,1,2,2,0,3,0,3,1] } # [1,0,2,0,3,0,3,1,2,2,0,3,0,3,1,0,1,2,3,0,1,2]
framework = Core(dim=int(args.dim),
weight_decay= float(args.decay),
data_size=int(args.dataset),
device=args.device,
                     train_scale=0.8,
                     valid_scale=0,
epoch=int(args.epoch),
params=params)
framework.set_dataloader(dataset_path=args.dataset_path)
for i in range(args.times):
search_start = time.time()
        framework.initial_model(args.model, 0, args.optimizer)  # model_struct 0; optimizer taken from the CLI
# for j in range(len(framework.model._arch_parameters["binary"])):
# framework.model._arch_parameters["binary"].data[j, np.random.randint(0,3)] = 1
framework.run()
# framework.model.genotype()
framework.test(framework.train_data_loader)
print("---------------------")
print("cost: ", time.time()-search_start)
print("---------------------")
# framework.load_model(model_path)
framework.print_w()
# with open("../dataset/data_nas/raw_data/data_nas/model_param_0.15.pkl", 'rb') as f:
# param = torch.load(f)
# cnt = 0
# for name, parameter in framework.model.named_parameters():
# parameter[:] = param[cnt]
# cnt += 1
# framework.test(framework.train_data_loader)
| 11,347 | 39.820144 | 121 | py |
AutoCO | AutoCO-main/exp_public/mushroom/simulate/main.py | import os
import sys
import glob
import numpy as np
import torch
import logging
import argparse
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.utils
import torch.nn.functional as F
from torch.autograd import Variable
import time
import utils
from train import train
from vartional_model import DSNAS_v, NASP_v, MAX_v, PLUS_v, CONCAT_v, MIN_v, MULTIPLY_v, NASP
from baseline import FM, FM_v, FM_v2, Random, Egreedy, Thompson, LinUCB, LinEGreedy, LinThompson
os.environ['KMP_DUPLICATE_LIB_OK']='True'
parser = argparse.ArgumentParser(description="Search.")
parser.add_argument('--data', type=str, default='data', help='location of the data corpus')
parser.add_argument('--lr', type=float, default=5e-2, help='init learning rate')
parser.add_argument('--arch_lr', type=float, default=3e-4, help='learning rate for arch encoding')
parser.add_argument('--weight_decay', type=float, default=1e-5, help='weight decay')
parser.add_argument('--opt', type=str, default='Adagrad', help='choice of opt')
parser.add_argument('--batch_size', type=int, default=512, help='choose batch size')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--search_epochs', type=int, default=100, help='num of searching epochs')
parser.add_argument('--train_epochs', type=int, default=10000, help='num of training epochs')
parser.add_argument('--save', type=str, default='EXP')
parser.add_argument('--seed', type=int, default=1, help='random seed')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--train_portion', type=float, default=0.5, help='portion of training data')
parser.add_argument('--valid_portion', type=float, default=0.25, help='portion of validation data')
parser.add_argument('--dataset', type=str, default='ml-100k', help='dataset')
parser.add_argument('--mode', type=str, default='sif', help='choose how to search')
parser.add_argument('--embedding_dim', type=int, default=8, help='dimension of embedding')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--gen_max_child', action='store_true', default=False, help='generate child network by argmax(alpha)')
parser.add_argument('--gen_max_child_flag', action='store_true', default=False, help='flag of generating child network by argmax(alpha)')
parser.add_argument('--random_sample', action='store_true', default=False, help='true if sample randomly')
parser.add_argument('--early_fix_arch', action='store_true', default=False, help='bn affine flag')
parser.add_argument('--loc_mean', type=float, default=1, help='initial mean value to generate the location')
parser.add_argument('--loc_std', type=float, default=0.01, help='initial std to generate the location')
parser.add_argument('--momentum', type=float, default=0.9, help="momentum")
parser.add_argument('--ofm', action='store_true', default=False, help="different operation with different embedding")
parser.add_argument('--embedding_num', type=int, default=12, help="the size of embedding dictionary")
parser.add_argument('--multi_operation', action='store_true', default=False, help="use multi operation or not")
parser.add_argument('--epsion', type=float, default=0.0, help="epsion of egreedy")
parser.add_argument('--search_epoch', type=int, default=10, help="the epoch num for searching arch")
parser.add_argument('--trans', action='store_true', default=False, help="trans the embedding or not!")
parser.add_argument('--first_order', action='store_true', default=False, help="use first order or not!")
args = parser.parse_args()
print("args ofm:", args.ofm)
print("embedding_num:", args.embedding_num)
save_name = 'experiments/{}/search-{}-{}-{}-{}-{}-{}-{}-{}'.format(args.dataset, time.strftime("%Y%m%d-%H%M%S"),
args.mode, args.save, args.embedding_dim, args.opt, args.lr, args.arch_lr, args.seed)
if args.unrolled:
save_name += '-unrolled'
save_name += '-' + str(np.random.randint(10000))
utils.create_exp_dir(save_name, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(save_name, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
def main():
if not torch.cuda.is_available():
logging.info('no gpu device available')
np.random.seed(args.seed)
# torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
torch.manual_seed(args.seed)
cudnn.enabled = True
# torch.cuda.manual_seed(args.seed)
# logging.info('gpu device = %d' % args.gpu)
logging.info("args = %s", args)
data_start = time.time()
if args.mode == "LinUCB" or args.mode == "LinEGreedy" or args.mode == "LinThompson":
train_queue = utils.get_data_queue(args, True)
else:
train_queue = utils.get_data_queue(args, False)
logging.info('prepare data finish! [%f]' % (time.time() - data_start))
if args.mode == "DSNAS_v":
model = DSNAS_v(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = torch.optim.Adam(
[param for name, param in model.named_parameters() if name == 'log_alpha'],
lr=args.arch_lr,
betas=(0.5, 0.999),
weight_decay=0.0
)
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
elif args.mode == "NASP_v":
model = NASP_v(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = torch.optim.Adam(
[param for name, param in model.named_parameters() if name == 'log_alpha'],
lr=args.arch_lr,
betas=(0.5, 0.999),
weight_decay=0.0
)
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
elif args.mode == "NASP":
model = NASP(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = torch.optim.Adam(
[param for name, param in model.named_parameters() if name == 'log_alpha'],
lr=args.arch_lr,
betas=(0.5, 0.999),
weight_decay=0.0
)
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
elif args.mode == "Random":
model = Random(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = None
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "Egreedy":
model = Egreedy(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = None
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "Thompson":
model = Thompson(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = None
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "LinUCB":
model = LinUCB(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = None
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "LinThompson":
model = LinThompson(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = None
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "LinEGreedy":
model = LinEGreedy(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = None
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "FM":
model = FM(args.embedding_dim, args.weight_decay, args)
g, gp = model.genotype()
logging.info('genotype: %s' % g)
logging.info('genotype_p: %s' % gp)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'], args.lr)
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "FM_v":
model = FM_v(args.embedding_dim, args.weight_decay, args)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "MULTIPLY_v":
model = MULTIPLY_v(args.embedding_dim, args.weight_decay, args)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "MAX_v":
model = MAX_v(args.embedding_dim, args.weight_decay, args)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "MIN_v":
model = MIN_v(args.embedding_dim, args.weight_decay, args)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "PLUS_v":
model = PLUS_v(args.embedding_dim, args.weight_decay, args)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
elif args.mode == "CONCAT_v":
model = CONCAT_v(args.embedding_dim, args.weight_decay, args)
optimizer = torch.optim.Adagrad([param for name, param in model.named_parameters() if name != 'log_alpha'],
args.lr)
arch_optimizer = None
for search_epoch in range(1):
g, gp, loss, rewards = train(train_queue, model, optimizer, arch_optimizer, logging)
logging.info("total reward: %s" % rewards)
else:
raise ValueError("bad choice!")
if __name__ == '__main__':
main()
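# Hedged example invocation (flags are illustrative; see the argparse block above):
#   python main.py --mode DSNAS_v --dataset mushroom --embedding_dim 8 \
#       --lr 5e-2 --arch_lr 3e-4 --multi_operation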
| 13,976 | 50.386029 | 137 | py |
AutoCO | AutoCO-main/exp_public/mushroom/simulate/utils.py | import numpy as np
import pandas as pd
import os
import os.path
import sys
import shutil
import torch
import torch.nn as nn
import torch.utils
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction import DictVectorizer
from sklearn.utils import shuffle
from torch.utils.data import Dataset, DataLoader
from models import PRIMITIVES_BINARY
def create_exp_dir(path, scripts_to_save=None):
if not os.path.exists(path):
os.makedirs(path)
print('Experiment dir : {}'.format(path))
if scripts_to_save is not None:
os.mkdir(os.path.join(path, 'scripts'))
for script in scripts_to_save:
dst_file = os.path.join(path, 'scripts', os.path.basename(script))
shutil.copyfile(script, dst_file)
def sample_arch():
arch = {}
arch['mlp'] = {}
arch['mlp']['p'] = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
arch['mlp']['q'] = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
arch['binary'] = PRIMITIVES_BINARY[np.random.randint(len(PRIMITIVES_BINARY))]
return arch
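# Hedged usage sketch (illustrative, not used elsewhere): draw one random
# architecture and read out its interaction op and the p-side MLP.
def _sample_arch_demo():
    arch = sample_arch()
    return arch['binary'], arch['mlp']['p']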
class Mushroom(Dataset):
    def __init__(self, root_dir, dummy):
        self.data = pd.read_csv(root_dir)
        self.dummy = dummy
def __len__(self):
return self.data.shape[0]
def one_hot(self, df, cols):
res = []
for col in cols:
dummies = pd.get_dummies(df[col], prefix=col, drop_first=False)
res.append(dummies)
df = pd.concat(res, axis=1)
return df
def __getitem__(self, index):
sample = {}
        if not self.dummy:
sample["cap-shape"] = self.data.iloc[index]["cap-shape"]
sample["cap-surface"] = self.data.iloc[index]["cap-surface"]
sample["cap-color"] = self.data.iloc[index]["cap-color"]
sample["bruises"] = self.data.iloc[index]["bruises"]
sample["odor"] = self.data.iloc[index]["odor"]
sample["gill-attachment"] = self.data.iloc[index]["gill-attachment"]
sample["gill-spacing"] = self.data.iloc[index]["gill-spacing"]
sample["gill-size"] = self.data.iloc[index]["gill-size"]
sample["gill-color"] = self.data.iloc[index]["gill-color"]
sample["stalk-shape"] = self.data.iloc[index]["stalk-shape"]
sample["stalk-root"] = self.data.iloc[index]["stalk-root"]
sample["stalk-surface-above-ring"] = self.data.iloc[index]["stalk-surface-above-ring"]
sample["stalk-surface-below-ring"] = self.data.iloc[index]["stalk-surface-below-ring"]
sample["stalk-color-above-ring"] = self.data.iloc[index]["stalk-color-above-ring"]
sample["stalk-color-below-ring"] = self.data.iloc[index]["stalk-color-below-ring"]
sample["veil-type"] = self.data.iloc[index]["veil-type"]
sample["veil-color"] = self.data.iloc[index]["veil-color"]
sample["ring-number"] = self.data.iloc[index]["ring-number"]
sample["ring-type"] = self.data.iloc[index]["ring-type"]
sample["spore-print-color"] = self.data.iloc[index]["spore-print-color"]
sample["population"] = self.data.iloc[index]["population"]
sample["habitat"] = self.data.iloc[index]["habitat"]
eat_reward = self.data.iloc[index]["eat_reward"]
noteat_reward = self.data.iloc[index]["noteat_reward"]
sample["label"] = torch.Tensor([noteat_reward, eat_reward])
#sample["label"] = torch.Tensor([eat_reward, noteat_reward])
else:
cols = list(self.data.columns)
_ = cols.remove("eat_reward")
_ = cols.remove("noteat_reward")
data2 = self.one_hot(self.data, cols)
sample["feature"] = torch.Tensor(data2.iloc[index][:])
eat_reward = self.data.iloc[index]["eat_reward"]
noteat_reward = self.data.iloc[index]["noteat_reward"]
sample["label"] = torch.Tensor([eat_reward, noteat_reward])
return sample
def get_data_queue(args, dummy):
print(args.dataset)
if args.dataset == 'mushroom':
train_data = "../data/data_sample.csv"
        train_dataset = Mushroom(train_data, dummy)
train_queue = DataLoader(train_dataset, batch_size=args.batch_size,pin_memory=True)
return train_queue
else:
return None
class Mushroom2(Dataset):
def __init__(self, contexts, pos_weights):
self.data = contexts
self.pos_weights = pos_weights
def __len__(self):
return self.data["label"].shape[0]
def __getitem__(self, index):
sample = {}
sample["cap-shape"] = self.data["cap-shape"][index]
sample["cap-surface"] = self.data["cap-surface"][index]
sample["cap-color"] = self.data["cap-color"][index]
sample["bruises"] = self.data["bruises"][index]
sample["odor"] = self.data["odor"][index]
sample["gill-attachment"] = self.data["gill-attachment"][index]
sample["gill-spacing"] = self.data["gill-spacing"][index]
sample["gill-size"] = self.data["gill-size"][index]
sample["gill-color"] = self.data["gill-color"][index]
sample["stalk-shape"] = self.data["stalk-shape"][index]
sample["stalk-root"] = self.data["stalk-root"][index]
sample["stalk-surface-above-ring"] = self.data["stalk-surface-above-ring"][index]
sample["stalk-surface-below-ring"] = self.data["stalk-surface-below-ring"][index]
sample["stalk-color-above-ring"] = self.data["stalk-color-above-ring"][index]
sample["stalk-color-below-ring"] = self.data["stalk-color-below-ring"][index]
sample["veil-type"] = self.data["veil-type"][index]
sample["veil-color"] = self.data["veil-color"][index]
sample["ring-number"] = self.data["ring-number"][index]
sample["ring-type"] = self.data["ring-type"][index]
sample["spore-print-color"] = self.data["spore-print-color"][index]
sample["population"] = self.data["population"][index]
sample["habitat"] = self.data["habitat"][index]
sample["label"] = self.data["label"][index]
sample["pos_weights"] = self.pos_weights[index]
return sample
def get_data_queue_bandit(args, contexts, pos_weights):
train_dataset = Mushroom2(contexts, pos_weights)
train_queue = DataLoader(train_dataset, batch_size=args.batch_size,pin_memory=True)
return train_queue
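# Hedged sketch (hypothetical `args` carrying a batch_size attribute): replay
# logged contexts and position weights through the bandit queue built above.
def _bandit_queue_demo(args, contexts, pos_weights):
    queue = get_data_queue_bandit(args, contexts, pos_weights)
    batch = next(iter(queue))
    return batch["label"].shape, batch["pos_weights"].shape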
| 6,504 | 43.554795 | 98 | py |
AutoCO | AutoCO-main/exp_public/mushroom/simulate/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
import utils
import time
PRIMITIVES_BINARY = ['plus', 'multiply', 'max', 'min', 'concat']
PRIMITIVES_NAS = [0, 2, 4, 8, 16]
SPACE_NAS = pow(len(PRIMITIVES_NAS), 5)
OPS = {
'plus': lambda p, q: p + q,
'multiply': lambda p, q: p * q,
'max': lambda p, q: torch.max(torch.stack((p, q)), dim=0)[0],
'min': lambda p, q: torch.min(torch.stack((p, q)), dim=0)[0],
'concat': lambda p, q: torch.cat([p, q], dim=-1),
'norm_0': lambda p: torch.ones_like(p),
'norm_0.5': lambda p: torch.sqrt(torch.abs(p) + 1e-7),
'norm_1': lambda p: torch.abs(p),
'norm_2': lambda p: p ** 2,
'I': lambda p: torch.ones_like(p),
'-I': lambda p: -torch.ones_like(p),
'sign': lambda p: torch.sign(p),
}
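# Hedged sanity check (shapes illustrative): the five binary operators map two
# (batch, dim) tensors to (batch, dim), except 'concat', which gives (batch, 2*dim).
def _ops_shape_demo():
    p, q = torch.randn(2, 4), torch.randn(2, 4)
    return {name: OPS[name](p, q).shape for name in PRIMITIVES_BINARY}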
def constrain(p):
c = torch.norm(p, p=2, dim=1, keepdim=True)
c[c < 1] = 1.0
p.data.div_(c)
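# Hedged note: `constrain` is an in-place max-norm projection -- any row of p
# with L2 norm above 1 is rescaled onto the unit ball, e.g.
#   constrain(fc.weight.data)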
def MixedBinary(embedding_p, embedding_q, weights, FC):
return torch.sum(torch.stack([w * fc(OPS[primitive](embedding_p, embedding_q)) \
for w,primitive,fc in zip(weights,PRIMITIVES_BINARY,FC)]), 0)
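# Hedged sketch of what MixedBinary computes for one feature pair: a weighted
# sum over the candidate interaction ops, each followed by its own linear head.
# All sizes below are illustrative.
def _mixed_binary_demo():
    dim = 8
    p, q = torch.randn(4, dim), torch.randn(4, dim)
    fc = nn.ModuleList([nn.Linear(2 * dim if op == 'concat' else dim, 1, bias=False)
                        for op in PRIMITIVES_BINARY])
    weights = F.softmax(torch.randn(len(PRIMITIVES_BINARY)), dim=-1)
    return MixedBinary(p, q, weights, fc)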
class Virtue(nn.Module):
def __init__(self, embedding_dim, reg, embedding_num=12, ofm=False):
super(Virtue, self).__init__()
self.embedding_dim = embedding_dim
self.reg = reg
self.embedding_all = nn.ModuleDict({})
self.columns = ["cap-shape", "cap-surface", "cap-color", "bruises", "odor", "gill-attachment",
"gill-spacing", "gill-size", "gill-color", "stalk-shape", "stalk-root", "stalk-surface-above-ring",
"stalk-surface-below-ring", "stalk-color-above-ring", "stalk-color-below-ring", "veil-type",
"veil-color", "ring-number", "ring-type", "spore-print-color", "population", "habitat"]
if not ofm:
for name in self.columns:
self.embedding_all[name] = nn.Embedding(embedding_num, embedding_dim)
else:
for name in self.columns:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
temp.append(nn.Embedding(embedding_num, embedding_dim))
self.embedding_all[name] = temp
def compute_loss(self, inferences, labels, regs):
labels = torch.reshape(labels, [-1,1])
loss = F.binary_cross_entropy_with_logits(inferences, labels.float())
#loss = F.mse_loss(inferences, labels)
return loss + regs
class Network(Virtue):
def __init__(self, embedding_dim, arch, reg):
super(Network, self).__init__(embedding_dim, reg)
self.arch = arch
self.mlp_p = arch['mlp']['p']
self.mlp_q = arch['mlp']['q']
self.FC = nn.ModuleDict({})
for name1 in self.columns:
for name2 in self.columns:
if arch['binary'] == 'concat':
self.FC[name1+ ":" + name2] = nn.Linear(2*embedding_dim, 1, bias=False)
else:
self.FC[name1 + ":" + name2] = nn.Linear(embedding_dim, 1, bias=False)
def forward(self, features):
for value in self.FC.values():
constrain(next(value.parameters()))
inferences = 0
regs = 0
for name1 in self.columns:
for name2 in self.columns:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
regs += self.reg * (torch.norm(name1_embedding) + torch.norm(name2_embedding))
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1,1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_p(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += self.FC[name1 + ":" + name2](OPS[self.arch['binary']](name1_embedding_trans, name2_embedding_trans))
return inferences, regs
class Network_Search(Virtue):
def __init__(self, embedding_dim, reg):
super(Network_Search, self).__init__(embedding_dim, reg)
self.FC = nn.ModuleDict({})
for name1 in self.columns:
for name2 in self.columns:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self._initialize_alphas()
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self._arch_parameters = {}
self._arch_parameters['mlp'] = {}
self._arch_parameters['mlp']['p'] = self.mlp_p
self._arch_parameters['mlp']['q'] = self.mlp_q
self._arch_parameters['binary'] = Variable(torch.ones(len(PRIMITIVES_BINARY),
dtype=torch.float, device='cpu') / 2, requires_grad=True)
#self._arch_parameters['binary'] = Variable(torch.Tensor([1.0,1.0,1.0,1.0,1.0]), requires_grad=True)
self._arch_parameters['binary'].data.add_(
torch.randn_like(self._arch_parameters['binary'])*1e-3)
def arch_parameters(self):
return list(self._arch_parameters['mlp']['p'].parameters()) + \
list(self._arch_parameters['mlp']['q'].parameters()) + [self._arch_parameters['binary']]
def new(self):
        model_new = Network_Search(self.embedding_dim, self.reg)
for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
x.data = y.data.clone()
return model_new
def clip(self):
m = nn.Hardtanh(0, 1)
self._arch_parameters['binary'].data = m(self._arch_parameters['binary'])
def binarize(self):
self._cache = self._arch_parameters['binary'].clone()
max_index = self._arch_parameters['binary'].argmax().item()
for i in range(self._arch_parameters['binary'].size(0)):
if i == max_index:
self._arch_parameters['binary'].data[i] = 1.0
else:
self._arch_parameters['binary'].data[i] = 0.0
def recover(self):
self._arch_parameters['binary'].data = self._cache
del self._cache
def forward(self, features):
# for i in range(len(PRIMITIVES_BINARY)):
# constrain(next(self._FC[i].parameters()))
inferences = 0
regs = 0
for name1 in self.columns:
for name2 in self.columns:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
regs += self.reg * (torch.norm(name1_embedding) + torch.norm(name2_embedding))
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_p(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, self._arch_parameters['binary'], self.FC[name1 + ":" + name2])
return inferences, regs
def genotype(self):
genotype = PRIMITIVES_BINARY[self._arch_parameters['binary'].argmax().cpu().numpy()]
genotype_p = F.softmax(self._arch_parameters['binary'], dim=-1)
return genotype, genotype_p.cpu().detach()
def step(self, features, features_valid, lr, arch_optimizer, unrolled):
self.zero_grad()
arch_optimizer.zero_grad()
# binarize before forward propagation
self.binarize()
loss = self._backward_step(features_valid)
# restore weight before updating
self.recover()
arch_optimizer.step()
return loss
def _backward_step(self, features_valid):
inferences, regs = self(features_valid)
loss = self.compute_loss(inferences, features_valid["label"], regs)
loss.backward()
return loss
class DSNAS(Virtue):
def __init__(self, embedding_dim, reg, args):
super(DSNAS, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for name1 in self.columns:
for name2 in self.columns:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
#initialize contextual infos
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
if self.args.multi_operation:
num_op = len(self.columns)
self.log_alpha = torch.nn.Parameter(torch.zeros((num_op*num_op, len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
else:
self.log_alpha = torch.nn.Parameter(torch.zeros((1, len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
self._arch_parameters = [self.log_alpha]
self.weights = Variable(torch.zeros_like(self.log_alpha))
if self.args.early_fix_arch:
self.fix_arch_index = {}
def recommend(self, features):
self.eval()
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
if self.args.early_fix_arch:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.weights[key, :].zero_()
self.weights[key, value_lst[0]] = 1
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for name1 in self.columns:
for name2 in self.columns:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding = self.embedding_all[name1][max_index](features[name1])
name2_embedding = self.embedding_all[name2][max_index](features[name2])
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
name1_embedding_trans = name1_embedding#self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = name2_embedding#self.mlp_p(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
#ipdb.set_trace()
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
simulate_index = []
for i in range(len(max_index)):
if np.random.random() < self.args.epsion:
simulate_index.append(np.random.randint(0, 2))
else:
simulate_index.append(max_index[i])
a_ind = np.array([(i, val) for i, val in enumerate(simulate_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
cnt = 0
time_data = 0
time_forward = 0
time_update = 0
end = -1
for step, features in enumerate(train_bandit):
if end!=-1:
time_data += time.time() - end
begin = time.time()
optimizer.zero_grad()
arch_optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
time_forward += time.time() - begin
losses.append(error_loss.cpu().detach().item())
begin2 = time.time()
optimizer.step()
arch_optimizer.step()
time_update += time.time() - begin2
cnt += 1
end = time.time()
print("time_data: ", time_data)
print("time_forward: ", time_forward)
print("time_update: ", time_update)
print("cnt: ", cnt)
return np.mean(losses)
def revised_arch_index(self):
if self.args.early_fix_arch:
sort_log_alpha = torch.topk(F.softmax(self.log_alpha.data, dim=-1), 2)
argmax_index = (sort_log_alpha[0][:, 0] - sort_log_alpha[0][:, 1] >= 0.01)
for id in range(argmax_index.size(0)):
if argmax_index[id] == 1 and id not in self.fix_arch_index.keys():
self.fix_arch_index[id] = [sort_log_alpha[1][id, 0].item(),
self.log_alpha.detach().clone()[id, :]]
for key, value_lst in self.fix_arch_index.items():
self.log_alpha.data[key, :] = value_lst[1]
def forward(self, features):
regs = 0
self.weights = self._get_weights(self.log_alpha)
self.revised_arch_index()
if self.args.early_fix_arch:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.weights[key, :].zero_()
self.weights[key, value_lst[0]] = 1
cate_prob = F.softmax(self.log_alpha, dim=-1)
self.cate_prob = cate_prob.clone().detach()
loss_alpha = torch.log(
(self.weights * F.softmax(self.log_alpha, dim=-1)).sum(-1)).sum()
self.weights.requires_grad_()
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
        from joblib import Parallel, delayed
names_all = []
for name1 in self.columns:
for name2 in self.columns:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding = self.embedding_all[name1][max_index](features[name1])
name2_embedding = self.embedding_all[name2][max_index](features[name2])
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
names_all.append([name1_embedding, name2_embedding, cur_weights.view(-1,), self.FC[name1 + ":" + name2]])
res = Parallel(n_jobs=8, backend="threading")(delayed(MixedBinary)(para1, para2, para3, para4) for para1,para2,para3,para4 in names_all)
inferences = sum(res)
# for name1 in self.columns:
# for name2 in self.columns:
# if self.args.multi_operation:
# cur_weights = self.weights[cur_index]
# max_index = cur_weights.argmax().item()
# cur_index += 1
# if self.args.ofm:
# name1_embedding = self.embedding_all[name1][max_index](features[name1])
# name2_embedding = self.embedding_all[name2][max_index](features[name2])
# else:
# name1_embedding = self.embedding_all[name1](features[name1])
# name2_embedding = self.embedding_all[name2](features[name2])
# regs += self.reg * (torch.norm(name1_embedding) + torch.norm(name2_embedding))
# name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
# name2_embedding_trans = self.mlp_p(name2_embedding.view(-1, 1)).view(name2_embedding.size())
# inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
self.weights.grad = torch.zeros_like(self.weights)
(weighted_loss + loss_alpha).backward()
self.block_reward = self.weights.grad.data.sum(-1)
self.log_alpha.grad.data.mul_(self.block_reward.view(-1, 1))
return inferences, weighted_loss, loss_alpha
def _get_weights(self, log_alpha):
if self.args.random_sample:
uni = torch.ones_like(log_alpha)
m = torch.distributions.one_hot_categorical.OneHotCategorical(uni)
else:
m = torch.distributions.one_hot_categorical.OneHotCategorical(probs=F.softmax(log_alpha, dim=-1))
return m.sample()
def arch_parameters(self):
return self._arch_parameters
def genotype(self):
if not self.args.multi_operation:
genotype = PRIMITIVES_BINARY[self.log_alpha.argmax().cpu().numpy()]
genotype_p = F.softmax(self.log_alpha, dim=-1)
else:
genotype = []
for index in self.log_alpha.argmax(axis=1).cpu().numpy():
genotype.append(PRIMITIVES_BINARY[index])
genotype = ":".join(genotype[:10])
genotype_p = F.softmax(self.log_alpha, dim=-1)[:10]
return genotype, genotype_p.cpu().detach()
class Uniform:
def __init__(self, embedding_dim, reg, args):
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
def recommend(self, features):
pos_weights = torch.zeros_like(features["label"])
max_index = np.random.randint(0, 2, features["label"].shape[0])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
return reward
def step(self, optimizer, arch_optimizer):
return 0
def genotype(self):
return "uniform", 0
class Egreedy:
def __init__(self, embedding_dim, reg, args):
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
self.epsion = 0.2
self.action_rewards = {0:[0,1.0], 1:[0,1.0]}#total reward, action_num
self.max_action = 0
def recommend(self, features):
        max_reward = float("-inf")
for key in self.action_rewards:
if self.action_rewards[key][0]/self.action_rewards[key][1] > max_reward:
max_reward = self.action_rewards[key][0]/self.action_rewards[key][1]
self.max_action = key
pos_weights = torch.zeros_like(features["label"])
max_index = np.random.randint(0, 2, features["label"].shape[0])
simulate_index = []
for i in range(len(max_index)):
if np.random.random()<self.epsion:
simulate_index.append(max_index[i])
else:
simulate_index.append(self.max_action)
a_ind = np.array([(i, val) for i, val in enumerate(simulate_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
action_rewards = torch.sum(torch.mul(features["label"], pos_weights), dim=0)
action_nums = torch.sum(pos_weights, dim=0)
for key in self.action_rewards:
temp = self.action_rewards[key]
temp[0] += action_rewards[key].cpu().detach().item()
temp[1] += action_nums[key].cpu().detach().item()
self.action_rewards[key] = temp
return reward
def step(self, optimizer, arch_optimizer):
return 0
def genotype(self):
return "uniform", 0
class FM(Virtue):
def __init__(self, embedding_dim, reg, args):
super(FM, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for name1 in self.columns:
for name2 in self.columns:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
#initialize contextual infos
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([1, 1, 1, 1, 1.0])
self.weights = Variable(torch.Tensor([0.0, 1.0, 0.0, 0.0, 0.0]))
def recommend(self, features):
self.eval()
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for name1 in self.columns:
for name2 in self.columns:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding = self.embedding_all[name1][max_index](features[name1])
name2_embedding = self.embedding_all[name2][max_index](features[name2])
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
name1_embedding_trans = name1_embedding#self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = name2_embedding#self.mlp_p(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
cnt = 0
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
cnt += 1
print("cnt: ", cnt)
return np.mean(losses)
def forward(self, features):
regs = 0
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for name1 in self.columns:
for name2 in self.columns:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding = self.embedding_all[name1][max_index](features[name1])
name2_embedding = self.embedding_all[name2][max_index](features[name2])
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
regs += self.reg * (torch.norm(name1_embedding) + torch.norm(name2_embedding))
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_p(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
weighted_loss.backward()
return inferences, weighted_loss, 0
def genotype(self):
return "FM", 0
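# Plus / Max / Min / Concat below are ablations of FM that pin the architecture
# weights to a single one-hot interaction op instead of learning them; only
# _initialize_alphas (and hence self.weights) differs from the parent class.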
class Plus(FM):
def __init__(self, embedding_dim, reg, args):
        super(Plus, self).__init__(embedding_dim, reg, args)  # FM.__init__ expects (embedding_dim, reg, args)
self._initialize_alphas()
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.weights = Variable(torch.Tensor([1.0, 0.0, 0.0, 0.0, 0.0]))
def genotype(self):
return "Plus", 0
class Max(FM):
def __init__(self, embedding_dim, reg, args):
        super(Max, self).__init__(embedding_dim, reg, args)  # FM.__init__ expects (embedding_dim, reg, args)
self._initialize_alphas()
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.weights = Variable(torch.Tensor([0.0, 0.0, 1.0, 0.0, 0.0]))
def genotype(self):
return "Max", 0
class Min(FM):
def __init__(self, embedding_dim, reg, args):
        super(Min, self).__init__(embedding_dim, reg, args)  # FM.__init__ expects (embedding_dim, reg, args)
self._initialize_alphas()
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.weights = Variable(torch.Tensor([0.0, 0.0, 0.0, 1.0, 0.0]))
def genotype(self):
return "Min", 0
class Concat(FM):
def __init__(self, embedding_dim, reg, args):
        super(Concat, self).__init__(embedding_dim, reg, args)  # go through FM.__init__ so self.FC and self.args are initialized
self._initialize_alphas()
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.weights = Variable(torch.Tensor([0.0, 0.0, 0.0, 0.0, 1.0]))
def genotype(self):
return "Concat", 0
| 29,059 | 42.897281 | 165 | py |
AutoCO | AutoCO-main/exp_public/mushroom/simulate/baseline.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
import utils
from collections import Counter
from torch.distributions.multivariate_normal import MultivariateNormal
PRIMITIVES_BINARY = ['plus', 'multiply', 'max', 'min', 'concat']
PRIMITIVES_NAS = [0, 2, 4, 8, 16]
SPACE_NAS = pow(len(PRIMITIVES_NAS), 5)
OPS = {
'plus': lambda p, q: p + q,
'multiply': lambda p, q: p * q,
'max': lambda p, q: torch.max(torch.stack((p, q)), dim=0)[0],
'min': lambda p, q: torch.min(torch.stack((p, q)), dim=0)[0],
'concat': lambda p, q: torch.cat([p, q], dim=-1),
'norm_0': lambda p: torch.ones_like(p),
'norm_0.5': lambda p: torch.sqrt(torch.abs(p) + 1e-7),
'norm_1': lambda p: torch.abs(p),
'norm_2': lambda p: p ** 2,
'I': lambda p: torch.ones_like(p),
'-I': lambda p: -torch.ones_like(p),
'sign': lambda p: torch.sign(p),
}
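# Each OPS entry is an elementwise lambda over embedding tensors. A hedged
# sanity check with assumed shapes:
#   p, q = torch.randn(3, 4), torch.randn(3, 4)
#   OPS['plus'](p, q)      # p + q, shape (3, 4)
#   OPS['max'](p, q)       # elementwise max, shape (3, 4)
#   OPS['concat'](p, q)    # shape (3, 8)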
class Virtue_v(nn.Module):
def __init__(self, embedding_dim, reg, embedding_num=12, ofm=False, first_order=False):
super(Virtue_v, self).__init__()
self.embedding_dim = embedding_dim
self.reg = reg
self.embedding_mean = nn.ModuleDict({})
self.embedding_std = nn.ModuleDict({})
if first_order:
self.embedding_first_order = nn.ModuleDict({})
self.columns = ["cap-shape", "cap-surface", "cap-color", "bruises", "odor", "gill-attachment",
"gill-spacing", "gill-size", "gill-color", "stalk-shape", "stalk-root", "stalk-surface-above-ring",
"stalk-surface-below-ring", "stalk-color-above-ring", "stalk-color-below-ring", "veil-type",
"veil-color", "ring-number", "ring-type", "spore-print-color", "population", "habitat"]
for name in self.columns:
self.embedding_mean[name] = nn.Embedding(embedding_num, embedding_dim)
self.embedding_std[name] = nn.Embedding(embedding_num, embedding_dim)
if first_order:
self.embedding_first_order[name] = nn.Embedding(embedding_num, 1)
self.embedding_action = nn.Embedding(2, embedding_dim)
if first_order:
self.embedding_action_first_order = nn.Embedding(2, 1)
class Virtue(nn.Module):
def __init__(self, embedding_dim, reg, embedding_num=12, ofm=False, first_order=False):
super(Virtue, self).__init__()
self.embedding_dim = embedding_dim
self.reg = reg
self.embedding_all = nn.ModuleDict({})
if first_order:
self.embedding_first_order = nn.ModuleDict({})
self.columns = ["cap-shape", "cap-surface", "cap-color", "bruises", "odor", "gill-attachment",
"gill-spacing", "gill-size", "gill-color", "stalk-shape", "stalk-root", "stalk-surface-above-ring",
"stalk-surface-below-ring", "stalk-color-above-ring", "stalk-color-below-ring", "veil-type",
"veil-color", "ring-number", "ring-type", "spore-print-color", "population", "habitat"]
for name in self.columns:
self.embedding_all[name] = nn.Embedding(embedding_num, embedding_dim)
if first_order:
self.embedding_first_order[name] = nn.Embedding(embedding_num, 1)
self.embedding_action = nn.Embedding(2, embedding_dim)
if first_order:
self.embedding_action_first_order = nn.Embedding(2, 1)
class FM_v(Virtue_v):
"""
FM with EE
"""
def __init__(self, embedding_dim, reg, args):
super(FM_v, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num, first_order=args.first_order)
self.args = args
self._initialize_alphas()
#initialize contextual infos
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.log_alpha = torch.Tensor([1, 1, 1, 1, 1.0])
self.weights = Variable(torch.Tensor([0.0, 1.0, 0.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
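    # reparameterize() below is the reparameterization trick
    # mu + softplus(std) * eps * 0.01: eps comes from a fixed pre-sampled
    # buffer (self.rand_array) instead of a fresh torch.randn call, softplus
    # keeps the standard deviation positive, and 0.01 damps the noise.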
def reparameterize(self, mu, std):
std = torch.log(1 + torch.exp(std))
v = self.rand_array[:std.numel()].reshape(std.shape)
return (mu + std * v * 0.01)
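    # KL_distance() below sums the closed-form elementwise KL divergence
    # between diagonal Gaussians,
    #   KL(N(m1, s1^2) || N(m2, s2^2))
    #     = log(s2 / s1) + (s1^2 + (m1 - m2)^2) / (2 * s2^2) - 1/2;
    # forward() evaluates it against an N(0, 0.1^2) prior on the embeddings.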
def KL_distance(self, mean1, mean2, std1, std2):
a = torch.log(std2 / std1) + (std1 * std1 + (mean1 - mean2) * (mean1 - mean2)) / 2 / std2 / std2 - 1.0 / 2.0
return torch.sum(a)
def recommend(self, features):
self.eval()
inferences = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
inferences += torch.sum(name1_embedding*name2_embedding, dim=1, keepdim=True)
inferences_0 = 0 # inferences.clone() #action 0
inferences_1 = 0 # inferences.clone() #action_1
#features with action
for name1 in self.columns:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()), dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()), dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer, epoch):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
if epoch < self.args.search_epoch:
train_epoch = (epoch+1)*5
else:
train_epoch = 1
for k in range(train_epoch):
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
return np.mean(losses)
def forward(self, features):
regs = 0
inferences = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
inferences += torch.sum(name1_embedding * name2_embedding, dim=1, keepdim=True)
# features with action
inferences_0 = 0 #inferences.clone() # action 0
inferences_1 = 0 #inferences.clone() # action_1
for name1 in self.columns:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()),
dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()),
dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
kl = 0
for name in self.columns:
kl += self.KL_distance(self.embedding_mean[name].weight,
0 * torch.ones_like(self.embedding_mean[name].weight),
torch.log(1 + torch.exp(self.embedding_std[name].weight)),
0.1 * torch.ones_like(self.embedding_std[name].weight))
(weighted_loss + kl/features["label"].shape[0]).backward()
# weighted_loss.backward()
return inferences, weighted_loss, 0
def genotype(self):
return "FM_v", 0
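# A minimal interaction-loop sketch for the bandit models in this file. The
# names `args` and `data_queue` are assumptions (the real ones are built by
# utils.py and the training script), so treat this as illustrative only:
#   model = FM_v(embedding_dim=8, reg=1e-5, args=args)
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#   for epoch, features in enumerate(data_queue):
#       reward = model.recommend(features)         # act on sampled embeddings
#       loss = model.step(optimizer, None, epoch)  # refit on logged feedback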
class FM(Virtue):
"""
FM without EE
"""
def __init__(self, embedding_dim, reg, args):
super(FM, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num, first_order=args.first_order)
self.args = args
self._initialize_alphas()
#initialize contextual infos
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.log_alpha = torch.Tensor([1, 1, 1, 1, 1.0])
self.weights = Variable(torch.Tensor([0.0, 1.0, 0.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
def recommend(self, features):
self.eval()
inferences = 0
#inferences_0 = 0
#inferences_1 = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
inferences += torch.sum(name1_embedding*name2_embedding, dim=1, keepdim=True)
#inferences_0 += torch.sum(name1_embedding * name2_embedding, dim=1, keepdim=True)
#inferences_1 += torch.sum(name1_embedding * name2_embedding, dim=1, keepdim=True)
inferences_0 = 0 #inferences.clone() # action 0
inferences_1 = 0 #inferences.clone() # action_1
#features with action
for name1 in self.columns:
name1_embedding = self.embedding_all[name1](features[name1])
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()), dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()), dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer, epoch):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
if epoch < self.args.search_epoch:
train_epoch = (epoch+1)*5
else:
train_epoch = 1
for k in range(train_epoch):
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
return np.mean(losses)
def forward(self, features):
regs = 0
inferences = 0
#inferences_0 = 0
#inferences_1 = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
inferences += torch.sum(name1_embedding * name2_embedding, dim=1, keepdim=True)
#inferences_0 += torch.sum(name1_embedding * name2_embedding, dim=1, keepdim=True)
#inferences_1 += torch.sum(name1_embedding * name2_embedding, dim=1, keepdim=True)
# features with action
inferences_0 = 0 #inferences.clone() # action 0
inferences_1 = 0 #inferences.clone() # action_1
for name1 in self.columns:
name1_embedding = self.embedding_all[name1](features[name1])
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()),
dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()),
dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
weighted_loss.backward()
return inferences, weighted_loss, 0
def genotype(self):
return "FM", 0
class FM_v2(Virtue_v):
"""
FM with EE and FC layer
"""
def __init__(self, embedding_dim, reg, args):
super(FM_v2, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num, first_order=args.first_order)
self.args = args
self._initialize_alphas()
self.FC = nn.ModuleDict({})
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
self.FC[name1 + ":" + name2] = nn.Linear(embedding_dim, 1, bias=False)
#initialize contextual infos
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.log_alpha = torch.Tensor([1, 1, 1, 1, 1.0])
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.weights = Variable(torch.Tensor([0.0, 1.0, 0.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
def reparameterize(self, mu, std):
std = torch.log(1 + torch.exp(std))
v = self.rand_array[:std.numel()].reshape(std.shape)
return (mu + std * v * 0.01)
def KL_distance(self, mean1, mean2, std1, std2):
a = torch.log(std2 / std1) + (std1 * std1 + (mean1 - mean2) * (mean1 - mean2)) / 2 / std2 / std2 - 1.0 / 2.0
return torch.sum(a)
def recommend(self, features):
self.eval()
inferences = 0
inferences_0 = 0
inferences_1 = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
inferences += self.FC[name1 + ":" + name2](name1_embedding * name2_embedding)
inferences_0 = inferences.clone() # action 0
inferences_1 = inferences.clone() # action_1
#features with action
for name1 in self.columns:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()), dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()), dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
    def step(self, optimizer, arch_optimizer, epoch):  # third argument renamed from "step" to avoid shadowing the loop variable below
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
cnt = 0
for step, features in enumerate(train_bandit):
cnt += 1
optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
print("cnt: ", cnt)
return np.mean(losses)
def forward(self, features):
regs = 0
inferences = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
inferences += self.FC[name1 + ":" + name2](name1_embedding * name2_embedding)
# features with action
inferences_0 = inferences.clone() # action 0
inferences_1 = inferences.clone() # action_1
for name1 in self.columns:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()),
dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()),
dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
kl = 0
for name in self.columns:
kl += self.KL_distance(self.embedding_mean[name].weight,
0 * torch.ones_like(self.embedding_mean[name].weight),
torch.log(1 + torch.exp(self.embedding_std[name].weight)),
0.1 * torch.ones_like(self.embedding_std[name].weight))
(weighted_loss + kl/features["label"].shape[0]).backward()
return inferences, weighted_loss, 0
def genotype(self):
return "FM_v2", 0
class Random:
def __init__(self, embedding_dim, reg, args):
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
def recommend(self, features):
pos_weights = torch.zeros_like(features["label"])
max_index = np.random.randint(0, 2, features["label"].shape[0])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
return reward
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "Random", 0
class Egreedy:
def __init__(self, embedding_dim, reg, args):
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
self.epsion = 0.2
        self.action_rewards = {0: [0, 1.0], 1: [0.1, 1.0]}  # per action: [total reward, pull count]
self.max_action = 0
def recommend(self, features):
        max_reward = float("-inf")  # builtin float; np.float was removed from NumPy
for key in self.action_rewards:
if self.action_rewards[key][0]/self.action_rewards[key][1] > max_reward:
max_reward = self.action_rewards[key][0]/self.action_rewards[key][1]
self.max_action = key
pos_weights = torch.zeros_like(features["label"])
max_index = np.random.randint(0, 2, features["label"].shape[0])
simulate_index = []
for i in range(len(max_index)):
if np.random.random()<self.epsion:
simulate_index.append(max_index[i])
else:
simulate_index.append(self.max_action)
a_ind = np.array([(i, val) for i, val in enumerate(simulate_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
action_rewards = torch.sum(torch.mul(features["label"], pos_weights), dim=0)
action_nums = torch.sum(pos_weights, dim=0)
for key in self.action_rewards:
temp = self.action_rewards[key]
temp[0] += action_rewards[key].cpu().detach().item()
temp[1] += action_nums[key].cpu().detach().item()
self.action_rewards[key] = temp
return reward
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "Egreedy", 0
class Thompson:
def __init__(self, embedding_dim, reg, args):
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
self.epsion = 0.2
        self.action_rewards = {0: [0, 0], 1: [0, 0]}  # per action: [total reward, pull count]
self.max_action = 0
def recommend(self, features):
#Thompson sampling
values = []
num = 2
N = 10000
for index in range(num):
pos = np.random.beta(1+int(self.action_rewards[index][0]), 2+int(self.action_rewards[index][1]), N)
values.append(pos)
action_pos = np.vstack(values)
action_num = Counter(action_pos.argmax(axis=0))
action_percentage = []
for index in range(num):
action_percentage.append(action_num[index]/N)
simulate_index = []
for i in range(features["label"].shape[0]):
simulate_index.append(np.random.choice(range(num), p=action_percentage))
pos_weights = torch.zeros_like(features["label"])
a_ind = np.array([(i, val) for i, val in enumerate(simulate_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
action_rewards = torch.sum(torch.mul(features["label"], pos_weights), dim=0)
action_nums = torch.sum(pos_weights, dim=0)
for key in self.action_rewards:
temp = self.action_rewards[key]
temp[0] += action_rewards[key].cpu().detach().item()
temp[1] += action_nums[key].cpu().detach().item()
self.action_rewards[key] = temp
return reward
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "Thompson", 0
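# Thompson (above) is batched Beta-Bernoulli Thompson sampling: each arm keeps
# a Beta(1 + total_reward, 2 + pull_count) posterior (the 1/2 offsets are this
# repo's prior choice), N = 10000 draws are taken per arm, and the batch plays
# each arm in proportion to how often its draw wins:
#   draws = np.vstack([np.random.beta(1 + int(r), 2 + int(n), 10000)
#                      for r, n in action_rewards.values()])
#   win_rate = np.bincount(draws.argmax(axis=0), minlength=2) / 10000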
class LinUCB2:
def __init__(self, embedding_dim, reg, args):
self.Aa = torch.eye(119)
self.ba = torch.zeros(119).view(-1,1)
self.alpha = 0.1
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
def recommend(self, features):
action1_features = torch.zeros((features["label"].shape[0], 2))
action1_features[:, 0] = 1.0
action2_features = torch.zeros((features["label"].shape[0], 2))
action2_features[:, 1] = 1.0
action1_input = torch.cat([features["feature"], action1_features], dim=1)
action2_input = torch.cat([features["feature"], action2_features], dim=1)
inputs_all = [action1_input, action2_input]
theta = torch.matmul(torch.inverse(self.Aa), self.ba)
action1_score = torch.matmul(action1_input, theta) + self.alpha * torch.sqrt(
torch.sum(torch.mul(torch.matmul(action1_input, torch.inverse(self.Aa)), action1_input), dim=-1)).view(-1,1)
action2_score = torch.matmul(action2_input, theta) + self.alpha * torch.sqrt(
torch.sum(torch.mul(torch.matmul(action2_input, torch.inverse(self.Aa)), action2_input), dim=-1)).view(-1, 1)
score_all = torch.cat([action1_score, action2_score], dim=1)
max_index = score_all.argmax(dim=1)
print(Counter(max_index.numpy()))
pos_weights = torch.zeros_like(features["label"])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
#update Aa and ba
for i in range(max_index.shape[0]):
cur_action = max_index[i].item()
cur_reward = features["label"][i, cur_action].item()
cur_feature = inputs_all[cur_action][i]
self.Aa += torch.matmul(cur_feature.view(-1,1), cur_feature.view(1,-1))
self.ba += cur_reward * cur_feature.view(-1,1)
return reward
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "LinUCB2", 0
#
class LinUCB:
def __init__(self, embedding_dim, reg, args):
self.action_num = 2
self.feature_dim = 117
self.Aa = []
self.ba = []
for i in range(self.action_num):
self.Aa.append(torch.eye(self.feature_dim))
self.ba.append(torch.zeros(self.feature_dim).view(-1,1))
self.alpha = 1.0
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
def recommend(self, features):
score_all = []
for i in range(self.action_num):
Aa = self.Aa[i]
ba = self.ba[i]
theta = torch.matmul(torch.inverse(Aa), ba)
score = torch.matmul(features["feature"], theta) + self.alpha * torch.sqrt(
torch.sum(torch.mul(torch.matmul(features["feature"], torch.inverse(Aa)), features["feature"]), dim=-1)
).view(-1,1)
score_all.append(score)
score_all = torch.cat(score_all, dim=1)
max_index = score_all.argmax(dim=1)
print(Counter(max_index.numpy()))
pos_weights = torch.zeros_like(features["label"])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
#update Aa and ba
for i in range(max_index.shape[0]):
cur_action = max_index[i].item()
cur_reward = features["label"][i, cur_action].item()
cur_feature = features["feature"][i]
Aa = self.Aa[cur_action]
ba = self.ba[cur_action]
Aa += torch.matmul(cur_feature.view(-1,1), cur_feature.view(1,-1))
ba += cur_reward * cur_feature.view(-1,1)
self.Aa[cur_action] = Aa
self.ba[cur_action] = ba
return reward
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "LinUCB", 0
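# LinUCB (above) keeps one ridge-regression model per arm: theta_a = Aa^{-1} ba
# with Aa = I + sum_t x_t x_t^T and ba = sum_t r_t x_t over the rounds where
# arm a was played, and scores a context x by
#   x^T theta_a + alpha * sqrt(x^T Aa^{-1} x),
# i.e. the predicted reward plus an exploration bonus that shrinks as Aa grows.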
class LinThompson:
def __init__(self, embedding_dim, reg, args):
self.action_num = 2
self.feature_dim = 117
self.Aa = []
self.ba = []
for i in range(self.action_num):
self.Aa.append(torch.eye(self.feature_dim))
self.ba.append(torch.zeros(self.feature_dim).view(-1, 1))
self.alpha = 1.0
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
def recommend(self, features):
score_all = []
for i in range(self.action_num):
Aa = self.Aa[i]
ba = self.ba[i]
mu = torch.matmul(torch.inverse(Aa), ba)
variance = torch.inverse(Aa)
try:
theta = MultivariateNormal(loc=mu.view(-1), covariance_matrix=self.alpha * variance).sample().view(-1,1)
            except Exception:
                # sampling can fail when the covariance is not positive
                # definite; fall back to the posterior mean
                print("MultivariateNormal sampling failed; using the posterior mean")
                theta = mu.view(-1, 1)
score = torch.matmul(features["feature"], theta) + self.alpha * torch.sqrt(
torch.sum(torch.mul(torch.matmul(features["feature"], torch.inverse(Aa)), features["feature"]), dim=-1)
).view(-1, 1)
score_all.append(score)
score_all = torch.cat(score_all, dim=1)
max_index = score_all.argmax(dim=1)
print(Counter(max_index.numpy()))
pos_weights = torch.zeros_like(features["label"])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
# update Aa and ba
for i in range(max_index.shape[0]):
cur_action = max_index[i].item()
cur_reward = features["label"][i, cur_action].item()
cur_feature = features["feature"][i]
Aa = self.Aa[cur_action]
ba = self.ba[cur_action]
Aa += torch.matmul(cur_feature.view(-1, 1), cur_feature.view(1, -1))
ba += cur_reward * cur_feature.view(-1, 1)
self.Aa[cur_action] = Aa
self.ba[cur_action] = ba
return reward
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "LinThompson", 0
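# LinThompson (above) samples theta from the Gaussian posterior
# N(Aa^{-1} ba, alpha * Aa^{-1}) per arm (falling back to the posterior mean
# when sampling fails) and, in this implementation, still adds the
# LinUCB-style alpha * sqrt(x^T Aa^{-1} x) bonus on top of the sampled score.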
class LinEGreedy:
def __init__(self, embedding_dim, reg, args):
        self.Aa = torch.eye(119)  # 117 context features + 2 one-hot action dims, matching LinUCB2
        self.ba = torch.zeros(119).view(-1, 1)
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
self.epsion = 0.2
self.turn = True
def recommend(self, features):
action1_features = torch.zeros((features["label"].shape[0], 2))
action1_features[:, 0] = 1.0
action2_features = torch.zeros((features["label"].shape[0], 2))
action2_features[:, 1] = 1.0
action1_input = torch.cat([features["feature"], action1_features], dim=1)
action2_input = torch.cat([features["feature"], action2_features], dim=1)
inputs_all = [action1_input, action2_input]
theta = torch.matmul(torch.inverse(self.Aa), self.ba)
action1_score = torch.matmul(action1_input, theta)
action2_score = torch.matmul(action2_input, theta)
score_all = torch.cat([action1_score, action2_score], dim=1)
max_index = score_all.argmax(dim=1)
if self.turn:
simulate_index = []
for i in range(len(max_index)):
if np.random.random() < self.epsion:
simulate_index.append(max_index[i].item())
else:
simulate_index.append(np.random.randint(0, 2))
max_index = simulate_index
self.turn = False
print(Counter(max_index))
pos_weights = torch.zeros_like(features["label"])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
#update Aa and ba
for i in range(len(max_index)):
cur_action = max_index[i]
cur_reward = features["label"][i, cur_action].item()
cur_feature = inputs_all[cur_action][i]
self.Aa += torch.matmul(cur_feature.view(-1,1), cur_feature.view(1,-1))
self.ba += cur_reward * cur_feature.view(-1,1)
return reward
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "LinEGreedy", 0
| 37,783 | 46.112219 | 141 | py |
AutoCO | AutoCO-main/exp_public/mushroom/simulate/vartional_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
import utils
PRIMITIVES_BINARY = ['plus', 'multiply', 'max', 'min', 'concat']
PRIMITIVES_NAS = [0, 2, 4, 8, 16]
SPACE_NAS = pow(len(PRIMITIVES_NAS), 5)
OPS = {
'plus': lambda p, q: p + q,
'multiply': lambda p, q: p * q,
'max': lambda p, q: torch.max(torch.stack((p, q)), dim=0)[0],
'min': lambda p, q: torch.min(torch.stack((p, q)), dim=0)[0],
'concat': lambda p, q: torch.cat([p, q], dim=-1),
'norm_0': lambda p: torch.ones_like(p),
'norm_0.5': lambda p: torch.sqrt(torch.abs(p) + 1e-7),
'norm_1': lambda p: torch.abs(p),
'norm_2': lambda p: p ** 2,
'I': lambda p: torch.ones_like(p),
'-I': lambda p: -torch.ones_like(p),
'sign': lambda p: torch.sign(p),
}
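# MixedBinary below is the DARTS-style continuous relaxation used throughout
# this file: every candidate pairwise interaction is evaluated and the per-op
# linear heads are blended by the architecture weights. A hedged toy check
# (shapes assumed, not taken from the repo's training code):
#   p, q = torch.randn(4, 8), torch.randn(4, 8)
#   w = torch.tensor([0., 1., 0., 0., 0.])            # select 'multiply'
#   fcs = [nn.Linear(16 if op == 'concat' else 8, 2, bias=False)
#          for op in PRIMITIVES_BINARY]
#   out = MixedBinary(p, q, w, fcs)                   # -> shape (4, 2)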
def MixedBinary(embedding_p, embedding_q, weights, FC):
return torch.sum(torch.stack([w * fc(OPS[primitive](embedding_p, embedding_q)) \
for w,primitive,fc in zip(weights,PRIMITIVES_BINARY,FC)]), 0)
class Virtue(nn.Module):
def __init__(self, embedding_dim, reg, embedding_num=12, ofm=False):
super(Virtue, self).__init__()
self.embedding_dim = embedding_dim
self.reg = reg
self.embedding_mean = nn.ModuleDict({})
self.embedding_std = nn.ModuleDict({})
self.columns = ["cap-shape", "cap-surface", "cap-color", "bruises", "odor", "gill-attachment",
"gill-spacing", "gill-size", "gill-color", "stalk-shape", "stalk-root", "stalk-surface-above-ring",
"stalk-surface-below-ring", "stalk-color-above-ring", "stalk-color-below-ring", "veil-type",
"veil-color", "ring-number", "ring-type", "spore-print-color", "population", "habitat"]
if not ofm:
for name in self.columns:
self.embedding_mean[name] = nn.Embedding(embedding_num, embedding_dim)
self.embedding_std[name] = nn.Embedding(embedding_num, embedding_dim)
else:
for name in self.columns:
temp_mean = nn.ModuleList()
temp_std = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
temp_mean.append(nn.Embedding(embedding_num, embedding_dim))
temp_std.append(nn.Embedding(embedding_num, embedding_dim))
self.embedding_mean[name] = temp_mean
self.embedding_std[name] = temp_std
class Virtue2(nn.Module):
def __init__(self, embedding_dim, reg, embedding_num=12, ofm=False):
super(Virtue2, self).__init__()
self.embedding_dim = embedding_dim
self.reg = reg
self.embedding_all = nn.ModuleDict({})
self.columns = ["cap-shape", "cap-surface", "cap-color", "bruises", "odor", "gill-attachment",
"gill-spacing", "gill-size", "gill-color", "stalk-shape", "stalk-root", "stalk-surface-above-ring",
"stalk-surface-below-ring", "stalk-color-above-ring", "stalk-color-below-ring", "veil-type",
"veil-color", "ring-number", "ring-type", "spore-print-color", "population", "habitat"]
if not ofm:
for name in self.columns:
self.embedding_all[name] = nn.Embedding(embedding_num, embedding_dim)
else:
for name in self.columns:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
temp.append(nn.Embedding(embedding_num, embedding_dim))
self.embedding_all[name] = temp
class DSNAS_v(Virtue):
def __init__(self, embedding_dim, reg, args):
super(DSNAS_v, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for name1 in self.columns:
for name2 in self.columns:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
#initialize contextual infos
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
if self.args.multi_operation:
num_op = len(self.columns)
self.log_alpha = torch.nn.Parameter(torch.zeros((num_op*num_op, len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
else:
self.log_alpha = torch.nn.Parameter(torch.zeros((1, len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
self._arch_parameters = [self.log_alpha]
self.weights = Variable(torch.zeros_like(self.log_alpha))
if self.args.early_fix_arch:
self.fix_arch_index = {}
self.rand_array = torch.randn(3000000)
def reparameterize(self, mu, std):
std = torch.log(1 + torch.exp(std))
v = self.rand_array[:std.numel()].reshape(std.shape)
return (mu + std * v * 0.01)
def KL_distance(self, mean1, mean2, std1, std2):
a = torch.log(std2 / std1) + (std1 * std1 + (mean1 - mean2) * (mean1 - mean2)) / 2 / std2 / std2 - 1.0 / 2.0
return torch.sum(a)
def recommend(self, features):
self.eval()
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
if self.args.early_fix_arch:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.weights[key, :].zero_()
self.weights[key, value_lst[0]] = 1
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for name1 in self.columns:
for name2 in self.columns:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding_mean = self.embedding_mean[name1][max_index](features[name1])
name1_embedding_std = self.embedding_std[name1][max_index](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2][max_index](features[name2])
name2_embedding_std = self.embedding_std[name2][max_index](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
name1_embedding_trans = name1_embedding#self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = name2_embedding#self.mlp_p(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer, epoch):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
if epoch < self.args.search_epoch:
arch_optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features, epoch)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
if epoch < self.args.search_epoch:
arch_optimizer.step()
return np.mean(losses)
def revised_arch_index(self, epoch):
if self.args.early_fix_arch:
if epoch < self.args.search_epoch:
sort_log_alpha = torch.topk(F.softmax(self.log_alpha.data, dim=-1), 2)
argmax_index = (sort_log_alpha[0][:, 0] - sort_log_alpha[0][:, 1] >= 0.10)
for id in range(argmax_index.size(0)):
if argmax_index[id] == 1 and id not in self.fix_arch_index.keys():
self.fix_arch_index[id] = [sort_log_alpha[1][id, 0].item(),
self.log_alpha.detach().clone()[id, :]]
if epoch >= self.args.search_epoch:
                # fix the arch.
max_index = torch.argmax(self.log_alpha, dim=-1)
for id in range(max_index.size(0)):
if id not in self.fix_arch_index.keys():
self.fix_arch_index[id] = [max_index[id].item(), self.log_alpha.detach().clone()[id, :]]
for key, value_lst in self.fix_arch_index.items():
self.log_alpha.data[key, :] = value_lst[1]
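    # revised_arch_index (above) implements early architecture fixing: during
    # search, any mixed edge whose top-1 softmax probability beats the
    # runner-up by at least 0.10 is frozen to its argmax op, and once epoch
    # reaches args.search_epoch every remaining edge is frozen as well.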
def forward(self, features, epoch):
self.weights = self._get_weights(self.log_alpha)
self.revised_arch_index(epoch)
if self.args.early_fix_arch:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.weights[key, :].zero_()
self.weights[key, value_lst[0]] = 1
if epoch<self.args.search_epoch:
cate_prob = F.softmax(self.log_alpha, dim=-1)
self.cate_prob = cate_prob.clone().detach()
loss_alpha = torch.log(
(self.weights * F.softmax(self.log_alpha, dim=-1)).sum(-1)).sum()
self.weights.requires_grad_()
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
from joblib import Parallel, delayed
names_all = []
for name1 in self.columns:
for name2 in self.columns:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding_mean = self.embedding_mean[name1][max_index](features[name1])
name1_embedding_std = self.embedding_std[name1][max_index](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2][max_index](features[name2])
name2_embedding_std = self.embedding_std[name2][max_index](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
names_all.append(
[name1_embedding, name2_embedding, cur_weights.view(-1, ), self.FC[name1 + ":" + name2]])
res = Parallel(n_jobs=8, backend="threading")(
delayed(MixedBinary)(para1, para2, para3, para4) for para1, para2, para3, para4 in names_all)
inferences = sum(res)
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
kl = 0
for name in self.columns:
if not self.args.ofm:
kl += self.KL_distance(self.embedding_mean[name].weight,
0 * torch.ones_like(self.embedding_mean[name].weight),
torch.log(1 + torch.exp(self.embedding_std[name].weight)),
0.1 * torch.ones_like(self.embedding_std[name].weight))
else:
for index in range(len(PRIMITIVES_BINARY)):
kl += self.KL_distance(self.embedding_mean[name][index].weight,
0 * torch.ones_like(self.embedding_mean[name][index].weight),
torch.log(1 + torch.exp(self.embedding_std[name][index].weight)),
0.1 * torch.ones_like(self.embedding_std[name][index].weight))
if epoch < self.args.search_epoch:
self.weights.grad = torch.zeros_like(self.weights)
(weighted_loss + loss_alpha + kl/features["label"].shape[0]).backward()
self.block_reward = self.weights.grad.data.sum(-1)
self.log_alpha.grad.data.mul_(self.block_reward.view(-1, 1))
return inferences, weighted_loss, loss_alpha
else:
(weighted_loss + kl/features["label"].shape[0]).backward()
return inferences, weighted_loss, 0
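    # The search branch above is the DSNAS gradient estimator: the one-hot
    # sample self.weights participates in the dense forward pass, backward()
    # writes a gradient into it, its per-edge sum acts as the "block reward",
    # and the gradient of loss_alpha (the sample log-probability) on log_alpha
    # is rescaled by that reward.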
def _get_weights(self, log_alpha):
if self.args.random_sample:
uni = torch.ones_like(log_alpha)
m = torch.distributions.one_hot_categorical.OneHotCategorical(uni)
else:
m = torch.distributions.one_hot_categorical.OneHotCategorical(probs=F.softmax(log_alpha, dim=-1))
return m.sample()
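    # _get_weights (above) draws a one-hot architecture sample from the
    # categorical softmax(log_alpha), or from a uniform categorical when
    # args.random_sample is set; forward() uses the log-probability of that
    # sample as the surrogate term loss_alpha.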
def arch_parameters(self):
return self._arch_parameters
def genotype(self):
if not self.args.multi_operation:
genotype = PRIMITIVES_BINARY[self.log_alpha.argmax().cpu().numpy()]
genotype_p = F.softmax(self.log_alpha, dim=-1)
else:
genotype = []
for index in self.log_alpha.argmax(axis=1).cpu().numpy():
genotype.append(PRIMITIVES_BINARY[index])
genotype = ":".join(genotype[:10])
genotype_p = F.softmax(self.log_alpha, dim=-1)[:10]
return genotype, genotype_p.cpu().detach()
class NASP(Virtue2):
def __init__(self, embedding_dim, reg, args):
super(NASP, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
if self.args.multi_operation:
num_op = len(self.columns)
self.log_alpha = torch.nn.Parameter(torch.zeros((int(num_op*(num_op-1)/2), len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
else:
self.log_alpha = torch.nn.Parameter(torch.zeros((1, len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
self._arch_parameters = [self.log_alpha]
self.weights = Variable(torch.zeros_like(self.log_alpha))
if self.args.early_fix_arch:
self.fix_arch_index = {}
self.rand_array = torch.randn(3000000)
def recommend(self, features):
self.eval()
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
if self.args.early_fix_arch:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.weights[key, :].zero_()
self.weights[key, value_lst[0]] = 1
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding = self.embedding_all[name1][max_index](features[name1])
name2_embedding = self.embedding_all[name2][max_index](features[name2])
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
if self.args.trans:
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
else:
name1_embedding_trans = name1_embedding
name2_embedding_trans = name2_embedding
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
if self.args.first_order:
for name in self.columns:
inferences += self.embedding_first_order[name](features[name])
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def MixedBinary_ofm(self, embedding_p_all, embedding_q_all, weights, FC):
return torch.sum(torch.stack([w * fc(OPS[primitive](embedding_p, embedding_q)) \
for w, primitive, fc, embedding_p, embedding_q in zip(weights, PRIMITIVES_BINARY, FC, embedding_p_all, embedding_q_all)]), 0)
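    # MixedBinary_ofm (above) differs from MixedBinary_all (below) only in
    # that every candidate op owns its own embedding pair ("one feature map
    # per op"), so embedding_p_all / embedding_q_all are lists aligned with
    # PRIMITIVES_BINARY.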
def MixedBinary_all(self, embedding_p, embedding_q, weights, FC):
return torch.sum(torch.stack([w * fc(OPS[primitive](embedding_p, embedding_q)) \
for w, primitive, fc in zip(weights, PRIMITIVES_BINARY, FC)]), 0)
def step(self, optimizer, arch_optimizer, epoch):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
if epoch < self.args.search_epoch:
train_epoch = (epoch+1)*5
else:
train_epoch = 1
for k in range(train_epoch):
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
arch_optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features, epoch, search=True)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
arch_optimizer.step()
return np.mean(losses)
def forward(self, features, epoch, search):
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
self.weights.requires_grad_()
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
        from joblib import Parallel, delayed  # sklearn.externals.joblib was removed in scikit-learn 0.23
names_all = []
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
embedding_name1_all = []
embedding_name2_all = []
for index_name in range(len(PRIMITIVES_BINARY)):
name1_embedding = self.embedding_all[name1][index_name](features[name1])
embedding_name1_all.append(name1_embedding)
name2_embedding = self.embedding_all[name2][index_name](features[name2])
embedding_name2_all.append(name2_embedding)
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
if self.args.trans:
if self.args.ofm:
embedding_name1_all_temp = []
embedding_name2_all_temp = []
for index_temp in range(len(embedding_name1_all)):
embedding_name1_all_temp.append(self.mlp_p(embedding_name1_all[index_temp].view(-1, 1)).view(embedding_name1_all[index_temp].size()))
embedding_name2_all_temp.append(self.mlp_p(embedding_name2_all[index_temp].view(-1, 1)).view(embedding_name2_all[index_temp].size()))
embedding_name1_all = embedding_name1_all_temp
embedding_name2_all = embedding_name2_all_temp
else:
name1_embedding = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
if self.args.ofm:
names_all.append([embedding_name1_all, embedding_name2_all, cur_weights.view(-1, ), self.FC[name1 + ":" + name2]])
else:
names_all.append(
[name1_embedding, name2_embedding, cur_weights.view(-1, ), self.FC[name1 + ":" + name2]])
if self.args.ofm:
res = Parallel(n_jobs=8, backend="threading")(
delayed(self.MixedBinary_ofm)(para1, para2, para3, para4) for para1, para2, para3, para4 in names_all)
else:
res = Parallel(n_jobs=8, backend="threading")(
delayed(self.MixedBinary_all)(para1, para2, para3, para4) for para1, para2, para3, para4 in names_all)
inferences = sum(res)
if self.args.first_order:
for name in self.columns:
inferences += self.embedding_first_order[name](features[name])
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
weighted_loss.backward()
self.log_alpha.grad = self.weights.grad
return inferences, weighted_loss, 0
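    # NASP's proximal / straight-through step: the hard one-hot self.weights
    # is used in the forward pass, and its gradient is copied verbatim onto
    # log_alpha, so the architecture parameters are updated as if they had
    # produced the discrete selection directly.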
def arch_parameters(self):
return self._arch_parameters
def genotype(self):
if not self.args.multi_operation:
genotype = PRIMITIVES_BINARY[self.log_alpha.argmax().cpu().numpy()]
genotype_p = F.softmax(self.log_alpha, dim=-1)
else:
genotype = []
for index in self.log_alpha.argmax(axis=1).cpu().numpy():
genotype.append(PRIMITIVES_BINARY[index])
genotype = ":".join(genotype[:10])
genotype_p = F.softmax(self.log_alpha, dim=-1)[:10]
return genotype, genotype_p.cpu().detach()
class NASP_v(Virtue):
def __init__(self, embedding_dim, reg, args):
super(NASP_v, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
#initialize contextual infos
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
if self.args.multi_operation:
num_op = len(self.columns)
self.log_alpha = torch.nn.Parameter(torch.zeros((int(num_op*(num_op-1)/2), len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
else:
self.log_alpha = torch.nn.Parameter(torch.zeros((1, len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
self._arch_parameters = [self.log_alpha]
self.weights = Variable(torch.zeros_like(self.log_alpha))
if self.args.early_fix_arch:
self.fix_arch_index = {}
self.rand_array = torch.randn(3000000)
def reparameterize(self, mu, std):
std = torch.log(1 + torch.exp(std))
v = self.rand_array[:std.numel()].reshape(std.shape)
return (mu + std * v * 0.01)
def KL_distance(self, mean1, mean2, std1, std2):
a = torch.log(std2 / std1) + (std1 * std1 + (mean1 - mean2) * (mean1 - mean2)) / 2 / std2 / std2 - 1.0 / 2.0
return torch.sum(a)
def recommend(self, features):
self.eval()
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding_mean = self.embedding_mean[name1][max_index](features[name1])
name1_embedding_std = self.embedding_std[name1][max_index](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2][max_index](features[name2])
name2_embedding_std = self.embedding_std[name2][max_index](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
if self.args.trans:
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
else:
name1_embedding_trans = name1_embedding
name2_embedding_trans = name2_embedding
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
if self.args.first_order:
for name in self.columns:
inferences += self.embedding_first_order[name](features[name])
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
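    # contexts / pos_weights form an ever-growing replay buffer: every batch
    # seen by recommend() is appended here, and step() re-trains on the full
    # history via utils.get_data_queue_bandit at each bandit round.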
def MixedBinary_all(self, embedding_p, embedding_q, weights, FC):
return torch.sum(torch.stack([w * fc(OPS[primitive](embedding_p, embedding_q)) \
for w, primitive, fc in zip(weights, PRIMITIVES_BINARY, FC)]), 0)
def step(self, optimizer, arch_optimizer, epoch):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
if epoch < self.args.search_epoch:
train_epoch = (epoch+1)*5
else:
train_epoch = 1
for k in range(train_epoch):
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
arch_optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features, epoch, search=True)
# if epoch < self.args.search_epoch:
# arch_optimizer.zero_grad()
# output, error_loss, loss_alpha = self.forward(features, epoch, search=True)
# else:
# output, error_loss, loss_alpha = self.forward(features, epoch, search=False)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
arch_optimizer.step()
# if epoch < self.args.search_epoch:
# arch_optimizer.step()
return np.mean(losses)
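    # Warm-up schedule: during the search phase the model makes (epoch + 1) * 5
    # passes over the replay buffer, then settles to a single pass per round
    # once epoch reaches args.search_epoch.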
def revised_arch_index(self, epoch):
if self.args.early_fix_arch:
if epoch < self.args.search_epoch:
sort_log_alpha = torch.topk(F.softmax(self.log_alpha.data, dim=-1), 2)
argmax_index = (sort_log_alpha[0][:, 0] - sort_log_alpha[0][:, 1] >= 0.10)
for id in range(argmax_index.size(0)):
if argmax_index[id] == 1 and id not in self.fix_arch_index.keys():
self.fix_arch_index[id] = [sort_log_alpha[1][id, 0].item(),
self.log_alpha.detach().clone()[id, :]]
if epoch >= self.args.search_epoch:
                # fix the arch.
max_index = torch.argmax(self.log_alpha, dim=-1)
for id in range(max_index.size(0)):
if id not in self.fix_arch_index.keys():
self.fix_arch_index[id] = [max_index[id].item(), self.log_alpha.detach().clone()[id, :]]
for key, value_lst in self.fix_arch_index.items():
self.log_alpha.data[key, :] = value_lst[1]
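    # Early architecture fixing: while searching, a pair's operation is frozen
    # once the softmax gap between its top-2 primitives reaches 0.10; after
    # args.search_epoch every remaining pair is frozen to its argmax, and the
    # stored logits are written back so the choice can no longer drift.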
def forward(self, features, epoch, search):
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
self.weights.requires_grad_()
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
from joblib import Parallel, delayed
names_all = []
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding_mean = self.embedding_mean[name1][max_index](features[name1])
name1_embedding_std = self.embedding_std[name1][max_index](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2][max_index](features[name2])
name2_embedding_std = self.embedding_std[name2][max_index](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
if self.args.trans:
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
else:
name1_embedding_trans = name1_embedding
name2_embedding_trans = name2_embedding
names_all.append(
[name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1, ), self.FC[name1 + ":" + name2]])
res = Parallel(n_jobs=8, backend="threading")(
delayed(self.MixedBinary_all)(para1, para2, para3, para4) for para1, para2, para3, para4 in names_all)
inferences = sum(res)
if self.args.first_order:
for name in self.columns:
inferences += self.embedding_first_order[name](features[name])
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
kl = 0
for name in self.columns:
if not self.args.ofm:
kl += self.KL_distance(self.embedding_mean[name].weight,
0 * torch.ones_like(self.embedding_mean[name].weight),
torch.log(1 + torch.exp(self.embedding_std[name].weight)),
0.1 * torch.ones_like(self.embedding_std[name].weight))
else:
for index in range(len(PRIMITIVES_BINARY)):
kl += self.KL_distance(self.embedding_mean[name][index].weight,
0 * torch.ones_like(self.embedding_mean[name][index].weight),
torch.log(1 + torch.exp(self.embedding_std[name][index].weight)),
0.1 * torch.ones_like(self.embedding_std[name][index].weight))
(weighted_loss + kl/features["label"].shape[0]).backward()
self.log_alpha.grad = self.weights.grad
return inferences, weighted_loss, 0
def arch_parameters(self):
return self._arch_parameters
def genotype(self):
if not self.args.multi_operation:
genotype = PRIMITIVES_BINARY[self.log_alpha.argmax().cpu().numpy()]
genotype_p = F.softmax(self.log_alpha, dim=-1)
else:
genotype = []
for index in self.log_alpha.argmax(axis=1).cpu().numpy():
genotype.append(PRIMITIVES_BINARY[index])
genotype = ":".join(genotype[:10])
genotype_p = F.softmax(self.log_alpha, dim=-1)[:10]
return genotype, genotype_p.cpu().detach()
class MULTIPLY_v(Virtue):
def __init__(self, embedding_dim, reg, args):
super(MULTIPLY_v, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
        # initialize contextual info
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([0, 1, 0, 0, 0.0])
self.weights = Variable(torch.Tensor([0.0, 1.0, 0.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
def reparameterize(self, mu, std):
std = torch.log(1 + torch.exp(std))
v = self.rand_array[:std.numel()].reshape(std.shape)
return (mu + std * v * 0.01)
def KL_distance(self, mean1, mean2, std1, std2):
a = torch.log(std2 / std1) + (std1 * std1 + (mean1 - mean2) * (mean1 - mean2)) / 2 / std2 / std2 - 1.0 / 2.0
return torch.sum(a)
def recommend(self, features):
self.eval()
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding_mean = self.embedding_mean[name1][max_index](features[name1])
name1_embedding_std = self.embedding_std[name1][max_index](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2][max_index](features[name2])
name2_embedding_std = self.embedding_std[name2][max_index](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
                    name1_embedding_trans = name1_embedding  # self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
                    name2_embedding_trans = name2_embedding  # self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer, epoch):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
if epoch < self.args.search_epoch:
train_epoch = (epoch+1)*5
else:
train_epoch = 1
for k in range(train_epoch):
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
return np.mean(losses)
def forward(self, features):
regs = 0
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
from joblib import Parallel, delayed
names_all = []
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding_mean = self.embedding_mean[name1][max_index](features[name1])
name1_embedding_std = self.embedding_std[name1][max_index](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2][max_index](features[name2])
name2_embedding_std = self.embedding_std[name2][max_index](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
names_all.append(
[name1_embedding, name2_embedding, cur_weights.view(-1, ), self.FC[name1 + ":" + name2]])
res = Parallel(n_jobs=8, backend="threading")(
delayed(MixedBinary)(para1, para2, para3, para4) for para1, para2, para3, para4 in names_all)
inferences = sum(res)
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
kl = 0
for name in self.columns:
if not self.args.ofm:
kl += self.KL_distance(self.embedding_mean[name].weight,
0 * torch.ones_like(self.embedding_mean[name].weight),
torch.log(1 + torch.exp(self.embedding_std[name].weight)),
0.1 * torch.ones_like(self.embedding_std[name].weight))
else:
kl += self.KL_distance(self.embedding_mean[name].weight,
0 * torch.ones_like(self.embedding_mean[name].weight),
torch.log(1 + torch.exp(self.embedding_std[name].weight)),
0.1 * torch.ones_like(self.embedding_std[name].weight))
(weighted_loss + kl/features["label"].shape[0]).backward()
return inferences, weighted_loss, 0
def genotype(self):
return "FM", 0
class MAX_v(MULTIPLY_v):
def __init__(self, embedding_dim, reg, args):
super(MAX_v, self).__init__(embedding_dim, reg, args)
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([0, 0, 1, 0, 0])
self.weights = Variable(torch.Tensor([0.0, 0.0, 1.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
def genotype(self):
return "MAX", 0
class PLUS_v(MULTIPLY_v):
def __init__(self, embedding_dim, reg, args):
super(PLUS_v, self).__init__(embedding_dim, reg, args)
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([1, 0, 0, 0, 0.0])
self.weights = Variable(torch.Tensor([1.0, 0.0, 0.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
def genotype(self):
return "PLUS", 0
class MIN_v(MULTIPLY_v):
def __init__(self, embedding_dim, reg, args):
super(MIN_v, self).__init__(embedding_dim, reg, args)
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([0, 0, 0, 1, 0.0])
self.weights = Variable(torch.Tensor([0.0, 0.0, 0.0, 1.0, 0.0]))
self.rand_array = torch.randn(3000000)
def genotype(self):
return "MIN", 0
class CONCAT_v(MULTIPLY_v):
def __init__(self, embedding_dim, reg, args):
super(CONCAT_v, self).__init__(embedding_dim, reg, args)
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([0, 0, 0, 0, 1.0])
self.weights = Variable(torch.Tensor([0.0, 0.0, 0.0, 0.0, 1.0]))
self.rand_array = torch.randn(3000000)
def genotype(self):
return "CONCAT", 0
| 50,650 | 49.905528 | 176 | py |
AutoCO | AutoCO-main/exp_public/adult/simulate/utils.py | import numpy as np
import pandas as pd
import os
import os.path
import sys
import shutil
import torch
import torch.nn as nn
import torch.utils
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction import DictVectorizer
from sklearn.utils import shuffle
from torch.utils.data import Dataset, DataLoader
from models import PRIMITIVES_BINARY
def create_exp_dir(path, scripts_to_save=None):
if not os.path.exists(path):
os.makedirs(path)
print('Experiment dir : {}'.format(path))
if scripts_to_save is not None:
os.mkdir(os.path.join(path, 'scripts'))
for script in scripts_to_save:
dst_file = os.path.join(path, 'scripts', os.path.basename(script))
shutil.copyfile(script, dst_file)
def sample_arch():
arch = {}
arch['mlp'] = {}
arch['mlp']['p'] = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
arch['mlp']['q'] = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
arch['binary'] = PRIMITIVES_BINARY[np.random.randint(len(PRIMITIVES_BINARY))]
return arch
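# A minimal usage sketch (illustrative, not part of the original scripts):
#
#   arch = sample_arch()
#   arch['binary']     # e.g. 'multiply' -- one randomly drawn interaction op
#   arch['mlp']['p']   # a fresh 1 -> 8 -> 1 Tanh MLP applied elementwise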
class Adult(Dataset):
def __init__(self, root_dir, dummpy):
self.data = pd.read_csv(root_dir)
self.dummpy = dummpy
def __len__(self):
return self.data.shape[0]
def one_hot(self, df, cols):
res = []
for col in cols:
dummies = pd.get_dummies(df[col], prefix=col, drop_first=False)
res.append(dummies)
df = pd.concat(res, axis=1)
return df
def __getitem__(self, index):
sample = {}
if not self.dummpy:
sample["workclass"] = self.data.iloc[index]["workclass"]
sample["education"] = self.data.iloc[index]["education"]
sample["marital-status"] = self.data.iloc[index]["marital-status"]
sample["occupation"] = self.data.iloc[index]["occupation"]
sample["relationship"] = self.data.iloc[index]["relationship"]
sample["race"] = self.data.iloc[index]["race"]
sample["sex"] = self.data.iloc[index]["sex"]
sample["native-country"] = self.data.iloc[index]["native-country"]
eat_reward = self.data.iloc[index]["eat_reward"]
noteat_reward = self.data.iloc[index]["noteat_reward"]
sample["label"] = torch.Tensor([eat_reward, noteat_reward])
else:
cols = ["workclass", "education", "marital-status", "occupation", "relationship", "race", "sex", "native-country"]
data2 = self.one_hot(self.data, cols)
sample["feature"] = torch.Tensor(data2.iloc[index][:])
eat_reward = self.data.iloc[index]["eat_reward"]
noteat_reward = self.data.iloc[index]["noteat_reward"]
sample["label"] = torch.Tensor([eat_reward, noteat_reward])
return sample
def get_data_queue(args, dummpy):
print(args.dataset)
if args.dataset == 'Adult':
train_data = "../data/data.csv"
train_dataset = Adult(train_data, dummpy)
train_queue = DataLoader(train_dataset, batch_size=args.batch_size, pin_memory=True)
return train_queue
else:
return None
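# A minimal usage sketch, assuming an `args` namespace with dataset='Adult' and
# batch_size set (illustrative, not from the original scripts):
#
#   train_queue = get_data_queue(args, dummpy=False)
#   batch = next(iter(train_queue))
#   batch["label"].shape   # (batch_size, 2): [eat_reward, noteat_reward]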
class Adult2(Dataset):
def __init__(self, contexts, pos_weights):
self.data = contexts
self.pos_weights = pos_weights
def __len__(self):
return self.data["label"].shape[0]
def __getitem__(self, index):
sample = {}
sample["workclass"] = self.data["workclass"][index]
sample["education"] = self.data["education"][index]
sample["marital-status"] = self.data["marital-status"][index]
sample["occupation"] = self.data["occupation"][index]
sample["relationship"] = self.data["relationship"][index]
sample["race"] = self.data["race"][index]
sample["sex"] = self.data["sex"][index]
sample["native-country"] = self.data["native-country"][index]
sample["label"] = self.data["label"][index]
sample["pos_weights"] = self.pos_weights[index]
return sample
def get_data_queue_bandit(args, contexts, pos_weights):
train_dataset = Adult2(contexts, pos_weights)
    train_queue = DataLoader(train_dataset, batch_size=args.batch_size, pin_memory=True)
return train_queue | 4,315 | 36.206897 | 126 | py |
AutoCO | AutoCO-main/exp_public/adult/simulate/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
import utils
import time
PRIMITIVES_BINARY = ['plus', 'multiply', 'max', 'min', 'concat']
PRIMITIVES_NAS = [0, 2, 4, 8, 16]
SPACE_NAS = pow(len(PRIMITIVES_NAS), 5)
OPS = {
'plus': lambda p, q: p + q,
'multiply': lambda p, q: p * q,
'max': lambda p, q: torch.max(torch.stack((p, q)), dim=0)[0],
'min': lambda p, q: torch.min(torch.stack((p, q)), dim=0)[0],
'concat': lambda p, q: torch.cat([p, q], dim=-1),
'norm_0': lambda p: torch.ones_like(p),
'norm_0.5': lambda p: torch.sqrt(torch.abs(p) + 1e-7),
'norm_1': lambda p: torch.abs(p),
'norm_2': lambda p: p ** 2,
'I': lambda p: torch.ones_like(p),
'-I': lambda p: -torch.ones_like(p),
'sign': lambda p: torch.sign(p),
}
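# Every binary op maps a pair of (batch, dim) embeddings to one tensor; all
# preserve the shape except 'concat', which doubles the last dimension. That is
# why the concat branch of each FC dictionary below gets a
# Linear(2 * embedding_dim, ...) head while the other ops get
# Linear(embedding_dim, ...).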
def constrain(p):
c = torch.norm(p, p=2, dim=1, keepdim=True)
c[c < 1] = 1.0
p.data.div_(c)
def MixedBinary(embedding_p, embedding_q, weights, FC):
return torch.sum(torch.stack([w * fc(OPS[primitive](embedding_p, embedding_q)) \
for w,primitive,fc in zip(weights,PRIMITIVES_BINARY,FC)]), 0)
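# A minimal sketch of MixedBinary (illustrative shapes): every primitive is
# applied to (p, q), pushed through its own linear head, scaled by its
# architecture weight, and the five results are summed -- with a one-hot
# `weights` this reduces to evaluating a single chosen op.
#
#   p, q = torch.randn(4, 8), torch.randn(4, 8)
#   fc = nn.ModuleList([nn.Linear(16 if op == 'concat' else 8, 2, bias=False)
#                       for op in PRIMITIVES_BINARY])
#   out = MixedBinary(p, q, torch.tensor([0., 1., 0., 0., 0.]), fc)  # (4, 2)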
class Virtue(nn.Module):
def __init__(self, embedding_dim, reg, embedding_num=12, ofm=False):
super(Virtue, self).__init__()
self.embedding_dim = embedding_dim
self.reg = reg
self.embedding_all = nn.ModuleDict({})
self.columns = ["cap-shape", "cap-surface", "cap-color", "bruises", "odor", "gill-attachment",
"gill-spacing", "gill-size", "gill-color", "stalk-shape", "stalk-root", "stalk-surface-above-ring",
"stalk-surface-below-ring", "stalk-color-above-ring", "stalk-color-below-ring", "veil-type",
"veil-color", "ring-number", "ring-type", "spore-print-color", "population", "habitat"]
if not ofm:
for name in self.columns:
self.embedding_all[name] = nn.Embedding(embedding_num, embedding_dim)
else:
for name in self.columns:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
temp.append(nn.Embedding(embedding_num, embedding_dim))
self.embedding_all[name] = temp
def compute_loss(self, inferences, labels, regs):
labels = torch.reshape(labels, [-1,1])
loss = F.binary_cross_entropy_with_logits(inferences, labels.float())
#loss = F.mse_loss(inferences, labels)
return loss + regs
class Network(Virtue):
def __init__(self, embedding_dim, arch, reg):
super(Network, self).__init__(embedding_dim, reg)
self.arch = arch
self.mlp_p = arch['mlp']['p']
self.mlp_q = arch['mlp']['q']
self.FC = nn.ModuleDict({})
for name1 in self.columns:
for name2 in self.columns:
if arch['binary'] == 'concat':
self.FC[name1+ ":" + name2] = nn.Linear(2*embedding_dim, 1, bias=False)
else:
self.FC[name1 + ":" + name2] = nn.Linear(embedding_dim, 1, bias=False)
def forward(self, features):
for value in self.FC.values():
constrain(next(value.parameters()))
inferences = 0
regs = 0
for name1 in self.columns:
for name2 in self.columns:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
regs += self.reg * (torch.norm(name1_embedding) + torch.norm(name2_embedding))
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1,1)).view(name1_embedding.size())
                name2_embedding_trans = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += self.FC[name1 + ":" + name2](OPS[self.arch['binary']](name1_embedding_trans, name2_embedding_trans))
return inferences, regs
class Network_Search(Virtue):
def __init__(self, embedding_dim, reg):
super(Network_Search, self).__init__(embedding_dim, reg)
self.FC = nn.ModuleDict({})
for name1 in self.columns:
for name2 in self.columns:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self._initialize_alphas()
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self._arch_parameters = {}
self._arch_parameters['mlp'] = {}
self._arch_parameters['mlp']['p'] = self.mlp_p
self._arch_parameters['mlp']['q'] = self.mlp_q
self._arch_parameters['binary'] = Variable(torch.ones(len(PRIMITIVES_BINARY),
dtype=torch.float, device='cpu') / 2, requires_grad=True)
#self._arch_parameters['binary'] = Variable(torch.Tensor([1.0,1.0,1.0,1.0,1.0]), requires_grad=True)
self._arch_parameters['binary'].data.add_(
torch.randn_like(self._arch_parameters['binary'])*1e-3)
def arch_parameters(self):
return list(self._arch_parameters['mlp']['p'].parameters()) + \
list(self._arch_parameters['mlp']['q'].parameters()) + [self._arch_parameters['binary']]
def new(self):
        model_new = Network_Search(self.embedding_dim, self.reg)
for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
x.data = y.data.clone()
return model_new
def clip(self):
m = nn.Hardtanh(0, 1)
self._arch_parameters['binary'].data = m(self._arch_parameters['binary'])
def binarize(self):
self._cache = self._arch_parameters['binary'].clone()
max_index = self._arch_parameters['binary'].argmax().item()
for i in range(self._arch_parameters['binary'].size(0)):
if i == max_index:
self._arch_parameters['binary'].data[i] = 1.0
else:
self._arch_parameters['binary'].data[i] = 0.0
def recover(self):
self._arch_parameters['binary'].data = self._cache
del self._cache
def forward(self, features):
# for i in range(len(PRIMITIVES_BINARY)):
# constrain(next(self._FC[i].parameters()))
inferences = 0
regs = 0
for name1 in self.columns:
for name2 in self.columns:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
regs += self.reg * (torch.norm(name1_embedding) + torch.norm(name2_embedding))
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
                name2_embedding_trans = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, self._arch_parameters['binary'], self.FC[name1 + ":" + name2])
return inferences, regs
def genotype(self):
genotype = PRIMITIVES_BINARY[self._arch_parameters['binary'].argmax().cpu().numpy()]
genotype_p = F.softmax(self._arch_parameters['binary'], dim=-1)
return genotype, genotype_p.cpu().detach()
def step(self, features, features_valid, lr, arch_optimizer, unrolled):
self.zero_grad()
arch_optimizer.zero_grad()
# binarize before forward propagation
self.binarize()
loss = self._backward_step(features_valid)
# restore weight before updating
self.recover()
arch_optimizer.step()
return loss
def _backward_step(self, features_valid):
inferences, regs = self(features_valid)
loss = self.compute_loss(inferences, features_valid["label"], regs)
loss.backward()
return loss
class DSNAS(Virtue):
def __init__(self, embedding_dim, reg, args):
super(DSNAS, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for name1 in self.columns:
for name2 in self.columns:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
        # initialize contextual info
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
if self.args.multi_operation:
num_op = len(self.columns)
self.log_alpha = torch.nn.Parameter(torch.zeros((num_op*num_op, len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
else:
self.log_alpha = torch.nn.Parameter(torch.zeros((1, len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
self._arch_parameters = [self.log_alpha]
self.weights = Variable(torch.zeros_like(self.log_alpha))
if self.args.early_fix_arch:
self.fix_arch_index = {}
def recommend(self, features):
self.eval()
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
if self.args.early_fix_arch:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.weights[key, :].zero_()
self.weights[key, value_lst[0]] = 1
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for name1 in self.columns:
for name2 in self.columns:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding = self.embedding_all[name1][max_index](features[name1])
name2_embedding = self.embedding_all[name2][max_index](features[name2])
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
                name1_embedding_trans = name1_embedding  # self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
                name2_embedding_trans = name2_embedding  # self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
#ipdb.set_trace()
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
simulate_index = []
for i in range(len(max_index)):
if np.random.random() < self.args.epsion:
simulate_index.append(np.random.randint(0, 2))
else:
simulate_index.append(max_index[i])
a_ind = np.array([(i, val) for i, val in enumerate(simulate_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
cnt = 0
time_data = 0
time_forward = 0
time_update = 0
end = -1
for step, features in enumerate(train_bandit):
if end!=-1:
time_data += time.time() - end
begin = time.time()
optimizer.zero_grad()
arch_optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
time_forward += time.time() - begin
losses.append(error_loss.cpu().detach().item())
begin2 = time.time()
optimizer.step()
arch_optimizer.step()
time_update += time.time() - begin2
cnt += 1
end = time.time()
print("time_data: ", time_data)
print("time_forward: ", time_forward)
print("time_update: ", time_update)
print("cnt: ", cnt)
return np.mean(losses)
def revised_arch_index(self):
if self.args.early_fix_arch:
sort_log_alpha = torch.topk(F.softmax(self.log_alpha.data, dim=-1), 2)
argmax_index = (sort_log_alpha[0][:, 0] - sort_log_alpha[0][:, 1] >= 0.01)
for id in range(argmax_index.size(0)):
if argmax_index[id] == 1 and id not in self.fix_arch_index.keys():
self.fix_arch_index[id] = [sort_log_alpha[1][id, 0].item(),
self.log_alpha.detach().clone()[id, :]]
for key, value_lst in self.fix_arch_index.items():
self.log_alpha.data[key, :] = value_lst[1]
def forward(self, features):
regs = 0
self.weights = self._get_weights(self.log_alpha)
self.revised_arch_index()
if self.args.early_fix_arch:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.weights[key, :].zero_()
self.weights[key, value_lst[0]] = 1
cate_prob = F.softmax(self.log_alpha, dim=-1)
self.cate_prob = cate_prob.clone().detach()
loss_alpha = torch.log(
(self.weights * F.softmax(self.log_alpha, dim=-1)).sum(-1)).sum()
self.weights.requires_grad_()
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
        from joblib import Parallel, delayed
names_all = []
for name1 in self.columns:
for name2 in self.columns:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding = self.embedding_all[name1][max_index](features[name1])
name2_embedding = self.embedding_all[name2][max_index](features[name2])
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
names_all.append([name1_embedding, name2_embedding, cur_weights.view(-1,), self.FC[name1 + ":" + name2]])
res = Parallel(n_jobs=8, backend="threading")(delayed(MixedBinary)(para1, para2, para3, para4) for para1,para2,para3,para4 in names_all)
inferences = sum(res)
# for name1 in self.columns:
# for name2 in self.columns:
# if self.args.multi_operation:
# cur_weights = self.weights[cur_index]
# max_index = cur_weights.argmax().item()
# cur_index += 1
# if self.args.ofm:
# name1_embedding = self.embedding_all[name1][max_index](features[name1])
# name2_embedding = self.embedding_all[name2][max_index](features[name2])
# else:
# name1_embedding = self.embedding_all[name1](features[name1])
# name2_embedding = self.embedding_all[name2](features[name2])
# regs += self.reg * (torch.norm(name1_embedding) + torch.norm(name2_embedding))
# name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
# name2_embedding_trans = self.mlp_p(name2_embedding.view(-1, 1)).view(name2_embedding.size())
# inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
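        # DSNAS-style surrogate: loss_alpha is log p(sampled one-hot), so after
        # backward() the gradient collected by the sampled weights (summed into
        # block_reward) rescales log_alpha.grad, yielding a REINFORCE-like
        # update for the architecture distribution from one forward pass.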
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
self.weights.grad = torch.zeros_like(self.weights)
(weighted_loss + loss_alpha).backward()
self.block_reward = self.weights.grad.data.sum(-1)
self.log_alpha.grad.data.mul_(self.block_reward.view(-1, 1))
return inferences, weighted_loss, loss_alpha
def _get_weights(self, log_alpha):
if self.args.random_sample:
uni = torch.ones_like(log_alpha)
m = torch.distributions.one_hot_categorical.OneHotCategorical(uni)
else:
m = torch.distributions.one_hot_categorical.OneHotCategorical(probs=F.softmax(log_alpha, dim=-1))
return m.sample()
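    # With args.random_sample the operation is drawn uniformly (pure
    # exploration); otherwise a one-hot is sampled from softmax(log_alpha), so
    # architecture choices stay stochastic during training and are frozen to
    # the argmax only at recommend() time.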
def arch_parameters(self):
return self._arch_parameters
def genotype(self):
if not self.args.multi_operation:
genotype = PRIMITIVES_BINARY[self.log_alpha.argmax().cpu().numpy()]
genotype_p = F.softmax(self.log_alpha, dim=-1)
else:
genotype = []
for index in self.log_alpha.argmax(axis=1).cpu().numpy():
genotype.append(PRIMITIVES_BINARY[index])
genotype = ":".join(genotype[:10])
genotype_p = F.softmax(self.log_alpha, dim=-1)[:10]
return genotype, genotype_p.cpu().detach()
class Uniform:
def __init__(self, embedding_dim, reg, args):
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
def recommend(self, features):
pos_weights = torch.zeros_like(features["label"])
max_index = np.random.randint(0, 2, features["label"].shape[0])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
return reward
def step(self, optimizer, arch_optimizer):
return 0
def genotype(self):
return "uniform", 0
class Egreedy:
def __init__(self, embedding_dim, reg, args):
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
self.epsion = 0.2
        self.action_rewards = {0: [0, 1.0], 1: [0, 1.0]}  # per action: [total reward, pull count]
self.max_action = 0
def recommend(self, features):
        max_reward = float("-inf")
for key in self.action_rewards:
if self.action_rewards[key][0]/self.action_rewards[key][1] > max_reward:
max_reward = self.action_rewards[key][0]/self.action_rewards[key][1]
self.max_action = key
pos_weights = torch.zeros_like(features["label"])
max_index = np.random.randint(0, 2, features["label"].shape[0])
simulate_index = []
for i in range(len(max_index)):
if np.random.random()<self.epsion:
simulate_index.append(max_index[i])
else:
simulate_index.append(self.max_action)
a_ind = np.array([(i, val) for i, val in enumerate(simulate_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
action_rewards = torch.sum(torch.mul(features["label"], pos_weights), dim=0)
action_nums = torch.sum(pos_weights, dim=0)
for key in self.action_rewards:
temp = self.action_rewards[key]
temp[0] += action_rewards[key].cpu().detach().item()
temp[1] += action_nums[key].cpu().detach().item()
self.action_rewards[key] = temp
return reward
def step(self, optimizer, arch_optimizer):
return 0
def genotype(self):
return "uniform", 0
class FM(Virtue):
def __init__(self, embedding_dim, reg, args):
super(FM, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for name1 in self.columns:
for name2 in self.columns:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
        # initialize contextual info
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([1, 1, 1, 1, 1.0])
self.weights = Variable(torch.Tensor([0.0, 1.0, 0.0, 0.0, 0.0]))
def recommend(self, features):
self.eval()
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for name1 in self.columns:
for name2 in self.columns:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding = self.embedding_all[name1][max_index](features[name1])
name2_embedding = self.embedding_all[name2][max_index](features[name2])
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
                name1_embedding_trans = name1_embedding  # self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
                name2_embedding_trans = name2_embedding  # self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
cnt = 0
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
cnt += 1
print("cnt: ", cnt)
return np.mean(losses)
def forward(self, features):
regs = 0
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for name1 in self.columns:
for name2 in self.columns:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding = self.embedding_all[name1][max_index](features[name1])
name2_embedding = self.embedding_all[name2][max_index](features[name2])
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
regs += self.reg * (torch.norm(name1_embedding) + torch.norm(name2_embedding))
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
                name2_embedding_trans = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
weighted_loss.backward()
return inferences, weighted_loss, 0
def genotype(self):
return "FM", 0
class Plus(FM):
def __init__(self, embedding_dim, reg, args):
        super(Plus, self).__init__(embedding_dim, reg, args)
self._initialize_alphas()
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.weights = Variable(torch.Tensor([1.0, 0.0, 0.0, 0.0, 0.0]))
def genotype(self):
return "Plus", 0
class Max(FM):
def __init__(self, embedding_dim, reg, args):
        super(Max, self).__init__(embedding_dim, reg, args)
self._initialize_alphas()
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.weights = Variable(torch.Tensor([0.0, 0.0, 1.0, 0.0, 0.0]))
def genotype(self):
return "Max", 0
class Min(FM):
def __init__(self, embedding_dim, reg, args):
        super(Min, self).__init__(embedding_dim, reg, args)
self._initialize_alphas()
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.weights = Variable(torch.Tensor([0.0, 0.0, 0.0, 1.0, 0.0]))
def genotype(self):
return "Min", 0
class Concat(FM):
def __init__(self, embedding_dim, reg, args):
        super(Concat, self).__init__(embedding_dim, reg, args)
self._initialize_alphas()
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.weights = Variable(torch.Tensor([0.0, 0.0, 0.0, 0.0, 1.0]))
def genotype(self):
return "Concat", 0 | 29,058 | 42.962179 | 165 | py |
AutoCO | AutoCO-main/exp_public/adult/simulate/baseline.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
import utils
from collections import Counter
from torch.distributions.multivariate_normal import MultivariateNormal
PRIMITIVES_BINARY = ['plus', 'multiply', 'max', 'min', 'concat']
PRIMITIVES_NAS = [0, 2, 4, 8, 16]
SPACE_NAS = pow(len(PRIMITIVES_NAS), 5)
OPS = {
'plus': lambda p, q: p + q,
'multiply': lambda p, q: p * q,
'max': lambda p, q: torch.max(torch.stack((p, q)), dim=0)[0],
'min': lambda p, q: torch.min(torch.stack((p, q)), dim=0)[0],
'concat': lambda p, q: torch.cat([p, q], dim=-1),
'norm_0': lambda p: torch.ones_like(p),
'norm_0.5': lambda p: torch.sqrt(torch.abs(p) + 1e-7),
'norm_1': lambda p: torch.abs(p),
'norm_2': lambda p: p ** 2,
'I': lambda p: torch.ones_like(p),
'-I': lambda p: -torch.ones_like(p),
'sign': lambda p: torch.sign(p),
}
class Virtue_v(nn.Module):
def __init__(self, embedding_dim, reg, embedding_num=12, ofm=False, first_order=False):
super(Virtue_v, self).__init__()
self.embedding_dim = embedding_dim
self.reg = reg
self.embedding_mean = nn.ModuleDict({})
self.embedding_std = nn.ModuleDict({})
if first_order:
self.embedding_first_order = nn.ModuleDict({})
self.columns = ["workclass", "education", "marital-status", "occupation", "relationship", "race", "sex", "native-country"]
for name in self.columns:
self.embedding_mean[name] = nn.Embedding(embedding_num, embedding_dim)
self.embedding_std[name] = nn.Embedding(embedding_num, embedding_dim)
if first_order:
self.embedding_first_order[name] = nn.Embedding(embedding_num, 1)
self.embedding_action = nn.Embedding(2, embedding_dim)
if first_order:
self.embedding_action_first_order = nn.Embedding(2, 1)
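# Virtue_v keeps two embedding tables per feature (posterior mean and std) so
# subclasses can draw Thompson-style samples; Virtue below is the deterministic
# counterpart with a single table per feature. Both add a two-row action
# embedding because the simulated bandit has two arms (eat / not eat).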
class Virtue(nn.Module):
def __init__(self, embedding_dim, reg, embedding_num=12, ofm=False, first_order=False):
super(Virtue, self).__init__()
self.embedding_dim = embedding_dim
self.reg = reg
self.embedding_all = nn.ModuleDict({})
if first_order:
self.embedding_first_order = nn.ModuleDict({})
self.columns = ["workclass", "education", "marital-status", "occupation", "relationship", "race", "sex", "native-country"]
for name in self.columns:
self.embedding_all[name] = nn.Embedding(embedding_num, embedding_dim)
if first_order:
self.embedding_first_order[name] = nn.Embedding(embedding_num, 1)
self.embedding_action = nn.Embedding(2, embedding_dim)
if first_order:
self.embedding_action_first_order = nn.Embedding(2, 1)
class FM_v(Virtue_v):
"""
    FM with EE (exploration-exploitation via variational embeddings)
"""
def __init__(self, embedding_dim, reg, args):
super(FM_v, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num, first_order=args.first_order)
self.args = args
self._initialize_alphas()
        # initialize contextual info
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.log_alpha = torch.Tensor([1, 1, 1, 1, 1.0])
self.weights = Variable(torch.Tensor([0.0, 1.0, 0.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
def reparameterize(self, mu, std):
std = torch.log(1 + torch.exp(std))
v = self.rand_array[:std.numel()].reshape(std.shape)
return (mu + std * v * 0.01)
def KL_distance(self, mean1, mean2, std1, std2):
a = torch.log(std2 / std1) + (std1 * std1 + (mean1 - mean2) * (mean1 - mean2)) / 2 / std2 / std2 - 1.0 / 2.0
return torch.sum(a)
def recommend(self, features):
self.eval()
inferences = 0
inferences_0 = 0
inferences_1 = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
#inferences_0 += torch.sum(name1_embedding*name2_embedding, dim=1, keepdim=True)
#inferences_1 += torch.sum(name1_embedding*name2_embedding, dim=1, keepdim=True)
inferences += torch.sum(name1_embedding*name2_embedding, dim=1, keepdim=True)
inferences_0 = 0 # inferences.clone() # action 0
inferences_1 = 0 # inferences.clone() # action_1
#features with action
for name1 in self.columns:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()), dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()), dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer, epoch):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
if epoch < self.args.search_epoch:
train_epoch = (epoch+1)*5
else:
train_epoch = 1
for k in range(train_epoch):
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
print(self.embedding_mean["workclass"](torch.LongTensor([[1]])))
return np.mean(losses)
def forward(self, features):
regs = 0
inferences = 0
inferences_0 = 0
inferences_1 = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
#inferences_0 += torch.sum(name1_embedding * name2_embedding, dim=1, keepdim=True)
#inferences_1 += torch.sum(name1_embedding * name2_embedding, dim=1, keepdim=True)
inferences += torch.sum(name1_embedding * name2_embedding, dim=1, keepdim=True)
# features with action
inferences_0 = 0 # inferences.clone() # action 0
inferences_1 = 0 # inferences.clone() # action_1
for name1 in self.columns:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()),
dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()),
dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
kl = 0
for name in self.columns:
kl += self.KL_distance(self.embedding_mean[name].weight,
0 * torch.ones_like(self.embedding_mean[name].weight),
torch.log(1 + torch.exp(self.embedding_std[name].weight)),
0.1 * torch.ones_like(self.embedding_std[name].weight))
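        # Variational objective: weighted squared error on the chosen actions
        # plus KL(q || N(0, 0.1^2)) scaled by 1/batch_size -- a minibatch ELBO
        # where q has mean embedding_mean and std softplus(embedding_std).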
(weighted_loss + kl/features["label"].shape[0]).backward()
return inferences, weighted_loss, 0
def genotype(self):
return "FM_v", 0
class FM(Virtue):
"""
    FM without EE (deterministic point embeddings, no exploration bonus)
"""
def __init__(self, embedding_dim, reg, args):
super(FM, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num, first_order=args.first_order)
self.args = args
self._initialize_alphas()
        # initialize contextual info
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.log_alpha = torch.Tensor([1, 1, 1, 1, 1.0])
self.weights = Variable(torch.Tensor([0.0, 1.0, 0.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
def recommend(self, features):
self.eval()
inferences = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
inferences += torch.sum(name1_embedding*name2_embedding, dim=1, keepdim=True)
inferences_0 = 0 #inferences.clone() # action 0
inferences_1 = 0 #inferences.clone() # action_1
#features with action
for name1 in self.columns:
name1_embedding = self.embedding_all[name1](features[name1])
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()), dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()), dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer, epoch):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
if epoch < self.args.search_epoch:
train_epoch = (epoch+1)*5
else:
train_epoch = 1
for k in range(train_epoch):
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
print(self.embedding_all["workclass"](torch.LongTensor([[1]])))
return np.mean(losses)
def forward(self, features):
regs = 0
inferences = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
inferences += torch.sum(name1_embedding * name2_embedding, dim=1, keepdim=True)
# features with action
inferences_0 = 0 #inferences.clone() # action 0
inferences_1 = 0 #inferences.clone() # action_1
for name1 in self.columns:
name1_embedding = self.embedding_all[name1](features[name1])
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()),
dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()),
dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
weighted_loss.backward()
return inferences, weighted_loss, 0
def genotype(self):
return "FM", 0
class FM_v2(Virtue_v):
"""
    FM with EE and a per-pair FC layer on the interactions
"""
def __init__(self, embedding_dim, reg, args):
super(FM_v2, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num, first_order=args.first_order)
self.args = args
self._initialize_alphas()
self.FC = nn.ModuleDict({})
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
self.FC[name1 + ":" + name2] = nn.Linear(embedding_dim, 1, bias=False)
#initialize contextual infos
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.log_alpha = torch.Tensor([1, 1, 1, 1, 1.0])
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.weights = Variable(torch.Tensor([0.0, 1.0, 0.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
def reparameterize(self, mu, std):
std = torch.log(1 + torch.exp(std))
v = self.rand_array[:std.numel()].reshape(std.shape)
return (mu + std * v * 0.01)
def KL_distance(self, mean1, mean2, std1, std2):
a = torch.log(std2 / std1) + (std1 * std1 + (mean1 - mean2) * (mean1 - mean2)) / 2 / std2 / std2 - 1.0 / 2.0
return torch.sum(a)
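    # Note: reparameterize() draws noise from a fixed buffer of N(0, 1) samples
    # (self.rand_array) and returns mu + softplus(std) * v * 0.01, i.e. the
    # standard reparameterization trick with a heavily damped std.
    # KL_distance() is the closed-form KL between diagonal Gaussians, summed
    # elementwise:
    #   KL(N(m1, s1) || N(m2, s2)) = log(s2/s1) + (s1^2 + (m1-m2)^2) / (2*s2^2) - 1/2
    # In forward() it is applied against the prior N(0, 0.1) as a variational
    # regularizer on the embedding tables.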
def recommend(self, features):
self.eval()
inferences = 0
inferences_0 = 0
inferences_1 = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
inferences += self.FC[name1 + ":" + name2](name1_embedding * name2_embedding)
inferences_0 = inferences.clone() # action 0
inferences_1 = inferences.clone() # action_1
#features with action
for name1 in self.columns:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()), dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()), dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer, step):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
cnt = 0
for step, features in enumerate(train_bandit):
cnt += 1
optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
print("cnt: ", cnt)
return np.mean(losses)
def forward(self, features):
regs = 0
inferences = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
inferences += self.FC[name1 + ":" + name2](name1_embedding * name2_embedding)
# features with action
inferences_0 = inferences.clone() # action 0
inferences_1 = inferences.clone() # action_1
for name1 in self.columns:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
inferences_0 += torch.sum(name1_embedding * self.embedding_action(torch.zeros_like(features[name1]).long()),
dim=1, keepdim=True)
inferences_1 += torch.sum(name1_embedding * self.embedding_action(torch.ones_like(features[name1]).long()),
dim=1, keepdim=True)
if self.args.first_order:
name1_embedding_first_order = self.embedding_first_order[name1](features[name1])
inferences_0 += name1_embedding_first_order
inferences_1 += name1_embedding_first_order
if self.args.first_order:
inferences_0 += self.embedding_action_first_order(torch.zeros_like(features[name1]).long())
inferences_1 += self.embedding_action_first_order(torch.ones_like(features[name1]).long())
inferences = torch.cat([inferences_0, inferences_1], dim=1)
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
kl = 0
for name in self.columns:
kl += self.KL_distance(self.embedding_mean[name].weight,
0 * torch.ones_like(self.embedding_mean[name].weight),
torch.log(1 + torch.exp(self.embedding_std[name].weight)),
0.1 * torch.ones_like(self.embedding_std[name].weight))
(weighted_loss + kl/features["label"].shape[0]).backward()
return inferences, weighted_loss, 0
def genotype(self):
return "FM_v2", 0
class Random:
def __init__(self, embedding_dim, reg, args):
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
def recommend(self, features):
pos_weights = torch.zeros_like(features["label"])
max_index = np.random.randint(0, 2, features["label"].shape[0])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
return reward
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "Random", 0
class Egreedy:
def __init__(self, embedding_dim, reg, args):
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
        self.epsilon = 0.2
        self.action_rewards = {0: [0, 1.0], 1: [0.1, 1.0]}  # total reward, action_num
self.max_action = 0
def recommend(self, features):
        max_reward = float("-inf")
for key in self.action_rewards:
if self.action_rewards[key][0]/self.action_rewards[key][1] > max_reward:
max_reward = self.action_rewards[key][0]/self.action_rewards[key][1]
self.max_action = key
pos_weights = torch.zeros_like(features["label"])
max_index = np.random.randint(0, 2, features["label"].shape[0])
simulate_index = []
for i in range(len(max_index)):
            if np.random.random() < self.epsilon:
simulate_index.append(max_index[i])
else:
simulate_index.append(self.max_action)
a_ind = np.array([(i, val) for i, val in enumerate(simulate_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
action_rewards = torch.sum(torch.mul(features["label"], pos_weights), dim=0)
action_nums = torch.sum(pos_weights, dim=0)
for key in self.action_rewards:
temp = self.action_rewards[key]
temp[0] += action_rewards[key].cpu().detach().item()
temp[1] += action_nums[key].cpu().detach().item()
self.action_rewards[key] = temp
return reward
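    # Note: epsilon-greedy over two arms.  Each arm keeps a running
    # [total_reward, pull_count] pair; the greedy arm maximizes the empirical
    # mean reward, and with probability epsilon a uniformly random arm is
    # played instead.  The initial values {0: [0, 1.0], 1: [0.1, 1.0]} act as
    # a small optimistic prior that breaks the tie before any data arrives.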
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "Egreedy", 0
class Thompson:
def __init__(self, embedding_dim, reg, args):
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
        self.epsilon = 0.2
        self.action_rewards = {0: [0, 0], 1: [0, 0]}  # total reward, action_num
self.max_action = 0
def recommend(self, features):
#Thompson sampling
values = []
num = 2
N = 10000
for index in range(num):
pos = np.random.beta(1+int(self.action_rewards[index][0]), 2+int(self.action_rewards[index][1]), N)
values.append(pos)
action_pos = np.vstack(values)
action_num = Counter(action_pos.argmax(axis=0))
action_percentage = []
for index in range(num):
action_percentage.append(action_num[index]/N)
simulate_index = []
for i in range(features["label"].shape[0]):
simulate_index.append(np.random.choice(range(num), p=action_percentage))
pos_weights = torch.zeros_like(features["label"])
a_ind = np.array([(i, val) for i, val in enumerate(simulate_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
action_rewards = torch.sum(torch.mul(features["label"], pos_weights), dim=0)
action_nums = torch.sum(pos_weights, dim=0)
for key in self.action_rewards:
temp = self.action_rewards[key]
temp[0] += action_rewards[key].cpu().detach().item()
temp[1] += action_nums[key].cpu().detach().item()
self.action_rewards[key] = temp
return reward
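    # Note: Bernoulli Thompson sampling via Monte Carlo.  For each arm, N
    # draws are taken from Beta(1 + total_reward, 2 + pull_count); note the
    # second shape parameter uses the pull count rather than the failure
    # count, which acts as a slightly pessimistic prior.  The probability
    # that each arm is best is estimated from the argmax frequencies, and
    # actions for the batch are then sampled from that probability vector.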
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "Thompson", 0
class LinUCB2:
def __init__(self, embedding_dim, reg, args):
self.Aa = torch.eye(104)
self.ba = torch.zeros(104).view(-1,1)
self.alpha = 0.05
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
def recommend(self, features):
action1_features = torch.zeros((features["label"].shape[0], 2))
action1_features[:, 0] = 1.0
action2_features = torch.zeros((features["label"].shape[0], 2))
action2_features[:, 1] = 1.0
action1_input = torch.cat([features["feature"], action1_features], dim=1)
action2_input = torch.cat([features["feature"], action2_features], dim=1)
inputs_all = [action1_input, action2_input]
theta = torch.matmul(torch.inverse(self.Aa), self.ba)
action1_score = torch.matmul(action1_input, theta) + self.alpha * torch.sqrt(
torch.sum(torch.mul(torch.matmul(action1_input, torch.inverse(self.Aa)), action1_input), dim=-1)).view(-1,1)
action2_score = torch.matmul(action2_input, theta) + self.alpha * torch.sqrt(
torch.sum(torch.mul(torch.matmul(action2_input, torch.inverse(self.Aa)), action2_input), dim=-1)).view(-1, 1)
score_all = torch.cat([action1_score, action2_score], dim=1)
max_index = score_all.argmax(dim=1)
print(Counter(max_index.numpy()))
pos_weights = torch.zeros_like(features["label"])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
#update Aa and ba
for i in range(max_index.shape[0]):
cur_action = max_index[i].item()
cur_reward = features["label"][i, cur_action].item()
cur_feature = inputs_all[cur_action][i]
self.Aa += torch.matmul(cur_feature.view(-1,1), cur_feature.view(1,-1))
self.ba += cur_reward * cur_feature.view(-1,1)
return reward
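    # Note: LinUCB2 keeps a single ridge-regression model shared by both
    # arms; the arm identity is appended to the context as a 2-dim one-hot,
    # giving a 104-dim input.  With theta = Aa^{-1} ba, the score of input x
    # is
    #   x^T theta + alpha * sqrt(x^T Aa^{-1} x),
    # i.e. mean prediction plus an upper-confidence bonus.  Aa and ba are
    # then updated with the played (x, reward) pairs.  Recomputing
    # torch.inverse(Aa) on every call is the simple but slow choice; a
    # rank-one (Sherman-Morrison) update would be the usual optimization.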
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "LinUCB2", 0
#
class LinUCB:
def __init__(self, embedding_dim, reg, args):
self.action_num = 2
self.feature_dim = 102
self.Aa = []
self.ba = []
for i in range(self.action_num):
self.Aa.append(torch.eye(self.feature_dim))
self.ba.append(torch.zeros(self.feature_dim).view(-1,1))
self.alpha = 1.0
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
def recommend(self, features):
score_all = []
for i in range(self.action_num):
Aa = self.Aa[i]
ba = self.ba[i]
theta = torch.matmul(torch.inverse(Aa), ba)
score = torch.matmul(features["feature"], theta) + self.alpha * torch.sqrt(
torch.sum(torch.mul(torch.matmul(features["feature"], torch.inverse(Aa)), features["feature"]), dim=-1)
).view(-1,1)
score_all.append(score)
score_all = torch.cat(score_all, dim=1)
max_index = score_all.argmax(dim=1)
print(Counter(max_index.numpy()))
pos_weights = torch.zeros_like(features["label"])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
#update Aa and ba
for i in range(max_index.shape[0]):
cur_action = max_index[i].item()
cur_reward = features["label"][i, cur_action].item()
cur_feature = features["feature"][i]
Aa = self.Aa[cur_action]
ba = self.ba[cur_action]
Aa += torch.matmul(cur_feature.view(-1,1), cur_feature.view(1,-1))
ba += cur_reward * cur_feature.view(-1,1)
self.Aa[cur_action] = Aa
self.ba[cur_action] = ba
return reward
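    # Note: this is the disjoint variant (in the style of Li et al., 2010):
    # a separate (Aa, ba) ridge model per arm on the raw 102-dim context,
    # scored with the same UCB rule as above.  Hypothetical usage sketch
    # (argument values assumed, not part of this file):
    #   bandit = LinUCB(embedding_dim=8, reg=0.0, args=args)
    #   reward = bandit.recommend(features)  # features["feature"]: (B, 102)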
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "LinUCB", 0
class LinThompson:
def __init__(self, embedding_dim, reg, args):
self.action_num = 2
self.feature_dim = 102
self.Aa = []
self.ba = []
for i in range(self.action_num):
self.Aa.append(torch.eye(self.feature_dim))
self.ba.append(torch.zeros(self.feature_dim).view(-1, 1))
self.alpha = 1.0
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
def recommend(self, features):
score_all = []
for i in range(self.action_num):
Aa = self.Aa[i]
ba = self.ba[i]
mu = torch.matmul(torch.inverse(Aa), ba)
variance = torch.inverse(Aa)
try:
theta = MultivariateNormal(loc=mu.view(-1), covariance_matrix=self.alpha * variance).sample().view(-1,1)
            except Exception:
                # fall back to the posterior mean when the covariance matrix
                # is not usable (e.g. not positive definite)
                print("MultivariateNormal sampling failed; falling back to the posterior mean.")
                theta = mu.view(-1, 1)
score = torch.matmul(features["feature"], theta) + self.alpha * torch.sqrt(
torch.sum(torch.mul(torch.matmul(features["feature"], torch.inverse(Aa)), features["feature"]), dim=-1)
).view(-1, 1)
score_all.append(score)
score_all = torch.cat(score_all, dim=1)
max_index = score_all.argmax(dim=1)
print(Counter(max_index.numpy()))
pos_weights = torch.zeros_like(features["label"])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
# update Aa and ba
for i in range(max_index.shape[0]):
cur_action = max_index[i].item()
cur_reward = features["label"][i, cur_action].item()
cur_feature = features["feature"][i]
Aa = self.Aa[cur_action]
ba = self.ba[cur_action]
Aa += torch.matmul(cur_feature.view(-1, 1), cur_feature.view(1, -1))
ba += cur_reward * cur_feature.view(-1, 1)
self.Aa[cur_action] = Aa
self.ba[cur_action] = ba
return reward
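    # Note: linear Thompson sampling.  Instead of only a UCB bonus, theta is
    # drawn from the Gaussian posterior N(Aa^{-1} ba, alpha * Aa^{-1}) per
    # arm, with the posterior mean as fallback.  The additive sqrt-term kept
    # in the score makes this a UCB/Thompson hybrid rather than pure
    # posterior sampling.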
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "LinThompson", 0
class LinEGreedy:
def __init__(self, embedding_dim, reg, args):
self.Aa = torch.eye(104)
self.ba = torch.zeros(104).view(-1,1)
self.log_alpha = torch.Tensor([1.0, 2.0, 3.0, 4.0])
        self.epsilon = 0.2
self.turn = True
def recommend(self, features):
action1_features = torch.zeros((features["label"].shape[0], 2))
action1_features[:, 0] = 1.0
action2_features = torch.zeros((features["label"].shape[0], 2))
action2_features[:, 1] = 1.0
action1_input = torch.cat([features["feature"], action1_features], dim=1)
action2_input = torch.cat([features["feature"], action2_features], dim=1)
inputs_all = [action1_input, action2_input]
theta = torch.matmul(torch.inverse(self.Aa), self.ba)
action1_score = torch.matmul(action1_input, theta)
action2_score = torch.matmul(action2_input, theta)
score_all = torch.cat([action1_score, action2_score], dim=1)
max_index = score_all.argmax(dim=1)
if self.turn:
simulate_index = []
for i in range(len(max_index)):
                if np.random.random() < self.epsilon:
simulate_index.append(max_index[i].item())
else:
simulate_index.append(np.random.randint(0, 2))
max_index = simulate_index
self.turn = False
print(Counter(max_index))
pos_weights = torch.zeros_like(features["label"])
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
#update Aa and ba
for i in range(len(max_index)):
cur_action = max_index[i]
cur_reward = features["label"][i, cur_action].item()
cur_feature = inputs_all[cur_action][i]
self.Aa += torch.matmul(cur_feature.view(-1,1), cur_feature.view(1,-1))
self.ba += cur_reward * cur_feature.view(-1,1)
return reward
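    # Note: a linear greedy policy with a one-shot randomized warm start.
    # Only on the first call (self.turn) is the epsilon test applied; after
    # that the policy is purely greedy on the shared ridge model.  Be aware
    # that this branch keeps the greedy arm with probability epsilon and
    # plays a random arm otherwise, which is inverted relative to the
    # Egreedy class above.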
def step(self, optimizer, arch_optimizer, epoch):
return 0
def genotype(self):
return "LinEGreedy", 0 | 37,292 | 45.909434 | 141 | py |
AutoCO | AutoCO-main/exp_public/adult/simulate/vartional_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
import utils
import ipdb
PRIMITIVES_BINARY = ['plus', 'multiply', 'max', 'min', 'concat']
PRIMITIVES_NAS = [0, 2, 4, 8, 16]
SPACE_NAS = pow(len(PRIMITIVES_NAS), 5)
OPS = {
'plus': lambda p, q: p + q,
'multiply': lambda p, q: p * q,
'max': lambda p, q: torch.max(torch.stack((p, q)), dim=0)[0],
'min': lambda p, q: torch.min(torch.stack((p, q)), dim=0)[0],
'concat': lambda p, q: torch.cat([p, q], dim=-1),
'norm_0': lambda p: torch.ones_like(p),
'norm_0.5': lambda p: torch.sqrt(torch.abs(p) + 1e-7),
'norm_1': lambda p: torch.abs(p),
'norm_2': lambda p: p ** 2,
'I': lambda p: torch.ones_like(p),
'-I': lambda p: -torch.ones_like(p),
'sign': lambda p: torch.sign(p),
}
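# The five binary primitives searched over: elementwise sum, product, max,
# min and feature concatenation.  The unary entries (norm_*, I, -I, sign)
# appear unused by the models in this file and are kept for completeness.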
def constrain(p):
c = torch.norm(p, p=2, dim=1, keepdim=True)
c[c < 1] = 1.0
p.data.div_(c)
def MixedBinary(embedding_p, embedding_q, weights, FC):
# return torch.sum(torch.stack([w * fc(OPS[primitive](embedding_p, embedding_q)) \
# for w, primitive, fc in zip(weights, PRIMITIVES_BINARY, FC)]), 0)
pos = weights.argmax().item()
return weights[pos] * FC[pos](OPS[PRIMITIVES_BINARY[pos]](embedding_p, embedding_q))
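# Note: the commented-out version mixes all primitives with their softmax
# weights; the active version evaluates only the argmax primitive and scales
# it by its weight, a single-path evaluation that keeps the gradient w.r.t.
# the selected weight alive while skipping the other operations.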
class Virtue(nn.Module):
def __init__(self, embedding_dim, reg, embedding_num=12, ofm=False):
super(Virtue, self).__init__()
self.embedding_dim = embedding_dim
self.reg = reg
self.embedding_mean = nn.ModuleDict({})
self.embedding_std = nn.ModuleDict({})
#self.embedding_first_order = nn.ModuleDict({})
self.columns = ["workclass", "education", "marital-status", "occupation", "relationship", "race", "sex", "native-country"]
# for name in self.columns:
# self.embedding_first_order[name] = nn.Embedding(embedding_num, 2)
if not ofm:
for name in self.columns:
self.embedding_mean[name] = nn.Embedding(embedding_num, embedding_dim)
self.embedding_std[name] = nn.Embedding(embedding_num, embedding_dim)
else:
for name in self.columns:
temp_mean = nn.ModuleList()
temp_std = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
temp_mean.append(nn.Embedding(embedding_num, embedding_dim))
temp_std.append(nn.Embedding(embedding_num, embedding_dim))
self.embedding_mean[name] = temp_mean
self.embedding_std[name] = temp_std
class Virtue2(nn.Module):
def __init__(self, embedding_dim, reg, embedding_num=12, ofm=False):
super(Virtue2, self).__init__()
self.embedding_dim = embedding_dim
self.reg = reg
self.embedding_all = nn.ModuleDict({})
self.columns = ["workclass", "education", "marital-status", "occupation", "relationship", "race", "sex", "native-country"]
if not ofm:
for name in self.columns:
self.embedding_all[name] = nn.Embedding(embedding_num, embedding_dim)
else:
for name in self.columns:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
temp.append(nn.Embedding(embedding_num, embedding_dim))
self.embedding_all[name] = temp
class DSNAS_v(Virtue):
def __init__(self, embedding_dim, reg, args):
super(DSNAS_v, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
#initialize contextual infos
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
if self.args.multi_operation:
num_op = len(self.columns)
self.log_alpha = torch.nn.Parameter(torch.zeros((int(num_op*(num_op-1)/2), len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
else:
self.log_alpha = torch.nn.Parameter(torch.zeros((1, len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
self._arch_parameters = [self.log_alpha]
self.weights = Variable(torch.zeros_like(self.log_alpha))
if self.args.early_fix_arch:
self.fix_arch_index = {}
self.rand_array = torch.randn(3000000)
def reparameterize(self, mu, std):
std = torch.log(1 + torch.exp(std))
#v = self.rand_array[:std.numel()].reshape(std.shape)
v = torch.randn(3000000)[:std.numel()].reshape(std.shape)
return (mu + std * v * 0.01)
# def KL_distance(self, mean1, mean2, std1, std2):
# a = torch.log(std2 / std1) + (std1 * std1 + (mean1 - mean2) * (mean1 - mean2)) / 2 / std2 / std2 - 1.0 / 2.0
# return torch.sum(a)
def KL_distance(self, mean1, mean2, std1, std2):
a = 1/2 * (torch.log(torch.det(std2)/torch.det(std1)) - std1.numel() + (mean1 - mean2)*(mean1 - mean2)/torch.square(std2) +
torch.sum(torch.square(std1)/torch.square(std2)))
return a
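    # Note: this variant resembles the general Gaussian KL
    #   1/2 * (log(det S2 / det S1) - k + (m1 - m2)^T S2^{-1} (m1 - m2)
    #          + tr(S1 S2^{-1}))
    # applied elementwise to the std tensors, and reparameterize() here draws
    # fresh N(0, 1) noise on every call (the preallocated self.rand_array
    # buffer from the commented line is unused), which is noticeably slower.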
def recommend(self, features):
self.eval()
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
if self.args.early_fix_arch:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.weights[key, :].zero_()
self.weights[key, value_lst[0]] = 1
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding_mean = self.embedding_mean[name1][max_index](features[name1])
name1_embedding_std = self.embedding_std[name1][max_index](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2][max_index](features[name2])
name2_embedding_std = self.embedding_std[name2][max_index](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
if self.args.trans:
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
else:
name1_embedding_trans = name1_embedding
name2_embedding_trans = name2_embedding
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
if self.args.first_order:
for name in self.columns:
inferences += self.embedding_first_order[name](features[name])
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer, epoch):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
#if epoch < self.args.search_epoch:
# train_epoch = (epoch+1)*5
#else:
train_epoch = 5
for k in range(train_epoch):
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
if epoch < self.args.search_epoch:
arch_optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features, epoch, search=True)
else:
output, error_loss, loss_alpha = self.forward(features, epoch, search=False)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
if epoch < self.args.search_epoch:
arch_optimizer.step()
return np.mean(losses)
def revised_arch_index(self, epoch):
if self.args.early_fix_arch:
if epoch < self.args.search_epoch:
sort_log_alpha = torch.topk(F.softmax(self.log_alpha.data, dim=-1), 2)
argmax_index = (sort_log_alpha[0][:, 0] - sort_log_alpha[0][:, 1] >= 0.10)
for id in range(argmax_index.size(0)):
if argmax_index[id] == 1 and id not in self.fix_arch_index.keys():
self.fix_arch_index[id] = [sort_log_alpha[1][id, 0].item(),
self.log_alpha.detach().clone()[id, :]]
if epoch >= self.args.search_epoch:
                # fix the arch.
max_index = torch.argmax(self.log_alpha, dim=-1)
for id in range(max_index.size(0)):
if id not in self.fix_arch_index.keys():
self.fix_arch_index[id] = [max_index[id].item(), self.log_alpha.detach().clone()[id, :]]
for key, value_lst in self.fix_arch_index.items():
self.log_alpha.data[key, :] = value_lst[1]
def forward(self, features, epoch, search):
#self.weights = self._get_weights(self.log_alpha)
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
# self.revised_arch_index(epoch)
# if self.args.early_fix_arch:
# if len(self.fix_arch_index.keys()) > 0:
# for key, value_lst in self.fix_arch_index.items():
# self.weights[key, :].zero_()
# self.weights[key, value_lst[0]] = 1
if search:
cate_prob = F.softmax(self.log_alpha, dim=-1)
self.cate_prob = cate_prob.clone().detach()
loss_alpha = torch.log(
(self.weights * F.softmax(self.log_alpha, dim=-1)).sum(-1)).sum()
self.weights.requires_grad_()
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
        from joblib import Parallel, delayed  # sklearn.externals.joblib was removed in modern scikit-learn
names_all = []
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding_mean = self.embedding_mean[name1][max_index](features[name1])
name1_embedding_std = self.embedding_std[name1][max_index](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2][max_index](features[name2])
name2_embedding_std = self.embedding_std[name2][max_index](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
if self.args.trans:
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
else:
name1_embedding_trans = name1_embedding
name2_embedding_trans = name2_embedding
names_all.append(
[name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1, ), self.FC[name1 + ":" + name2]])
res = Parallel(n_jobs=8, backend="threading")(
delayed(MixedBinary)(para1, para2, para3, para4) for para1, para2, para3, para4 in names_all)
inferences = sum(res)
if self.args.first_order:
for name in self.columns:
inferences += self.embedding_first_order[name](features[name])
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
kl = 0
for name in self.columns:
if not self.args.ofm:
kl += self.KL_distance(self.embedding_mean[name].weight,
0 * torch.ones_like(self.embedding_mean[name].weight),
torch.log(1 + torch.exp(self.embedding_std[name].weight)),
0.1 * torch.ones_like(self.embedding_std[name].weight))
else:
for index in range(len(PRIMITIVES_BINARY)):
kl += self.KL_distance(self.embedding_mean[name][index].weight,
0 * torch.ones_like(self.embedding_mean[name][index].weight),
torch.log(1 + torch.exp(self.embedding_std[name][index].weight)),
0.1 * torch.ones_like(self.embedding_std[name][index].weight))
if search:
self.weights.grad = torch.zeros_like(self.weights)
(weighted_loss + loss_alpha + kl/features["label"].shape[0]).backward()
self.block_reward = self.weights.grad.data.sum(-1)
self.log_alpha.grad.data.mul_(self.block_reward.view(-1, 1))
return inferences, weighted_loss, loss_alpha
else:
(weighted_loss + kl/features["label"].shape[0]).backward()
return inferences, weighted_loss, 0
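    # Note: in search mode the architecture gradient follows the DSNAS-style
    # estimator: loss_alpha is the log-probability of the sampled one-hot
    # architecture, the dummy gradient accumulated in self.weights.grad is
    # reduced to a per-edge "block reward", and log_alpha.grad is rescaled by
    # it, so the discrete sample is differentiated through without relaxation.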
def _get_weights(self, log_alpha):
if self.args.random_sample:
uni = torch.ones_like(log_alpha)
m = torch.distributions.one_hot_categorical.OneHotCategorical(uni)
else:
m = torch.distributions.one_hot_categorical.OneHotCategorical(probs=F.softmax(log_alpha, dim=-1))
return m.sample()
def arch_parameters(self):
return self._arch_parameters
def genotype(self):
if not self.args.multi_operation:
genotype = PRIMITIVES_BINARY[self.log_alpha.argmax().cpu().numpy()]
genotype_p = F.softmax(self.log_alpha, dim=-1)
else:
genotype = []
for index in self.log_alpha.argmax(axis=1).cpu().numpy():
genotype.append(PRIMITIVES_BINARY[index])
genotype = ":".join(genotype[:10])
genotype_p = F.softmax(self.log_alpha, dim=-1)[:10]
return genotype, genotype_p.cpu().detach()
class NASP(Virtue2):
def __init__(self, embedding_dim, reg, args):
super(NASP, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
if self.args.multi_operation:
num_op = len(self.columns)
self.log_alpha = torch.nn.Parameter(torch.zeros((int(num_op*(num_op-1)/2), len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
else:
self.log_alpha = torch.nn.Parameter(torch.zeros((1, len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
self._arch_parameters = [self.log_alpha]
self.weights = Variable(torch.zeros_like(self.log_alpha))
if self.args.early_fix_arch:
self.fix_arch_index = {}
self.rand_array = torch.randn(3000000)
def recommend(self, features):
self.eval()
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
if self.args.early_fix_arch:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.weights[key, :].zero_()
self.weights[key, value_lst[0]] = 1
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding = self.embedding_all[name1][max_index](features[name1])
name2_embedding = self.embedding_all[name2][max_index](features[name2])
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
if self.args.trans:
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
else:
name1_embedding_trans = name1_embedding
name2_embedding_trans = name2_embedding
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
if self.args.first_order:
for name in self.columns:
inferences += self.embedding_first_order[name](features[name])
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def MixedBinary_ofm(self, embedding_p_all, embedding_q_all, weights, FC):
return torch.sum(torch.stack([w * fc(OPS[primitive](embedding_p, embedding_q)) \
for w, primitive, fc, embedding_p, embedding_q in zip(weights, PRIMITIVES_BINARY, FC, embedding_p_all, embedding_q_all)]), 0)
def MixedBinary_all(self, embedding_p, embedding_q, weights, FC):
return torch.sum(torch.stack([w * fc(OPS[primitive](embedding_p, embedding_q)) \
for w, primitive, fc in zip(weights, PRIMITIVES_BINARY, FC)]), 0)
def step(self, optimizer, arch_optimizer, epoch):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
if epoch < self.args.search_epoch:
train_epoch = (epoch+1)*5
else:
train_epoch = 1
for k in range(train_epoch):
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
arch_optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features, epoch, search=True)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
arch_optimizer.step()
return np.mean(losses)
def forward(self, features, epoch, search):
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
self.weights.requires_grad_()
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
        from joblib import Parallel, delayed
names_all = []
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
embedding_name1_all = []
embedding_name2_all = []
for index_name in range(len(PRIMITIVES_BINARY)):
name1_embedding = self.embedding_all[name1][index_name](features[name1])
embedding_name1_all.append(name1_embedding)
name2_embedding = self.embedding_all[name2][index_name](features[name2])
embedding_name2_all.append(name2_embedding)
else:
name1_embedding = self.embedding_all[name1](features[name1])
name2_embedding = self.embedding_all[name2](features[name2])
if self.args.trans:
if self.args.ofm:
embedding_name1_all_temp = []
embedding_name2_all_temp = []
for index_temp in range(len(embedding_name1_all)):
embedding_name1_all_temp.append(self.mlp_p(embedding_name1_all[index_temp].view(-1, 1)).view(embedding_name1_all[index_temp].size()))
embedding_name2_all_temp.append(self.mlp_p(embedding_name2_all[index_temp].view(-1, 1)).view(embedding_name2_all[index_temp].size()))
embedding_name1_all = embedding_name1_all_temp
embedding_name2_all = embedding_name2_all_temp
else:
name1_embedding = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
if self.args.ofm:
names_all.append([embedding_name1_all, embedding_name2_all, cur_weights.view(-1, ), self.FC[name1 + ":" + name2]])
else:
names_all.append(
[name1_embedding, name2_embedding, cur_weights.view(-1, ), self.FC[name1 + ":" + name2]])
if self.args.ofm:
res = Parallel(n_jobs=8, backend="threading")(
delayed(self.MixedBinary_ofm)(para1, para2, para3, para4) for para1, para2, para3, para4 in names_all)
else:
res = Parallel(n_jobs=8, backend="threading")(
delayed(self.MixedBinary_all)(para1, para2, para3, para4) for para1, para2, para3, para4 in names_all)
inferences = sum(res)
if self.args.first_order:
for name in self.columns:
inferences += self.embedding_first_order[name](features[name])
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
weighted_loss.backward()
self.log_alpha.grad = self.weights.grad
return inferences, weighted_loss, 0
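    # Note: NASP uses a straight-through estimator: the forward pass sees the
    # discrete argmax one-hot (self.weights), and after backward the gradient
    # collected on the one-hot is copied verbatim onto the continuous
    # architecture parameters via self.log_alpha.grad = self.weights.grad.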
def arch_parameters(self):
return self._arch_parameters
def genotype(self):
if not self.args.multi_operation:
genotype = PRIMITIVES_BINARY[self.log_alpha.argmax().cpu().numpy()]
genotype_p = F.softmax(self.log_alpha, dim=-1)
else:
genotype = []
for index in self.log_alpha.argmax(axis=1).cpu().numpy():
genotype.append(PRIMITIVES_BINARY[index])
genotype = ":".join(genotype[:10])
genotype_p = F.softmax(self.log_alpha, dim=-1)[:10]
return genotype, genotype_p.cpu().detach()
class NASP_v(Virtue):
def __init__(self, embedding_dim, reg, args):
super(NASP_v, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2*embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
#initialize contextual infos
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
if self.args.multi_operation:
num_op = len(self.columns)
self.log_alpha = torch.nn.Parameter(torch.zeros((int(num_op*(num_op-1)/2), len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
else:
self.log_alpha = torch.nn.Parameter(torch.zeros((1, len(PRIMITIVES_BINARY))).normal_(self.args.loc_mean, self.args.loc_std).requires_grad_())
self._arch_parameters = [self.log_alpha]
self.weights = Variable(torch.zeros_like(self.log_alpha))
if self.args.early_fix_arch:
self.fix_arch_index = {}
self.rand_array = torch.randn(3000000)
def reparameterize(self, mu, std):
std = torch.log(1 + torch.exp(std))
v = self.rand_array[:std.numel()].reshape(std.shape)
return (mu + std * v * 0.01)
def KL_distance(self, mean1, mean2, std1, std2):
a = torch.log(std2 / std1) + (std1 * std1 + (mean1 - mean2) * (mean1 - mean2)) / 2 / std2 / std2 - 1.0 / 2.0
return torch.sum(a)
def recommend(self, features):
self.eval()
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
if self.args.early_fix_arch:
if len(self.fix_arch_index.keys()) > 0:
for key, value_lst in self.fix_arch_index.items():
self.weights[key, :].zero_()
self.weights[key, value_lst[0]] = 1
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding_mean = self.embedding_mean[name1][max_index](features[name1])
name1_embedding_std = self.embedding_std[name1][max_index](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2][max_index](features[name2])
name2_embedding_std = self.embedding_std[name2][max_index](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
if self.args.trans:
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
else:
name1_embedding_trans = name1_embedding
name2_embedding_trans = name2_embedding
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
if self.args.first_order:
for name in self.columns:
inferences += self.embedding_first_order[name](features[name])
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def MixedBinary_ofm(self, embedding_p_all, embedding_q_all, weights, FC):
return torch.sum(torch.stack([w * fc(OPS[primitive](embedding_p, embedding_q)) \
for w, primitive, fc, embedding_p, embedding_q in zip(weights, PRIMITIVES_BINARY, FC, embedding_p_all, embedding_q_all)]), 0)
def MixedBinary_all(self, embedding_p, embedding_q, weights, FC):
return torch.sum(torch.stack([w * fc(OPS[primitive](embedding_p, embedding_q)) \
for w, primitive, fc in zip(weights, PRIMITIVES_BINARY, FC)]), 0)
def step(self, optimizer, arch_optimizer, epoch):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
if epoch < self.args.search_epoch:
train_epoch = (epoch+1)*5
else:
train_epoch = 1
for k in range(train_epoch):
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
arch_optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features, epoch, search=True)
# if epoch < self.args.search_epoch:
# arch_optimizer.zero_grad()
# output, error_loss, loss_alpha = self.forward(features, epoch, search=True)
# else:
# output, error_loss, loss_alpha = self.forward(features, epoch, search=False)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
arch_optimizer.step()
# if epoch < self.args.search_epoch:
# arch_optimizer.step()
return np.mean(losses)
def revised_arch_index(self, epoch):
if self.args.early_fix_arch:
if epoch < self.args.search_epoch:
sort_log_alpha = torch.topk(F.softmax(self.log_alpha.data, dim=-1), 2)
argmax_index = (sort_log_alpha[0][:, 0] - sort_log_alpha[0][:, 1] >= 0.10)
for id in range(argmax_index.size(0)):
if argmax_index[id] == 1 and id not in self.fix_arch_index.keys():
self.fix_arch_index[id] = [sort_log_alpha[1][id, 0].item(),
self.log_alpha.detach().clone()[id, :]]
if epoch >= self.args.search_epoch:
                # fix the arch.
max_index = torch.argmax(self.log_alpha, dim=-1)
for id in range(max_index.size(0)):
if id not in self.fix_arch_index.keys():
self.fix_arch_index[id] = [max_index[id].item(), self.log_alpha.detach().clone()[id, :]]
for key, value_lst in self.fix_arch_index.items():
self.log_alpha.data[key, :] = value_lst[1]
def forward(self, features, epoch, search):
self.weights = torch.zeros_like(self.log_alpha).scatter_(1, torch.argmax(self.log_alpha, dim=-1).view(-1, 1), 1)
self.weights.requires_grad_()
regs = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
        from joblib import Parallel, delayed
names_all = []
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
embedding_name1_all = []
embedding_name2_all = []
for index_name in range(len(PRIMITIVES_BINARY)):
name1_embedding_mean = self.embedding_mean[name1][index_name](features[name1])
name1_embedding_std = self.embedding_std[name1][index_name](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
embedding_name1_all.append(name1_embedding)
name2_embedding_mean = self.embedding_mean[name2][index_name](features[name2])
name2_embedding_std = self.embedding_std[name2][index_name](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
embedding_name2_all.append(name2_embedding)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
regs += 1e-5 * (torch.norm(name1_embedding) + torch.norm(name2_embedding))
if self.args.trans:
if self.args.ofm:
embedding_name1_all_temp = []
embedding_name2_all_temp = []
for index_temp in range(len(embedding_name1_all)):
embedding_name1_all_temp.append(self.mlp_p(embedding_name1_all[index_temp].view(-1, 1)).view(embedding_name1_all[index_temp].size()))
embedding_name2_all_temp.append(self.mlp_p(embedding_name2_all[index_temp].view(-1, 1)).view(embedding_name2_all[index_temp].size()))
embedding_name1_all = embedding_name1_all_temp
embedding_name2_all = embedding_name2_all_temp
else:
name1_embedding = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
if self.args.ofm:
names_all.append([embedding_name1_all, embedding_name2_all, cur_weights.view(-1, ), self.FC[name1 + ":" + name2]])
else:
names_all.append(
[name1_embedding, name2_embedding, cur_weights.view(-1, ), self.FC[name1 + ":" + name2]])
if self.args.ofm:
res = Parallel(n_jobs=8, backend="threading")(
delayed(self.MixedBinary_ofm)(para1, para2, para3, para4) for para1, para2, para3, para4 in names_all)
else:
res = Parallel(n_jobs=8, backend="threading")(
delayed(self.MixedBinary_all)(para1, para2, para3, para4) for para1, para2, para3, para4 in names_all)
inferences = sum(res)
if self.args.first_order:
for name in self.columns:
inferences += self.embedding_first_order[name](features[name])
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
kl = 0
for name in self.columns:
if not self.args.ofm:
kl += self.KL_distance(self.embedding_mean[name].weight,
0 * torch.ones_like(self.embedding_mean[name].weight),
torch.log(1 + torch.exp(self.embedding_std[name].weight)),
0.1 * torch.ones_like(self.embedding_std[name].weight))
else:
for index in range(len(PRIMITIVES_BINARY)):
kl += self.KL_distance(self.embedding_mean[name][index].weight,
0 * torch.ones_like(self.embedding_mean[name][index].weight),
torch.log(1 + torch.exp(self.embedding_std[name][index].weight)),
0.1 * torch.ones_like(self.embedding_std[name][index].weight))
(weighted_loss + regs + kl/features["label"].shape[0]).backward()
self.log_alpha.grad = self.weights.grad
return inferences, weighted_loss, 0
def arch_parameters(self):
return self._arch_parameters
def genotype(self):
if not self.args.multi_operation:
genotype = PRIMITIVES_BINARY[self.log_alpha.argmax().cpu().numpy()]
genotype_p = F.softmax(self.log_alpha, dim=-1)
else:
genotype = []
for index in self.log_alpha.argmax(axis=1).cpu().numpy():
genotype.append(PRIMITIVES_BINARY[index])
genotype = ":".join(genotype[:10])
genotype_p = F.softmax(self.log_alpha, dim=-1)[:10]
return genotype, genotype_p.cpu().detach()
class MULTIPLY_v(Virtue):
def __init__(self, embedding_dim, reg, args):
super(MULTIPLY_v, self).__init__(embedding_dim, reg, ofm=args.ofm, embedding_num=args.embedding_num)
self.FC = nn.ModuleDict({})
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
temp = nn.ModuleList()
for primitive in PRIMITIVES_BINARY:
if primitive == 'concat':
temp.append(nn.Linear(2 * embedding_dim, 2, bias=False))
else:
temp.append(nn.Linear(embedding_dim, 2, bias=False))
self.FC[name1 + ":" + name2] = temp
self.args = args
self._initialize_alphas()
#initialize contextual infos
self.contexts = {}
self.pos_weights = torch.Tensor()
self.rewards = []
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([0.0, 1.0, 0.0, 0.0, 0.0])
self.weights = Variable(torch.Tensor([0.0, 1.0, 0.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
def reparameterize(self, mu, std):
std = torch.log(1 + torch.exp(std))
v = self.rand_array[:std.numel()].reshape(std.shape)
return (mu + std * v * 0.01)
def KL_distance(self, mean1, mean2, std1, std2):
a = torch.log(std2 / std1) + (std1 * std1 + (mean1 - mean2) * (mean1 - mean2)) / 2 / std2 / std2 - 1.0 / 2.0
return torch.sum(a)
def recommend(self, features):
self.eval()
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding_mean = self.embedding_mean[name1][max_index](features[name1])
name1_embedding_std = self.embedding_std[name1][max_index](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2][max_index](features[name2])
name2_embedding_std = self.embedding_std[name2][max_index](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
if self.args.trans:
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
else:
name1_embedding_trans = name1_embedding
name2_embedding_trans = name2_embedding
inferences += MixedBinary(name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1,), self.FC[name1 + ":" + name2])
# if self.args.first_order:
# for name in self.columns:
# inferences += self.embedding_first_order[name](features[name])
pos_weights = torch.zeros_like(features["label"])
max_index = torch.argmax(inferences, dim=1)
a_ind = np.array([(i, val) for i, val in enumerate(max_index)])
pos_weights[a_ind[:, 0], a_ind[:, 1]] = 1.0
reward = torch.sum(torch.mul(features["label"], pos_weights)).cpu().detach().item()
self.add_batch(features, pos_weights)
return reward
def add_batch(self, features, pos_weights):
for index in features:
if index in self.contexts:
temp = self.contexts[index]
self.contexts[index] = torch.cat([temp, features[index]], dim=0)
else:
self.contexts[index] = features[index]
self.pos_weights = torch.cat([self.pos_weights, pos_weights], dim=0)
def step(self, optimizer, arch_optimizer, epoch):
self.train()
losses = []
train_bandit = utils.get_data_queue_bandit(self.args, self.contexts, self.pos_weights)
if epoch < 10:
train_epoch = (epoch+1)*5
else:
train_epoch = 1
for i in range(train_epoch):
for step, features in enumerate(train_bandit):
optimizer.zero_grad()
output, error_loss, loss_alpha = self.forward(features)
losses.append(error_loss.cpu().detach().item())
optimizer.step()
return np.mean(losses)
def forward(self, features):
regs = 0
inferences = 0
max_index = self.weights.argmax().item()
cur_weights = self.weights
cur_index = 0
        from joblib import Parallel, delayed
names_all = []
for index1, name1 in enumerate(self.columns):
for index2, name2 in enumerate(self.columns):
if index1 < index2:
if self.args.multi_operation:
cur_weights = self.weights[cur_index]
max_index = cur_weights.argmax().item()
cur_index += 1
if self.args.ofm:
name1_embedding_mean = self.embedding_mean[name1][max_index](features[name1])
name1_embedding_std = self.embedding_std[name1][max_index](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2][max_index](features[name2])
name2_embedding_std = self.embedding_std[name2][max_index](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
else:
name1_embedding_mean = self.embedding_mean[name1](features[name1])
name1_embedding_std = self.embedding_std[name1](features[name1])
name1_embedding = self.reparameterize(name1_embedding_mean, name1_embedding_std)
name2_embedding_mean = self.embedding_mean[name2](features[name2])
name2_embedding_std = self.embedding_std[name2](features[name2])
name2_embedding = self.reparameterize(name2_embedding_mean, name2_embedding_std)
if self.args.trans:
name1_embedding_trans = self.mlp_p(name1_embedding.view(-1, 1)).view(name1_embedding.size())
name2_embedding_trans = self.mlp_q(name2_embedding.view(-1, 1)).view(name2_embedding.size())
else:
name1_embedding_trans = name1_embedding
name2_embedding_trans = name2_embedding
names_all.append(
[name1_embedding_trans, name2_embedding_trans, cur_weights.view(-1, ), self.FC[name1 + ":" + name2]])
res = Parallel(n_jobs=8, backend="threading")(
delayed(MixedBinary)(para1, para2, para3, para4) for para1, para2, para3, para4 in names_all)
inferences = sum(res)
# if self.args.first_order:
# for name in self.columns:
# inferences += self.embedding_first_order[name](features[name])
loss = (inferences - features["label"])**2
weighted_loss = torch.mean(torch.sum(torch.mul(features["pos_weights"], loss), dim=1))
kl = 0
        for name in self.columns:
            if not self.args.ofm:
                kl += self.KL_distance(self.embedding_mean[name].weight,
                                       0 * torch.ones_like(self.embedding_mean[name].weight),
                                       torch.log(1 + torch.exp(self.embedding_std[name].weight)),
                                       0.1 * torch.ones_like(self.embedding_std[name].weight))
            else:
                # in ofm mode the embeddings are ModuleLists (one table per
                # primitive), so the KL term has to be summed over them
                for index in range(len(PRIMITIVES_BINARY)):
                    kl += self.KL_distance(self.embedding_mean[name][index].weight,
                                           0 * torch.ones_like(self.embedding_mean[name][index].weight),
                                           torch.log(1 + torch.exp(self.embedding_std[name][index].weight)),
                                           0.1 * torch.ones_like(self.embedding_std[name][index].weight))
(weighted_loss + kl/features["label"].shape[0]).backward()
return inferences, weighted_loss, 0
def genotype(self):
return "FM_v", 0
class MAX_v(MULTIPLY_v):
def __init__(self, embedding_dim, reg, args):
super(MAX_v, self).__init__(embedding_dim, reg, args)
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([1, 1, 1, 1, 1.0])
self.weights = Variable(torch.Tensor([0.0, 0.0, 1.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
def genotype(self):
return "MAX", 0
class PLUS_v(MULTIPLY_v):
def __init__(self, embedding_dim, reg, args):
super(PLUS_v, self).__init__(embedding_dim, reg, args)
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([1, 0, 0, 0, 0.0])
self.weights = Variable(torch.Tensor([1.0, 0.0, 0.0, 0.0, 0.0]))
self.rand_array = torch.randn(3000000)
def genotype(self):
return "PLUS", 0
class MIN_v(MULTIPLY_v):
def __init__(self, embedding_dim, reg, args):
super(MIN_v, self).__init__(embedding_dim, reg, args)
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([0, 0, 0, 1, 0.0])
self.weights = Variable(torch.Tensor([0.0, 0.0, 0.0, 1.0, 0.0]))
self.rand_array = torch.randn(3000000)
def genotype(self):
return "MIN", 0
class CONCAT_v(MULTIPLY_v):
def __init__(self, embedding_dim, reg, args):
super(CONCAT_v, self).__init__(embedding_dim, reg, args)
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1))
self.log_alpha = torch.Tensor([0, 0, 0, 0, 1.0])
self.weights = Variable(torch.Tensor([0.0, 0.0, 0.0, 0.0, 1.0]))
self.rand_array = torch.randn(3000000)
def genotype(self):
return "CONCAT", 0
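# The subclasses above differ only in their pinned one-hot `weights` vector,
# which selects a single binary interaction operator instead of learning the
# mixture: as pinned here, index 0 selects PLUS, 2 MAX, 3 MIN and 4 CONCAT.
# A hypothetical construction, assuming an `args` namespace carrying the
# flags referenced in forward() (multi_operation, ofm, trans):
#
#   model = PLUS_v(embedding_dim=8, reg=0.01, args=args)
#   inferences, weighted_loss, _ = model(features)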
| 55,609 | 50.730233 | 176 | py |
malfoy | malfoy-master/v2x/config/config.py | # -*- coding: utf-8 -*-
import os
import sys
import torch
torch.set_num_threads(1)
ROOT_DIR = os.path.normpath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../..'))
LOGS_DIR = os.path.join(ROOT_DIR, 'logs')
RESOURCES_DIR = os.path.join(ROOT_DIR, 'resources')
MODEL_DIR = os.path.join(ROOT_DIR, 'models')
GRAPH_DIR = os.path.join(ROOT_DIR, 'graphs')
PSTATS_FILE = os.path.join(LOGS_DIR, 'profile.stats')
COUNTDOWN_FILE = os.path.join(MODEL_DIR, 'countdownHistory')
BATCHCREATE_FILE = os.path.join(MODEL_DIR, 'batchCreateHistory')
DEVICE = torch.device('cpu')
DTYPE = torch.FloatTensor
class FILES():
ALLOC_FILE = ('allocfile',os.path.join(LOGS_DIR,'allocation'))
PERF_FILE =('perfile',os.path.join(LOGS_DIR,'performance'))
PLM_FILE = ('plmfile',os.path.join(LOGS_DIR,'priceLearningModel'))
BID_FILE = ('bidfile',os.path.join(LOGS_DIR,'finishedBids'))
SL_FILE = ('supervisefile',os.path.join(LOGS_DIR,'supervisedLearningModel'))
INVMDL_FILE = ('invmodelfile',os.path.join(LOGS_DIR,'invModel'))
FWDMDL_FILE = ('forwardmodelfile',os.path.join(LOGS_DIR,'forwardModel'))
REWARD_FILE = ('rewardfile',os.path.join(LOGS_DIR,'reward'))
class RESOURCE_SITE_PARAMS():
    serviceCapa = [(15,16),(15,16),
                   (15,16)]          # Range of random service capacity
                                     # when services are created. Items are
                                     # for different types of services.
resourceCapa = [[(0,1),(0,1),(0,1)],
[(100,101),(100,101),(250,251)],
[(200,201),(200,201),(250,251)],
[(500,501),(500,501),(1000,1001)],
[(1000,1001),(1000,1001),(2000,2001)]]
# Range of random resource capacity
# when resources are created.
# Items are for different types of
# resources.
transCost = 0.01 # Transmission cost between sites.
burnIn = 4000 # Frozen period for capacity
# adjustment.
lowerCapaLimit = 0.4 # For capacity adjustment.
upperCapaLimit = 0.8 # For capacity adjustment.
    resProfileSelectionThreshold = 2 # Before collecting this many data
                                     # points, take the user's resource
                                     # profile. After that, trust
                                     # the service's estimated profile.
randomizeQueueTimeThres = 10 # Min sample size for getting
# queue length distribution
class SERVICE_PARAMS():
avgCapacityPeriod = 50 # For calculation of average
# capacity.
avgCostPeriod = 10 # For calculation of average cost.
discount = 0.8 # Price discount of slower servers.
utilPredictionWeights = [1/50] * 50
capacityBuffer = 0.1 # Buffer in capacity adjustment.
    resProfileUpdateThreshold = 20   # Collect at least this many data
                                     # points before curve fitting
                                     # for resNeedsEstim.
null = 1E-7 # Anything below is null.
class RSU_PARAMS():
    secondPriceThres = 3             # Minimum number of failed-bid
                                     # records for regression.
overload = 0.1 # % overload allowed on resource
# sites at time of allocation.
class VEHICLE_PARAMS():
totalBudget = (2000,1500) # Total budget at time of creation.
minNrBids = 5 # Minimum number of simultaneous
# bids a vehicle can activate.
# Whenever active nr. bids drops
# below this, new bids are
# created.
maxNrBids = 100 # Maximum nr of simultaneous bids
# a vehicle can activate.
    totalNrBids = sys.maxsize        # Maximum total nr. bids a
# vehicle can create.
maxSensingDist = 1000 # Distance for sensing the RSU.
# Can be useful when there are
# multiple RSUs.
budgetRenewal = 1 # Number of bids to share the
# available budget.
maxLifespan = 50000 # Vehicle lifespan range
envCategory = 5 # Discretize nr. bids as env. input
priceCategory = 5 # Discretize price
competitorDataThres = 0 # Nr bids to collect before
# creating transitionTbl
    plmTrainingThres = 5             # Training interval (in terms of new
                                     # priceLearningModel inputs).
lamMax = 0.6 # Max. lambda of the exponential
# distribution for
# generating new bids
# e.g. 0.2 means 1 batch every 5
# time steps.
lamChangeInterval = 30 # Interval of changing lambda.
lamMin = 0.02 # Min. lambda.
ph = 1 # transition prob. high to low
pl = 1 # transition prob. low to high
lamCategory = 5 # Nr. of lambda categories.
# the higher the number, the more
# extreme the lambdas.
stagingMaxsize = 50 # Maxsize for staged batches.
stagingMaxtime = 250 # Longest time to be staged.
    stagingThreshold = [0.6]         # A batch whose indicator is above
                                     # this threshold will be submitted.
stagingPeriod = 10 # Max time to wait until next
# evaluation.
    curiosityTrainingThres = 3       # Training interval for the curiosity
                                     # model (analogous to plmTrainingThres).
curiosityExtRewardThres = 1 # external reward interval
class BID_PARAMS():
QoS1 = 100 # High performance E2E processing time.
QoS2 = 500 # Low performance E2E processing time.
servAmount1 = 1 # Low service amount
servAmount2 = 3 # High service amount
minDataSize = 0.4 # Min data size in mbit.
maxDataSize = 4 # Max data size in mbit.
maxlength = 1 # Maximum chain length.
nrRebid = 1 # Max number of rebids.
requestCountdown1 = 100 # request creation interval for service1
requestCountdown2 = 500 # for service2
class MDL_PARAMS():
width = 402 # Visual param
height = 308 # Visual param
rsuPos = (int(width/2),int(height/2)) # Visual param
rsuInterval = 20 # Visual param
resSitePos = (80,150) # Visual param
resSiteInterval = 20 # Visual param
vehicleYposChoice = (50,65) # Visual param
vehicleInterval = 1 # Visual param
lam = 0 # Lambda of the poisson distribution for
# generating new vehicles.
totalSimTime = 1000000 # Simulation time
timeForNewVehicles = 1000000 # Latest time to create new vehicles
nrSites = 2 # Default number of sites to create
initVehicles = 27 # Initial number of vehicles to create.
# When lam==0, no new vehicles will be
# created after initialization, to give
# the vehicles the chance to learn.
nrRsu = 1 # Default number of RSUs to create.
recent = 350 # Performance indicator for only
# the recent periods
class RESOURCE_PARAMS():
defaultMaxAmount = 200 # default maximum resource amount
unitCost = [(2,2),(20,5)] # cost per resource unit per time unit
class TRANSITION_PARAMS():
trainingThres = 3 # Interval (in terms of new nr. inputs)
# to train the competitorLearnModel.
historyPeriods = 1 # Number of historical states to use
# for forecast of next state.
class COMP_MODEL_PARAMS():
hidden_size1 = 64
hidden_size2 = 64
batch_size = 10
hitory_record = 10
epoch = 10
learning_rate = 0.3
pretrain_nr_record = 2 # No training until this nr. inputs.
# Afterwards train on the most recent
# history_record number of records
# every TRANSITION_PARAMS.trainingThres
class PRICE_MODEL_PARAMS():
evaluation_start = 1000000
batch_size = 30
history_record = 62 # total size of input
epoch = 1
epoch_supervise = 5
train_all_records = 62 # Before this nr. inputs, train on all
# records. after this, train on the
# most recent history_record number of
# records every
# VEHICLE_PARAMS.plmTrainingThres
critic_type = 'ConvCritic' # 'ConvCritic' or 'Critic' class
critic_num_filter = 32
critic_hidden_size1 = 128
critic_hidden_size2 = 128
critic_lr_min = 0.1
critic_lr_reduce_rate = 0.99
critic_learning_rate = 0.9
critic_dropout_rate = 0.0
reward_rate = 0.99 # Continuing tasks with function estimator
# should not use discount.
# Use average reward instead.
reward_min = 0.01
reward_reduce_rate = 1
critic_pretrain_nr_record = 62 # no training until this nr. inputs
actor_type = 'ConvActor' # 'ConvActor' or 'Actor' class
actor_num_filter = 64
actor_hidden_size1 = 128
actor_hidden_size2 = 128
actor_lr_min = 0.1
actor_lr_reduce_rate = 0.99
actor_learning_rate = 0.9
actor_dropout_rate = 0.0
actor_pretrain_nr_record = 32 # No training until this nr. inputs.
sharedlayer_output_dim = 128 # If no compression in sharedLayers,
# set the value to -1.
add_randomness = 0 # Valued between 0 and 1. if greater
# than zero, then in inference function,
# action is randomly chosen
# when generated random number is smaller
# than add_randomness * learning rate
exploration = 128 # Before the model accumulated this
# number of records, the
# learning rate does not reduce.
supervise_learning_rate = 0.1 # learning rate for supervised learning
supervise_hidden_size1 = 64 # MLP hidden layer
supervise_hidden_size2 = 128 # MLP hidden layer
fsp_rl_weight_constant = exploration * 2
reward_ext_weight = 0 # Balance between ext. and int. reward.
                                      # Higher weight means more important ext. reward.
reward_int_weight = 0.9 # Balance between utility and curiosity prediction
# loss in intrinsic reward.
# Higher weight means more important utility.
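# A sketch of how the two weights above combine, following
# PriceLearningModel.update_curiousReward in v2x/solutions/price_model_curious.py:
#
#   r_curious = (1 - reward_ext_weight)
#               * (curiosity_bonus + reward_int_weight * normalized_payoff)
#               + reward_ext_weight * external_reward / maxReward
#
# so reward_ext_weight = 0 ignores the external reward entirely, and
# reward_int_weight = 0.9 lets the bidding payoff dominate the curiosity
# prediction-error bonus.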
class CURIOUS_MODEL_PARAMS():
feature_hidden_size1 = 64
feature_hidden_size2 = 128
feature_outputDim = 32
inv_hidden_size1 = 64
inv_hidden_size2 = 128
inv_learning_rate = 0.1
forward_hidden_size1 = 64
forward_hidden_size2 = 128
forward_learning_rate = 0.1
batch_size = 30
epoch = 5
invLoss_weight = 0.5 | 13,031 | 50.920319 | 80 | py |
malfoy | malfoy-master/v2x/solutions/price_model_curious.py | # -*- coding: utf-8 -*-
import os
import numpy as np
import torch
from torch import tensor,nn
from torch.autograd import Variable
torch.autograd.set_detect_anomaly(True)
from torch.nn import functional as F
from torch.distributions.multivariate_normal import MultivariateNormal
from ..config.config import (PRICE_MODEL_PARAMS as pmp,
VEHICLE_PARAMS as vp,
CURIOUS_MODEL_PARAMS as cmp,
DEVICE,DTYPE,MODEL_DIR)
from ..solutions.attention import Attention
class Model():
train_all_records = pmp.train_all_records
history_record = pmp.history_record
critic_pretrain_nr_record = pmp.critic_pretrain_nr_record
batch_size = pmp.batch_size
epoch = pmp.epoch
epoch_supervise = pmp.epoch_supervise
def __init__(self,evaluation=False,loadModel=False,curiosity=False,
maxReward=1):
self.unique_id = None
self.evaluation = evaluation
self.loadModel = loadModel
self.curiosity = curiosity
self.reward = dict()
self.reward_normalized = dict()
self.reward_curious = dict()
self.inputVec = dict()
self.nextStateVec = dict()
# price model output: first digit is whether to delay.
# rest of output is proportion of allocated budget to each bid
self.output = dict()
self.maxReward = maxReward
self.inputCounter = 0
self.firstInput = 0
def _prepInput(self,inputVec,pos=None,var=True):
if pos is None:
pos = list(range(len(inputVec)))
x = []
if isinstance(inputVec,dict):
for k in pos:
x.append(inputVec[str(k)])
x = np.array(x)
else:
x = np.array([x for i,x in enumerate(inputVec) if i in pos])
if var:
return Variable(tensor(x,device=DEVICE).type(DTYPE))
else:
return tensor(x,device=DEVICE).type(DTYPE)
def _removeRecord(self,number=1,model='priceLearningModel'):
pos = self.firstInput
for i in range(number):
try:
_ = self.inputVec.pop(str(pos))
except KeyError:
pass
try:
_ = self.output.pop(str(pos))
except KeyError:
pass
if model=='priceLearningModel':
try:
_ = self.reward.pop(str(pos))
except KeyError:
pass
try:
_ = self.nextStateVec.pop(str(pos))
except KeyError:
pass
if self.curiosity:
for j in range(pos+1):
try:
_ = self.reward_curious.pop(str(j))
except KeyError:
continue
try:
_ = self.reward_normalized.pop(str(j))
except KeyError:
continue
pos += 1
self.firstInput += number
def _prep_reward(self,rewardVec,randomize=True,normalize=True,
curiosity=False):
extraPos = 0
if curiosity:
# add one more position for reward from previous round
# state vector of curiosity model is the concatenation
# of current state and previous reward
extraPos = 1
try:
length = len([v for k,v in rewardVec.items()
if v is not None and not np.isnan(v)])
except ValueError:
length = len([v for k,v in rewardVec.items()
if v[0] is not None and not np.isnan(v[0])])
if length>=self.train_all_records:
length = min(length,self.history_record+extraPos)
try:
pos = [int(k) for k,v in rewardVec.items()
if v is not None and not np.isnan(v)]
except ValueError:
pos = [int(k) for k,v in rewardVec.items()
if v[0] is not None and not np.isnan(v[0])]
pos.sort()
r = []
for k in pos:
r.append(rewardVec[str(k)])
# reward from curiosity model
r = tensor(r[-length+extraPos:],device=DEVICE).type(DTYPE)
if randomize:
r = (r + (torch.abs(r+1E-7)/100)**0.5 * torch.randn(len(r))
).type(DTYPE)
if normalize:
try:
maxReward = torch.max(torch.abs(r))
except:
maxReward = self.maxReward
if maxReward > self.maxReward:
self.maxReward = maxReward
else:
maxReward = self.maxReward
r = r / maxReward
# position for previous reward from MEC, to be concatenated to
# the state vector of curiosity model
pos_tm1 = pos[-length:-extraPos]
# position for input and output vectors
pos = pos[-length+extraPos:]
return r,pos,pos_tm1
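    # In short: rewards are optionally jittered with zero-mean noise whose
    # scale grows like sqrt(|r|) (cheap exploration), then divided by the
    # largest absolute reward seen so far, so downstream models always see
    # values in roughly [-1, 1]. For the curiosity model one extra leading
    # slot is reserved (extraPos) so the previous round's reward can be
    # concatenated to the state vector.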
def prep_data(self,time,plmfile,reward=None,reward_curious=None,
inputVec=None,nextStateVec=None,output=None,
curious=False,model='priceLearningModel'):
if reward is None:
reward = self.reward.copy()
if reward_curious is None:
if model=='priceLearningModel':
reward_curious = self.reward_curious
else:
reward_curious = reward
if inputVec is None:
inputVec = self.inputVec
if nextStateVec is None:
nextStateVec = self.nextStateVec
if output is None:
output = self.output
try:
if not curious:
currentIdx = max([int(k) for k in reward.keys()])
else:
currentIdx = max([int(k) for k in reward_curious.keys()])
except: # if no reward is recorded yet
return
if (currentIdx<max(self.critic_pretrain_nr_record,self.batch_size)):
plmfile.write('{};{};{};too few data points.\n'.format(
time,self.unique_id,0))
plmfile.flush()
return
if not curious: # prepare data for RL without curiosity model
r,pos,_ = self._prep_reward(rewardVec=reward)
if model=='supervisedModel':
r = r.repeat_interleave(2)
pos = [y for z in [[x*2,x*2+1] for x in pos] for y in z]
r = r.view(-1,1)
x = self._prepInput(inputVec,pos,var=True)
y = self._prepInput(nextStateVec,pos,var=False)
a = self._prepInput(output,pos,var=False)
assert len(x)==len(y)==len(r)
if model=='supervisedModel':
return (currentIdx,x,y,r,a,pos)
r_batch = r[self.batch_size-1:]
a_batch = a[self.batch_size-1:]
pos_batch = pos[self.batch_size-1:]
x_batch = self._create_batch(x)
y_batch = self._create_batch(y)
return (currentIdx,x_batch,y_batch,r_batch,a_batch,pos_batch)
# prepare data for curiosity model
interval = 1
r_curious,pos,pos_tm1 = self._prep_reward(
rewardVec=reward_curious,curiosity=True)
if model=='curiosity':
r_copy = r_curious.detach().numpy()
for i,k in enumerate(pos):
# to be added to interval reward in update_curiousReward()
self.reward_normalized[str(k)] = r_copy[i]
if model=='supervisedModel':
# supervised model has two positions for each round, one is
# from behavior strategy, the other one from best response
r_curious = r_curious.repeat_interleave(2)
pos = [y for z in [[x*2,x*2+1] for x in pos] for y in z]
interval = 2
r_curious = r_curious[:-interval].view(-1,1)
r = self._prepInput(reward,pos_tm1,var=True)
if model=='supervisedModel':
r = r.repeat_interleave(2)
r_x = r.view(-1,1)
r_y = r[interval:].view(-1,1)
x = self._prepInput(inputVec,pos,var=True)
y = self._prepInput(nextStateVec,pos[:-interval],var=True)
x = torch.cat([x,r_x],dim=1)
y = torch.cat([y,r_y],dim=1)
a_inv = self._prepInput(output,pos,var=False)
a_fwd = self._prepInput(output,pos,var=True)
assert len(x)==len(y)+interval==len(r_curious)+interval==len(a_inv)
# create data batches of batchsize
x_batch = []
y_batch = []
r_curious_batch = r_curious[self.batch_size-1:]
a_inv_batch = a_inv[self.batch_size-1:]
a_fwd_batch = a_fwd[self.batch_size-1:]
pos_batch = pos[self.batch_size-1:]
for idx in np.arange(self.batch_size,len(x)+1):
x_batch.append(x[idx-self.batch_size:idx])
for idx in np.arange(self.batch_size,len(y)+1):
y_batch.append(y[idx-self.batch_size:idx])
if len(y_batch)==0: # not enough data for one batch
plmfile.write('{};{};{};too few data points.\n'.format(
time,self.unique_id,0))
plmfile.flush()
return
x_batch = torch.stack(x_batch,dim=0)
y_batch = torch.stack(y_batch,dim=0)
return (currentIdx,x_batch,y_batch,r_curious_batch,a_inv_batch,
a_fwd_batch,pos_batch)
def _create_batch(self,x):
x_batch = []
for idx in np.arange(self.batch_size,len(x)+1):
x_batch.append(x[idx-self.batch_size:idx])
try:
x_batch = torch.stack(x_batch,dim=0)
return x_batch
except:
return
def collectInput(self,inputVec,model='priceLearningModel',
buffer=None):
if buffer is None:
buffer = self.history_record * 2
self.inputVec[str(self.inputCounter)] = inputVec
self.inputCounter += 1
if len(self.inputVec)>max(self.history_record+buffer,
self.train_all_records+buffer):
self._removeRecord(model=model)
return str(self.inputCounter-1) # id for matching output and reward
def collectOutput(self,output,idx):
self.output[idx] = output
class PriceLearningModel(Model):
'''
vehicle price learning model, A2C. input is competitor state, new bid
info and environment variables. output is whether to hold bids for
the current round, and budget allocation to each bid. number of
bids sharing the same budget pool is currently fixed.
inputs, outputs and rewards are all normalized.
'''
actor_learning_rate = pmp.actor_learning_rate
actor_lr_min = pmp.actor_lr_min
actor_lr_reduce_rate = pmp.actor_lr_reduce_rate
critic_learning_rate = pmp.critic_learning_rate
critic_lr_min = pmp.critic_lr_min
critic_lr_reduce_rate = pmp.critic_lr_reduce_rate
actor_pretrain_nr_record = pmp.actor_pretrain_nr_record
reward_rate = pmp.reward_rate
reward_min = pmp.reward_min
reward_reduce_rate = pmp.reward_reduce_rate
add_randomness = pmp.add_randomness
exploration = pmp.exploration
actor_type = pmp.actor_type
critic_type = pmp.critic_type
def __init__(self,uniqueId,dimOutput=1,evaluation=False,loadModel=False,
curiosity=False,cumstep=0,endOfGame=5000,ca=True,
maxReward=1):
super().__init__(evaluation,loadModel,curiosity,maxReward)
self.unique_id = uniqueId + '_plm'
self.dimOutput = dimOutput
self.actor = None
self.critic = None
self.actor_optimizer = None
self.critic_optimizer = None
self.attention = None
self.ca = ca # ignore attention net(aka credit assignment) if False.
if self.ca:
self.attentionpath = os.path.join(MODEL_DIR,
self.unique_id+'_attention.pkl')
self.avg_reward = 0
self.avg_reward_ext = 0
# fictitious self play fsp
self.criticpath = os.path.join(MODEL_DIR,
self.unique_id+'_critic_train_fsp.pkl')
self.actorpath = os.path.join(MODEL_DIR,
self.unique_id+'_actor_train_fsp.pkl')
self.trainingdata = None # vectors of state, next state, reward, action
self.reward_ext = dict() # external reward for curiosity model
        # control training of attention net:
# (to train, to save attention.trainingdata)
self.trainWeightvector = (False,False)
self.exploration = max(
int(self.exploration / 2**(cumstep / endOfGame)),16)
def _initBudget(self):
''' random budget split if model is not available '''
return list(np.random.rand(self.dimOutput))
def _initActor(self,inputDim,outputDim,sharedLayers=None):
paramDim = int(outputDim + outputDim + (outputDim**2 - outputDim) / 2)
if self.actor_type=='Actor':
self.actor = MLP_Wrapper(inputDim,paramDim,
pmp.actor_hidden_size1,pmp.actor_hidden_size2)
else:
self.actor = CNNHighway(inputDim,paramDim,pmp.actor_num_filter,
pmp.actor_dropout_rate,pmp.actor_hidden_size1,
pmp.actor_hidden_size2,pmp.sharedlayer_output_dim,
sharedLayers)
if DEVICE!=torch.device('cpu'):
self.actor = nn.DataParallel(self.actor)
self.actor.to(DEVICE)
self.actor_params = list(filter(lambda p: p.requires_grad,
self.actor.parameters()))
self.actor_optimizer = torch.optim.SGD(self.actor_params,
lr=self.actor_learning_rate)
if self.evaluation or self.loadModel:
# evaluation: only run inference with previously trained model
# loadModel: load pre-trained model
self._reload(self.actorpath)
def _initCritic(self,inputDim,outputDim,sharedLayers=None):
if self.critic_type=='Critic':
self.critic = MLP_Wrapper(inputDim,outputDim,
pmp.critic_hidden_size1,pmp.critic_hidden_size2)
else:
self.critic = CNNHighway(inputDim,outputDim,pmp.critic_num_filter,
pmp.critic_dropout_rate,pmp.critic_hidden_size1,
pmp.critic_hidden_size2,pmp.sharedlayer_output_dim,
sharedLayers)
if DEVICE!=torch.device('cpu'):
self.critic = nn.DataParallel(self.critic)
self.critic.to(DEVICE)
self.critic_params = list(filter(lambda p: p.requires_grad,
self.critic.parameters()))
self.critic_optimizer = torch.optim.SGD(self.critic_params,
lr=self.critic_learning_rate)
if self.evaluation or self.loadModel:
# evaluation: only run inference with previously trained model
# loadModel: load pre-trained model
self._reload(self.criticpath)
def _initAttention(self,inputDim,outputDim):
self.attention = Attention(self.unique_id,input_size=inputDim,
output_size=outputDim,maxReward=1)
if DEVICE!=torch.device('cpu'):
self.attention = nn.DataParallel(self.attention)
self.attention.to(DEVICE)
self.attention.setOptim(lr=0.01)
if self.evaluation or self.loadModel:
# evaluation: only run inference with previously trained model
# loadModel: load pre-trained model
self._reload(self.attentionpath)
def _reload(self,path):
try:
checkpoint = torch.load(path)
if path==self.criticpath:
self.critic.load_state_dict(checkpoint)
elif path==self.actorpath:
self.actor.load_state_dict(checkpoint)
elif path==self.attentionpath:
self.attention.load_state_dict(checkpoint)
else:
pass
except:
pass
def _updateLearningRate(self):
currentIdx = max([int(k) for k in self.reward.keys()])
if currentIdx < self.exploration:
critic_lr_reduce_rate = 1
actor_lr_reduce_rate = 1
reward_reduce_rate = 1
else:
critic_lr_reduce_rate = self.critic_lr_reduce_rate
actor_lr_reduce_rate = self.actor_lr_reduce_rate
reward_reduce_rate = self.reward_reduce_rate
if self.critic is not None:
self.critic_learning_rate = max(self.critic_lr_min,
self.critic_learning_rate * critic_lr_reduce_rate)
self.critic_optimizer = torch.optim.SGD(self.critic_params,
lr=self.critic_learning_rate)
if self.actor is not None:
self.actor_learning_rate = max(self.actor_lr_min,
self.actor_learning_rate * actor_lr_reduce_rate)
self.actor_optimizer = torch.optim.SGD(self.actor_params,
lr=self.actor_learning_rate)
self.reward_rate = max(self.reward_min,
self.reward_rate * reward_reduce_rate)
def _critic_loss_func(self,value,next_value,reward,avg_reward,rate,
invloss,fwdloss):
if invloss is None:
invloss = torch.zeros(len(reward))
fwdloss = torch.zeros(len(reward))
reward_int = (reward.view(1,-1)
- cmp.invLoss_weight * invloss.view(1,-1)
- (1-cmp.invLoss_weight) * fwdloss.view(1,-1)).mean()
reward_int = reward_int.detach()
advantage = (reward + next_value - value
- cmp.invLoss_weight * invloss.view(-1,1)
- (1-cmp.invLoss_weight) * fwdloss.view(-1,1))
for i in range(len(advantage)):
advantage[i] -= avg_reward
if not torch.isnan(advantage[i]):
avg_reward += rate * advantage[i].item()
return advantage.pow(2).mean(),advantage,avg_reward,reward_int
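    # This is the average-reward (differential) formulation for continuing
    # tasks rather than a discounted one (cf. Sutton & Barto, 2018, ch. 10):
    # per sample the TD error is
    #   delta = r + V(s') - V(s) - avg_reward   (minus the curiosity losses),
    # the running average is updated as avg_reward += rate * delta, and the
    # critic minimizes the mean squared TD error.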
def _createCovMat(self,diag,tril):
# with batchsize
z = torch.zeros(size=[diag.size(0)],device=DEVICE).type(DTYPE)
diag = 1E-7 + diag # strictly positive
elements = []
trilPointer = 0
for i in range(diag.shape[1]):
for j in range(diag.shape[1]):
if j<i:
elements.append(tril[:,trilPointer])
trilPointer += 1
elif j==i:
elements.append(diag[:,i])
else:
elements.append(z)
scale_tril = torch.stack(elements,dim=-1).view(-1,self.dimOutput,
self.dimOutput)
return scale_tril
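    # Worked example, assuming dimOutput = 2: the actor emits
    # 2 (means) + 2 (diag) + 1 (tril) = 5 parameters per sample. With
    # diag = (d1, d2) and tril = (t,), the assembled factor is
    #   scale_tril = [[d1, 0],
    #                 [t,  d2]],
    # and MultivariateNormal uses cov = scale_tril @ scale_tril.T, which is
    # positive definite because the diagonal entries are forced > 0.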
def _actor_loss_func(self,log_prob_actions,advantage):
        '''
        policy-gradient loss for a continuous action space: the negative
        log-probability of the sampled actions, weighted by the detached
        advantage.
        '''
        # pytorch optimizers perform gradient descent, while policy-gradient
        # methods such as REINFORCE are derived for gradient ascent; hence
        # -log_prob is minimized here.
        return (advantage.detach() * -log_prob_actions
                ).mean()
def _chooseAction(self,params):
'''
        the action space is multi-dimensional and continuous, therefore we use
parameterized action estimators, and a multivariate gaussian
distribution to output joint probability of the actions.
parameters in this case includes N means and N*N covariance
matrix elements. Therefore this solution is not scalable when N
increases. Another solution would be to use a RNN, such as in
https://arxiv.org/pdf/1806.00589.pdf
or http://papers.nips.cc/paper/6398-learning-multiagent-communication-with-backpropagation.pdf
or https://arxiv.org/pdf/1705.05035.pdf
derivatives of a multivariate gaussian: see matrix cookbook chapter 8:
http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3274/pdf/imm3274.pdf
params are split into mean, covariance matrix diagonal,
cov matrix triangle lower half (since cov matrix is symmetric).
also make sure cov is positive definite.
'''
mean,diag,tril = params.split([self.dimOutput,self.dimOutput,
params.shape[1]-2*self.dimOutput],dim=-1)
scale_tril = self._createCovMat(diag,tril)
dist = MultivariateNormal(loc=mean,scale_tril=scale_tril)
# https://pytorch.org/docs/stable/distributions.html#pathwise-derivative
actions = dist.rsample()
log_prob_actions = dist.log_prob(actions)
return actions,log_prob_actions,mean
def _saveModel(self):
if self.critic is not None:
torch.save(self.critic.state_dict(),self.criticpath)
if self.actor is not None:
torch.save(self.actor.state_dict(),self.actorpath)
if self.attention is not None:
torch.save(self.attention.state_dict(),self.attentionpath)
def _inference_prepInput(self,inputVec,randomness=None,output_r=False):
if self.critic is None or self.actor is None:
x = tensor(self._initBudget(),device=DEVICE).type(DTYPE)
r = torch.rand(len(x))
return (False,x,r)
if randomness is None:
randomness = self.add_randomness * self.actor_learning_rate
nr = np.random.rand()
if nr<randomness:
x = tensor(self._initBudget(),device=DEVICE).type(DTYPE)
r = torch.rand(len(x))
return (False,x,r)
fullInput = list(self.inputVec.values())[
-(self.batch_size-1):] + [inputVec]
x = self._prepInput(inputVec=fullInput)
if self.curiosity: # add latest MEC reward to the state vector
r,_,_ = self._prep_reward(self.reward,randomize=True,
normalize=True,curiosity=False)
r = Variable(r[-len(x):]).view(-1,1)
x = torch.cat([x,r],dim=1)
if not output_r:
return (True,x,None)
else:
r_output,_,_ = self._prep_reward(self.reward_curious,
randomize=True,normalize=True,curiosity=False)
r_output = r_output[-len(x):].view(1,-1)
return (True,x,r_output)
else:
if not output_r:
return (True,x,None)
else:
r,_,_ = self._prep_reward(self.reward,randomize=True,
normalize=True,curiosity=False)
r_output = r[-len(x):].view(1,-1)
return (True,x,r_output)
def inference(self,inputVec,randomness=None):
exist,x,_ = self._inference_prepInput(inputVec,randomness)
if not exist:
return x
self.actor.eval()
with torch.no_grad():
x = x[None, :, :]
params = self.actor(x)
actions,_,_ = self._chooseAction(params)
return torch.clamp(actions[0],0,1)
def inference_weightVec(self,phi=None,r=None):
output = self.attention.inference(input_tensor=phi,target_tensor=r)
output = output * len(output)
return output
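    # The attention net yields a softmax weight per time step (summing to 1);
    # multiplying by len(output) rescales the weights to average 1, so they
    # redistribute credit across the batch without changing its overall
    # reward scale.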
def train(self,time,plmfile,rewardfile,invfile,fwdfile,curMdl=None):
self.trainingdata = self.prep_data(time,plmfile,
curious=self.curiosity)
if self.trainingdata is None:
return
if not self.curiosity:
currentIdx,x,y,r,_,pos = self.trainingdata
else:
currentIdx,s_t,s_tp1,r,_,_,pos = self.trainingdata
s_t = s_t[:-1]
# if curiosity model is detached from price learning model
x = s_t
y = s_tp1
if self.critic is None:
self._initCritic(x.shape[-1],1)
if self.actor is None and currentIdx>=self.actor_pretrain_nr_record:
self._initActor(x.shape[-1],self.dimOutput,
self.critic.sharedLayers)
if self.attention is None and self.ca:
self._initAttention(
inputDim=self.actor.sharedLayers.outputDim,outputDim=1)
self._saveModel()
for epoch in range(self.epoch): # price learning model epoch=1
pointer = 0
epoch_loss_critic = []
epoch_loss_actor = []
epoch_loss_attention = []
epoch_reward_int = []
while pointer+1<len(x):
idx = range(pointer,min(pointer+self.batch_size,len(x)))
invloss_vec = torch.zeros(len(idx))
fwdloss_vec = torch.zeros(len(idx))
if self.curiosity:
invloss_vec,fwdloss_vec = curMdl.train(time,
trainingdata=self.trainingdata,invmodelfile=invfile,
forwardmodelfile=fwdfile,pretrain=False,idx=idx,
sharedLayers=self.actor.sharedLayers)
x_batch = x[idx]
y_batch = y[idx]
r_batch = r[idx]
reward = np.nan
get_r_weight = (self.actor is not None and self.ca
and pointer+self.batch_size<=len(x))
if get_r_weight:
r_weight = self.inference_weightVec(
phi=self.actor.sharedLayers(x_batch),
r=r_batch.type(DTYPE))
r_weight = tensor(r_weight)
if r_weight.sum()==0 or torch.isnan(r_weight.sum()):
r_weight = torch.ones(len(r_batch))
r_batch = r_batch.view(-1,1) * r_weight.view(-1,1)
values = self.critic(x_batch)
next_values = self.critic(y_batch)
(critic_loss,advantage,self.avg_reward,
reward_int) = self._critic_loss_func(values,next_values,
r_batch,self.avg_reward,self.reward_rate,
invloss_vec,fwdloss_vec)
epoch_loss_critic.append(critic_loss)
epoch_reward_int.append(reward_int)
loss = critic_loss
if self.actor is not None:
action_params = self.actor(x_batch)
actions,log_prob_actions,mean = self._chooseAction(
action_params)
actor_loss = self._actor_loss_func(log_prob_actions,
advantage)
epoch_loss_actor.append(actor_loss)
loss += actor_loss
self.actor_optimizer.zero_grad()
self.critic_optimizer.zero_grad()
loss.backward()
self.actor_optimizer.step()
self.critic_optimizer.step()
# record intrinsic and extrinsic rewards
if self.trainWeightvector[1]:
# new ext. reward
try:
reward_ext_key = str(max([int(k)
for k in self.reward_ext.keys()]))
reward = self.reward_ext[reward_ext_key]
except:
pass
rewardfile.write('{};{};{};{};{}\n'.format(time,
self.unique_id,epoch,reward_int,reward))
rewardfile.flush()
# trainWeightVector[0]: true when price learning model
# is trained, or when there is new external reward.
# trainWeightVector[1]: true when there is new external reward
# train weight vector on the last batch before ext. reward
if (self.curiosity and pointer+self.batch_size==len(x)
and self.trainWeightvector[0]):
try:
reward_ext_key = str(max([int(k)
for k in self.reward_ext.keys()]))
except:
break
reward = self.reward_ext[reward_ext_key]
if self.ca: # if attention net:
if self.trainWeightvector[1]: # new ext. reward
input_tensor = self.actor.sharedLayersOutput.detach().clone()
attention_loss = self.attention.train(
input_tensor=input_tensor,
target_tensor=r_batch.type(DTYPE),
end_value=reward)
self.attention.trainingdata = (currentIdx,s_t,
s_tp1,r)
# retrain on new features and past ext. reward
else:
try:
(currentIdx,s_t,
s_tp1,r) = self.attention.trainingdata
x = s_t
x_batch = x[idx]
r_batch = r[idx]
except:
break
input_tensor = self.actor.sharedLayers(x_batch).detach().clone()
attention_loss = self.attention.train(
input_tensor=input_tensor,
target_tensor=r_batch.type(DTYPE),
end_value=reward)
if (attention_loss==np.inf or attention_loss==np.nan
or torch.isinf(attention_loss)
or torch.isnan(attention_loss)):
self._initAttention(
inputDim=self.actor.sharedLayers.outputDim,
outputDim=1)
self._reload(self.attentionpath)
else:
epoch_loss_attention.append(attention_loss)
pointer += 1
if pointer+self.batch_size>len(x):
break
avgLossCritic = sum(epoch_loss_critic)
if len(epoch_loss_critic) > 0:
avgLossCritic /= len(epoch_loss_critic)
avgLossActor = sum(epoch_loss_actor)
if len(epoch_loss_actor) > 0:
avgLossActor /= len(epoch_loss_actor)
avgLossAttention = sum(epoch_loss_attention)
if len(epoch_loss_attention) > 0:
avgLossAttention /= len(epoch_loss_attention)
else:
avgLossAttention = np.nan
plmfile.write('{};{};{};{};{};{};{};{}\n'.format(time,
self.unique_id,epoch,len(x),self.avg_reward,
avgLossCritic, avgLossActor, avgLossAttention))
if avgLossCritic!=0 and torch.isnan(avgLossCritic):
plmfile.write(
'{};{};{};{};{};{};{};{};critic restarted.\n'.format(
time,self.unique_id,epoch,len(x),self.avg_reward,
avgLossCritic,avgLossActor,avgLossAttention))
self._initCritic(x.shape[-1],1)
self._reload(self.criticpath)
if avgLossActor!=0 and torch.isnan(avgLossActor):
plmfile.write(
'{};{};{};{};{};{};{};{};actor restarted.\n'.format(
time,self.unique_id,epoch,len(x),self.avg_reward,
avgLossCritic,avgLossActor,avgLossAttention))
self._initActor(x.shape[-1],self.dimOutput,
self.critic.sharedLayers)
self._reload(self.actorpath)
plmfile.flush()
self._updateLearningRate()
self.trainingdata = None
def collectNextState(self,stateVec,idx):
envVec = self.inputVec[idx][len(stateVec):]
nextState = stateVec + envVec
self.nextStateVec[idx] = nextState
def collectReward(self,reward,idx,rewardType='in'):
if rewardType=='in':
if (idx not in self.reward.keys() or self.reward[idx] is None
or np.isnan(self.reward[idx])):
if idx in self.inputVec.keys():
self.reward[idx] = reward
else:
self.reward[idx] += reward
else: # if rewardType=='ex':
self.reward_ext[idx] = reward
def update_curiousReward(self,rewardVec):
if rewardVec is None:
for k in self.reward_normalized.keys(): # add bidding payoff
self.reward_curious[k] = (pmp.reward_int_weight
* self.reward_normalized[k])
self.trainingdata = None
return
for (k,v) in rewardVec.items():
self.reward_curious[k] = v # add reward from curiosity model
if k in self.reward_normalized.keys(): # add bidding payoff
self.reward_curious[k] += (pmp.reward_int_weight
* self.reward_normalized[k])
self.reward_curious[k] = ((1-pmp.reward_ext_weight)
* self.reward_curious[k])
if k in self.reward_ext.keys():
self.reward_curious[k] += (pmp.reward_ext_weight
* self.reward_ext[k]/self.maxReward)
self.trainingdata = None
class MLP(nn.Module):
    '''plain multilayer perceptron, used as a simpler alternative to the
    CNN-highway feature extractor
    '''
def __init__(self,inputDim,outputDim,hidden_size1,hidden_size2):
super().__init__()
self.batchNorm = nn.BatchNorm1d(inputDim)
self.hidden1 = nn.Linear(inputDim,hidden_size1)
self.hidden2 = nn.Linear(hidden_size1,hidden_size2)
self.batchNorm2 = nn.BatchNorm1d(hidden_size2)
self.hidden3 = nn.Linear(hidden_size2,outputDim)
def forward(self,x):
batchnorm = self.batchNorm(x)
hidden1 = F.relu(self.hidden1(batchnorm))
hidden2 = F.relu(self.batchNorm2(self.hidden2(hidden1)))
hidden3 = self.hidden3(hidden2)
return hidden3
class MLP_Wrapper(nn.Module):
'''value function estimator. sigmoid layer is used for output to
control the output range.
'''
def __init__(self,inputDim,outputDim,hidden_size1,hidden_size2):
super().__init__()
self.mlp = MLP(inputDim,outputDim,hidden_size1,hidden_size2)
self.predict = nn.Sigmoid() # output between 0 and 1
def forward(self,x):
mlp = self.mlp(x)
predict = self.predict(mlp)
return predict
class Highway(nn.Module):
def __init__(self,in_features,out_features,num_layers=1,bias=0):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.num_layers = num_layers
self.bias = bias
self.cells = nn.ModuleList()
for idx in range(self.num_layers):
g = nn.Sequential(
nn.Linear(self.in_features, self.out_features),
nn.ReLU(inplace=True)
)
t = nn.Sequential(
nn.Linear(self.in_features, self.out_features),
nn.Sigmoid()
)
self.cells.append(g)
self.cells.append(t)
def forward(self,x):
for i in range(0,len(self.cells),2):
g = self.cells[i]
t = self.cells[i+1]
nonlinearity = g(x)
transformGate = t(x) + self.bias
x = nonlinearity * transformGate + (1-transformGate) * x
return x
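# Each highway layer above computes
#   y = g(x) * t(x) + (1 - t(x)) * x,
# with g a ReLU projection and t a sigmoid transform gate (Srivastava et al.,
# 2015); the constant `bias` shifts the gate, negative values favoring
# carrying the input through unchanged. Note in_features must equal
# out_features for the residual term to be well defined.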
class SharedLayers(nn.Module):
filter_size = list(np.arange(1,pmp.batch_size,step=2,dtype=int))
def __init__(self,inputDim,outputDim,num_filter):
super().__init__()
self.num_filter = ([num_filter]
+ [num_filter * 2] * int(len(self.filter_size)/2)
+ [num_filter] * len(self.filter_size))[0:len(self.filter_size)]
self.num_filter_total = sum(self.num_filter)
self.inputDim = inputDim
self.outputDim = outputDim if outputDim>0 else self.num_filter_total
self.seqLength = pmp.batch_size
self.batchNorm = nn.BatchNorm1d(pmp.batch_size)
self.convs = nn.ModuleList()
for fsize, fnum in zip(self.filter_size, self.num_filter):
# kernel_size = depth, height, width
conv = nn.Sequential(
nn.Conv2d(in_channels=1,out_channels=fnum,
kernel_size=(fsize,inputDim),
padding=0,stride=1),
nn.ReLU(inplace=True),
nn.BatchNorm2d(fnum),
nn.MaxPool2d(kernel_size=(self.seqLength-fsize+1,1),stride=1)
)
self.convs.append(conv)
self.highway = Highway(self.num_filter_total,self.num_filter_total,
num_layers=1, bias=0)
self.compress = nn.Linear(self.num_filter_total,self.outputDim)
def forward(self,x):
batchnorm = self.batchNorm(x)
xs = list()
for i,conv in enumerate(self.convs):
x0 = conv(batchnorm.view(-1,1,self.seqLength,self.inputDim))
x0 = x0.view((x0.shape[0],x0.shape[1]))
xs.append(x0)
cats = torch.cat(xs,1)
highwayOutput = self.highway(cats)
sharedLayersOutput = nn.Sigmoid()(self.compress(highwayOutput))
return sharedLayersOutput
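# SharedLayers is a TextCNN-style extractor over a window of the last
# `batch_size` state vectors: each Conv2d slides a (fsize x inputDim) kernel
# over the (seq_len x inputDim) input, max-pools over time, and the pooled
# maps for all kernel heights are concatenated, passed through the highway
# block, and compressed to outputDim with a sigmoid. A single instance can be
# shared between actor, critic and curiosity model (see CNNHighway and
# Curiosity._initInvmodel), so all of them shape the same feature space.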
class CNNHighway(nn.Module):
def __init__(self,inputDim,outputDim,num_filter,dropout_rate,
hidden_size1,hidden_size2,sharedlayer_output_dim,
sharedLayers):
super().__init__()
self.inputDim = inputDim
self.outputDim = outputDim
self.dropout_rate = dropout_rate
if sharedLayers is None:
self.sharedLayers = SharedLayers(inputDim,sharedlayer_output_dim,
num_filter)
else:
self.sharedLayers = sharedLayers
self.sharedLayersOutputDim = self.sharedLayers.outputDim
# p: probability of an element to be zeroed. Default: 0.0
self.dropout = nn.Dropout(p=self.dropout_rate)
self.fc_conv = nn.Linear(self.sharedLayersOutputDim,outputDim)
self.predict = nn.Sigmoid()
def forward(self,x):
self.sharedLayersOutput = self.sharedLayers(x)
dropout = F.relu(self.dropout(self.sharedLayersOutput))
fc_conv = F.relu(self.fc_conv(dropout))
predict = self.predict(fc_conv)
return predict
class SupervisedModel(Model):
supervise_learning_rate = pmp.supervise_learning_rate
supervise_hidden_size1 = pmp.supervise_hidden_size1
supervise_hidden_size2 = pmp.supervise_hidden_size2
targetUpperBound = vp.totalBudget[0]
def __init__(self,uniqueId,dimOutput=1,evaluation=False,loadModel=False,
curiosity=False,maxReward=1):
super().__init__(evaluation,loadModel,curiosity,maxReward)
self.unique_id = uniqueId + '_supervised'
self.dimOutput = dimOutput
self.supervise = None # the model
self.supervisepath = os.path.join(MODEL_DIR,
self.unique_id+'_train_fsp.pkl')
def _initBudget(self):
''' random budget split if model is not available '''
return list(np.random.rand(self.dimOutput))
def _reload(self,path):
try:
checkpoint = torch.load(path)
if path==self.supervisepath:
self.supervise.load_state_dict(checkpoint)
except:
pass
def _saveModel(self,supervise=True):
if supervise:
torch.save(self.supervise.state_dict(),self.supervisepath)
def _initSupervise(self,inputDim,outputDim):
self.supervise = MLP_Wrapper(inputDim,outputDim,
self.supervise_hidden_size1,self.supervise_hidden_size2)
if DEVICE!=torch.device('cpu'):
self.supervise = nn.DataParallel(self.supervise)
self.supervise.to(DEVICE)
self.supervise_params = list(filter(lambda p: p.requires_grad,
self.supervise.parameters()))
self.supervise_optimizer = torch.optim.SGD(self.supervise_params,
lr=self.supervise_learning_rate)
self.loss_func = torch.nn.MSELoss()
if self.evaluation or self.loadModel:
# evaluation: only run inference with previously trained model
# loadModel: load pre-trained model
self._reload(self.supervisepath)
def inference(self,inputVec,pmdlReward=None):
if self.supervise is None:
return tensor(self._initBudget(),device=DEVICE).type(DTYPE)
if self.curiosity: # add latest MEC reward to the state vector
fullInput = list(self.inputVec.values())[
-(self.batch_size-1):] + [inputVec]
x = self._prepInput(inputVec=fullInput)
r,_,_ = self._prep_reward(pmdlReward,randomize=False,
normalize=True,curiosity=False)
r = Variable(r[-len(x):]).view(x.shape[0],1)
x = torch.cat([x,r],dim=1)
else:
x = self._prepInput(inputVec=inputVec)
x = x.reshape(1,-1)
self.supervise.eval()
actions = self.supervise(x)
return torch.clamp(actions[0],0,1)
def train(self,time,supervisefile,pmdlReward):
trainingdata = self.prep_data(time,supervisefile,reward=pmdlReward,
reward_curious=pmdlReward,inputVec=self.inputVec,
nextStateVec=self.inputVec,output=self.output,
curious=self.curiosity,model='supervisedModel')
if trainingdata is not None:
if not self.curiosity:
currentIdx,x,_,_,y,_ = trainingdata
else:
currentIdx,x,_,_,y,_,_ = trainingdata
else:
return
x = x.view(x.size()[0],-1)
if self.supervise is None:
self._initSupervise(x.shape[1],self.dimOutput)
self._saveModel()
if len(x)<self.batch_size:
replace = True
else:
replace = False
for epoch in range(self.epoch_supervise):
epoch_loss_supervise = []
idx = np.random.choice(len(x),size=self.batch_size,
replace=replace)
x_batch = x[idx]
y_batch = y[idx]
prediction = self.supervise(x_batch)
supervise_loss = self.loss_func(prediction, y_batch)
self.supervise_optimizer.zero_grad()
supervise_loss.backward()
self.supervise_optimizer.step()
epoch_loss_supervise.append(supervise_loss)
avgLossSupervise = sum(epoch_loss_supervise)
if len(epoch_loss_supervise) > 0:
avgLossSupervise /= len(epoch_loss_supervise)
supervisefile.write('{};{};{};{};{}\n'.format(time,
self.unique_id,epoch,len(x),avgLossSupervise))
if avgLossSupervise!=0 and torch.isnan(avgLossSupervise):
supervisefile.write(
'{};{};{};{};{};supervised learning restarted.\n'.format(
time,self.unique_id,epoch,len(x),avgLossSupervise))
self._initSupervise(x.shape[1],self.dimOutput)
self._reload(self.supervisepath)
supervisefile.flush()
class InverseModel(nn.Module):
'''The inverse module in curiosity model '''
def __init__(self,feature_hidden_size1,feature_hidden_size2,
inv_hidden_size1,inv_hidden_size2,outputDim,
sharedLayers=None):
        super().__init__()
        if sharedLayers is None:
            return
self.features = sharedLayers
self.feature_outputDim = sharedLayers.outputDim
inv_inputDim = 2 * self.feature_outputDim
self.batchNorm = nn.BatchNorm1d(inv_inputDim)
self.hidden1 = nn.Linear(inv_inputDim,inv_hidden_size1)
self.hidden2 = nn.Linear(inv_hidden_size1,inv_hidden_size2)
self.batchNorm2 = nn.BatchNorm1d(inv_hidden_size2)
self.hidden3 = nn.Linear(inv_hidden_size2,outputDim)
self.predict = nn.Sigmoid()
def forward(self,oldstate,newstate):
# states: (batchsize,batchsize,featureDim)
# features (sharedlayers) output: (batchsize,sharedlayers.outputDim)
# cat output (batchnorm1d input): (batchsize,2xsharedlayers.outputDim)
# therefore the size of batchnorm1d should be 2xsharedlayers.outputDim
oldfeatures = self.features(oldstate)
self.oldfeatures = oldfeatures.detach().clone()
newfeatures = self.features(newstate)
self.newfeatures = newfeatures.detach().clone()
x = torch.cat((oldfeatures,newfeatures),-1)
batchnorm = self.batchNorm(x)
hidden1 = F.relu(self.hidden1(batchnorm))
hidden2 = F.relu(self.batchNorm2(self.hidden2(hidden1)))
hidden3 = self.hidden3(hidden2)
predict = self.predict(hidden3)
return predict
class ForwardModel(nn.Module):
'''The forward module in curiosity model '''
feature_hidden_size1 = cmp.feature_hidden_size1
feature_hidden_size2 = cmp.feature_hidden_size2
def __init__(self,inputDim_action,
forward_hidden_size1,forward_hidden_size2,outputDim,
sharedLayers=None):
        super().__init__()
        if sharedLayers is None:
            return
self.features = sharedLayers
self.feature_outputDim = sharedLayers.outputDim
forward_inputDim = inputDim_action + self.feature_outputDim
self.batchNorm = nn.BatchNorm1d(forward_inputDim)
self.hidden1 = nn.Linear(forward_inputDim,forward_hidden_size1)
self.hidden2 = nn.Linear(forward_hidden_size1,forward_hidden_size2)
self.batchNorm2 = nn.BatchNorm1d(forward_hidden_size2)
self.hidden3 = nn.Linear(forward_hidden_size2,outputDim)
self.predict = nn.Sigmoid()
def forward(self,action,oldstate):
oldfeatures = self.features(oldstate)
x = torch.cat((action,oldfeatures),-1)
batchnorm = self.batchNorm(x)
hidden1 = F.relu(self.hidden1(batchnorm))
hidden2 = F.relu(self.batchNorm2(self.hidden2(hidden1)))
hidden3 = self.hidden3(hidden2)
predict = self.predict(hidden3)
return predict
class Curiosity(Model):
feature_hidden_size1 = cmp.feature_hidden_size1
feature_hidden_size2 = cmp.feature_hidden_size2
inv_hidden_size1 = cmp.inv_hidden_size1
inv_hidden_size2 = cmp.inv_hidden_size2
inv_learning_rate = cmp.inv_learning_rate
forward_hidden_size1 = cmp.forward_hidden_size1
forward_hidden_size2 = cmp.forward_hidden_size2
forward_learning_rate = cmp.forward_learning_rate
batch_size = cmp.batch_size
epoch = cmp.epoch
def __init__(self,uniqueId,
dimAction=1,evaluation=False,loadModel=False,maxReward=1):
super().__init__(evaluation,loadModel,curiosity=True,
maxReward=maxReward)
self.unique_id = uniqueId + '_curious'
self.dimOutput_action = dimAction
self.invmodel = None # the inverse model
self.invmodelpath = os.path.join(MODEL_DIR,
self.unique_id+'_train_inv.pkl')
self.forwardmodel = None # the forward model
self.forwardmodelpath = os.path.join(MODEL_DIR,
self.unique_id+'_train_forward.pkl')
self.reward = None
self.sharedLayers = None
def _initSharedLayers(self,sharedLayers=None):
if self.sharedLayers is None:
self.sharedLayers = sharedLayers
self.feature_outputDim = sharedLayers.outputDim
def _initOutput_action(self):
''' random output if inverse model is not available '''
return list(np.random.rand(self.dimOutput_action))
def _initOutput_features(self):
''' random output if forward model is not available '''
return list(np.random.rand(self.feature_outputDim))
def _reload_invmodel(self,path):
try:
checkpoint = torch.load(path)
if path==self.invmodelpath:
self.invmodel.load_state_dict(checkpoint)
except:
pass
def _reload_forwardmodel(self,path):
try:
checkpoint = torch.load(path)
if path==self.forwardmodelpath:
self.forwardmodel.load_state_dict(checkpoint)
except:
pass
def _saveModel(self,invmodel=True,forwardmodel=True):
if invmodel and self.invmodel is not None:
torch.save(self.invmodel.state_dict(),self.invmodelpath)
if forwardmodel and self.forwardmodel is not None:
torch.save(self.forwardmodel.state_dict(),self.forwardmodelpath)
def _initInvmodel(self,sharedLayers=None):
self._initSharedLayers(sharedLayers)
self.invmodel = InverseModel(
self.feature_hidden_size1,self.feature_hidden_size2,
self.inv_hidden_size1,self.inv_hidden_size2,
self.dimOutput_action,self.sharedLayers)
if DEVICE!=torch.device('cpu'):
self.invmodel = nn.DataParallel(self.invmodel)
self.invmodel.to(DEVICE)
self.invmodel_params = list(filter(lambda p: p.requires_grad,
self.invmodel.parameters()))
self.invmodel_optimizer = torch.optim.SGD(self.invmodel_params,
lr=self.inv_learning_rate)
self.invmodel_loss_func = torch.nn.MSELoss()
if self.evaluation or self.loadModel:
# evaluation: only run inference with previously trained model
# loadModel: load pre-trained model
self._reload_invmodel(self.invmodelpath)
def _initForwardmodel(self):
if self.invmodel is None:
return
self.forwardmodel = ForwardModel(self.dimOutput_action,
self.forward_hidden_size1,self.forward_hidden_size2,
self.feature_outputDim,self.invmodel.features)
if DEVICE!=torch.device('cpu'):
self.forwardmodel = nn.DataParallel(self.forwardmodel)
self.forwardmodel.to(DEVICE)
self.forwardmodel_params = list(filter(lambda p: p.requires_grad,
self.forwardmodel.parameters()))
self.forwardmodel_optimizer = torch.optim.SGD(self.forwardmodel_params,
lr=self.forward_learning_rate)
self.forwardmodel_loss_func = torch.nn.MSELoss()
if self.evaluation or self.loadModel:
# evaluation: only run inference with previously trained model
# loadModel: load pre-trained model
self._reload_forwardmodel(self.forwardmodelpath)
def _calc_reward(self,pos,oldInputVec_state,newInputVec_state,
inputVec_actualAction):
idx = range(len(oldInputVec_state))
s_t_batch = oldInputVec_state[idx]
s_tp1_batch = newInputVec_state[idx]
a_f_batch = inputVec_actualAction[idx]
a,phi,phi_tp1 = self.inference_invmodel(s_t_batch,s_tp1_batch)
phi_tp1_hat = self.inference_forwardmodel(
s_t_batch,a_f_batch).detach().numpy()
phi_tp1 = phi_tp1.detach().numpy()
a_i_batch = a.detach().numpy()
a_f_batch = a_f_batch.detach().numpy()
invLoss = list(((a_i_batch-a_f_batch)**2).mean(axis=1))
fwdLoss = list(((phi_tp1-phi_tp1_hat)**2).mean(axis=1))
predLoss = fwdLoss
keys = [str(k) for k in pos]
self.reward = dict([(k,(1-pmp.reward_int_weight)*v)
for (k,v) in zip(keys,predLoss)])
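    # This mirrors the intrinsic curiosity module of Pathak et al. (2017):
    # the inverse model predicts a_t from (phi(s_t), phi(s_t+1)), the forward
    # model predicts phi(s_t+1) from (a_t, phi(s_t)), and the intrinsic
    # reward is the forward prediction error,
    #   r_int = (1 - reward_int_weight) * mean((phi_hat_tp1 - phi_tp1)^2).
    # The inverse loss is computed here too, but it only shapes the critic's
    # advantage (see _critic_loss_func), not the reward itself.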
def inference_invmodel(self,oldInputVec_state,newInputVec_state):
if self.invmodel is None:
return (tensor(self._initOutput_action(),
device=DEVICE).type(DTYPE),
tensor(self._initOutput_features(),
device=DEVICE).type(DTYPE),
tensor(self._initOutput_features(),
device=DEVICE).type(DTYPE))
self.invmodel.eval()
actions = self.invmodel(oldInputVec_state,newInputVec_state)
newfeatures = self.invmodel.newfeatures
oldfeatures = self.invmodel.oldfeatures
return torch.clamp(actions,0,1), oldfeatures, newfeatures
def inference_forwardmodel(self,oldInputVec_state,actualAction):
self.forwardmodel.eval()
newstate = self.forwardmodel(actualAction,oldInputVec_state)
return newstate
def train(self,time,trainingdata,invmodelfile,forwardmodelfile,
pretrain=True,idx=None,sharedLayers=None):
if trainingdata is None:
return None,None
(currentIdx,oldInputVec_state,newInputVec_state,_,
actualAction_inv,actualAction_fwd,pos) = trainingdata
s_t = oldInputVec_state[:-1]
s_tp1 = newInputVec_state
a_t_inv = actualAction_inv[:-1]
a_t_fwd = actualAction_fwd[:-1]
pos = pos[:-1]
if self.invmodel is None:
self._initInvmodel(sharedLayers)
self._initForwardmodel()
self._saveModel()
if len(s_t)<self.batch_size:
replace = True
else:
replace = False
training_epoch = 1
if pretrain:
training_epoch = self.epoch
for epoch in range(training_epoch):
epoch_loss_invmodel = []
epoch_loss_forwardmodel = []
if pretrain or idx is None:
idx = np.random.choice(len(s_t),size=self.batch_size,
replace=replace)
s_t_batch = s_t[idx]
s_tp1_batch = s_tp1[idx]
a_i_batch = a_t_inv[idx]
a_f_batch = a_t_fwd[idx]
action_pred = self.invmodel(s_t_batch,s_tp1_batch)
invmodel_loss = self.invmodel_loss_func(action_pred,a_i_batch)
invloss_vec = (action_pred-a_i_batch).pow(2).mean(dim=-1)
if pretrain:
self.invmodel_optimizer.zero_grad()
invmodel_loss.backward()
self.invmodel_optimizer.step()
epoch_loss_invmodel.append(invmodel_loss.detach())
newfeature_actual = self.invmodel.newfeatures
feature_pred = self.forwardmodel(a_f_batch,s_t_batch)
forwardmodel_loss = self.forwardmodel_loss_func(feature_pred,
newfeature_actual)
fwdloss_vec = (feature_pred-newfeature_actual).pow(2).mean(dim=-1)
if pretrain:
self.forwardmodel_optimizer.zero_grad()
forwardmodel_loss.backward()
self.forwardmodel_optimizer.step()
epoch_loss_forwardmodel.append(forwardmodel_loss.detach())
avgLossInvmodel = sum(epoch_loss_invmodel)
avgLossForwardmodel = sum(epoch_loss_forwardmodel)
if len(epoch_loss_invmodel) > 0:
avgLossInvmodel /= len(epoch_loss_invmodel)
if len(epoch_loss_forwardmodel) > 0:
avgLossForwardmodel /= len(epoch_loss_forwardmodel)
if pretrain:
invmodelfile.write('{};{};{};{};{}\n'.format(time,
self.unique_id,epoch,len(s_t),avgLossInvmodel))
invmodelfile.flush()
forwardmodelfile.write('{};{};{};{};{}\n'.format(time,
self.unique_id,epoch,len(s_t),avgLossForwardmodel))
forwardmodelfile.flush()
if avgLossInvmodel!=0 and torch.isnan(avgLossInvmodel):
invmodelfile.write(
'{};{};{};{};{};inverse model learning restarted.\n'.format(
time,self.unique_id,epoch,len(s_t),avgLossInvmodel))
invmodelfile.flush()
self._initInvmodel(self.sharedLayers)
self._reload_invmodel(self.invmodelpath)
if avgLossForwardmodel!=0 and torch.isnan(avgLossForwardmodel):
forwardmodelfile.write(
'{};{};{};{};{};forward model learning restarted.\n'.format(
time,self.unique_id,epoch,len(s_t),avgLossForwardmodel))
forwardmodelfile.flush()
self._initForwardmodel()
self._reload_forwardmodel(self.forwardmodelpath)
self._calc_reward(pos,s_t,s_tp1,a_t_fwd)
return invloss_vec,fwdloss_vec
| 58,987 | 42.373529 | 106 | py |
malfoy | malfoy-master/v2x/solutions/attention.py | # -*- coding: utf-8 -*-
import torch
from torch import nn,optim,tensor
from torch.nn import functional as F
from ..config.config import (PRICE_MODEL_PARAMS as pmp,DEVICE)
class EncoderRNN(nn.Module):
max_length = pmp.batch_size
def __init__(self, input_size, hidden_size=128):
super().__init__()
self.hidden_size = hidden_size
self.embedding = nn.Linear(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
def forward(self, input_tensor, hidden):
embedded = F.relu(self.embedding(input_tensor)).view(1, 1, -1)
output = embedded
output, hidden = self.gru(output, hidden)
return output, hidden
class DecoderRNN(nn.Module):
dropout_p = 0.1
max_length = pmp.batch_size
def __init__(self, output_size, hidden_size=128):
super().__init__()
self.embedding = nn.Linear(output_size, hidden_size)
self.attn = nn.Linear(hidden_size * 2, self.max_length)
self.attn_combine = nn.Linear(hidden_size * 2, hidden_size)
self.dropout = nn.Dropout(self.dropout_p)
self.gru = nn.GRU(hidden_size, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
def forward(self, decoder_input, hidden, encoder_outputs):
embedded = F.relu(self.embedding(decoder_input)).view(1,1,-1)
embedded = self.dropout(embedded)
attn_weights = F.softmax(
self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
attn_applied = torch.bmm(attn_weights.unsqueeze(0),
encoder_outputs.unsqueeze(0))
output = torch.cat((embedded[0], attn_applied[0]), 1)
output = self.attn_combine(output).unsqueeze(0)
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = nn.Sigmoid()(self.out(output[0]))
return output, hidden, attn_weights
class Attention(nn.Module):
teacher_forcing_ratio = 0.5
epoch = 5
criterion = nn.MSELoss()
def __init__(self,unique_id,input_size,output_size,maxReward):
super().__init__()
self.unique_id = unique_id + '_attentionMdl'
self.output_size = output_size
self.maxReward = maxReward
self.encoder = EncoderRNN(input_size)
self.decoder = DecoderRNN(output_size,self.encoder.hidden_size)
self.encoder_optimizer = None
self.decoder_optimizer = None
self.avg_reward = 0
self.trainingdata = None
def setOptim(self,lr=0.01):
self.encoder_optimizer = optim.SGD(self.encoder.parameters(),lr=lr)
self.decoder_optimizer = optim.SGD(self.decoder.parameters(),lr=lr)
def initHidden(self,hidden_size):
return torch.zeros(1, 1, hidden_size, device=DEVICE).float()
def train(self,input_tensor,target_tensor,end_value=0):
if end_value > self.maxReward:
self.maxReward = end_value
end_value = end_value / self.maxReward
encoder_hidden = self.initHidden(self.encoder.hidden_size)
sequence_length = input_tensor.size()[0] # input/output sequence length
encoder_outputs = torch.zeros(self.encoder.max_length,
self.encoder.hidden_size, device=DEVICE)
for ei in range(sequence_length):
encoder_output, encoder_hidden = self.encoder(
input_tensor[ei], encoder_hidden)
            encoder_outputs[ei] = encoder_output[0, 0]  # store step ei's encoding
decoder_inputs = torch.cat([
target_tensor.view(-1,1).float(),
tensor([end_value],device=DEVICE).view(-1,1).float()])
decoder_hidden = encoder_hidden
loss = 0
for di in range(sequence_length):
decoder_output, decoder_hidden, decoder_attention = self.decoder(
decoder_inputs[di], decoder_hidden, encoder_outputs)
loss += self.criterion(decoder_output.view(-1,1),
decoder_inputs[di+1].view(-1,1))
attention_loss = loss.detach()
self.encoder_optimizer.zero_grad()
self.decoder_optimizer.zero_grad()
loss.backward()
self.encoder_optimizer.step()
self.decoder_optimizer.step()
return attention_loss
def inference(self,input_tensor,target_tensor,end_value=0.0):
maxlength = self.encoder.max_length
self.encoder.eval()
self.decoder.eval()
with torch.no_grad():
encoder_hidden = self.initHidden(self.encoder.hidden_size)
sequence_length = input_tensor.size()[0] # input/output sequence length
encoder_outputs = torch.zeros(maxlength,
self.encoder.hidden_size, device=DEVICE)
for ei in range(sequence_length):
encoder_output, encoder_hidden = self.encoder(input_tensor[ei],
encoder_hidden)
encoder_outputs[ei] += encoder_output[0, 0]
decoder_inputs = torch.cat(
[tensor([-1.],device=DEVICE).view(-1,1),
target_tensor.view(-1,1),
tensor([end_value],device=DEVICE).view(-1,1)]).float()
decoder_hidden = encoder_hidden
for di in range(maxlength):
decoder_output,decoder_hidden,decoder_attention = self.decoder(
decoder_inputs[di], decoder_hidden, encoder_outputs)
return decoder_attention.data.squeeze()
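# Usage sketch (shapes assumed from the code above): train() and inference()
# expect input_tensor of shape (seq_len, input_size) -- one feature vector
# per time step -- and target_tensor of shape (seq_len,) with the per-step
# rewards; inference() returns the final decoder step's attention weights,
# a vector of length max_length (= PRICE_MODEL_PARAMS.batch_size).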
| 5,674 | 39.827338 | 83 | py |